| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.1k-10.2k | stringlengths 151-4.94k | stringlengths 582-21k | int64 271-2.05k | int64 47-1.02k |
gh_patches_debug_15953 | rasdani/github-patches | git_diff | pytorch__audio-1465 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused module
[`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchaudio/_internal/misc_ops.py`
Content:
```
1 from typing import Union, Callable
2
3 import torch
4 from torch import Tensor
5
6
7 def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:
8 """Audio normalization of a tensor in-place. The normalization can be a bool,
9 a number, or a callable that takes the audio tensor as an input. SoX uses
10 32-bit signed integers internally, thus bool normalizes based on that assumption.
11 """
12
13 if not normalization:
14 return
15
16 if isinstance(normalization, bool):
17 normalization = 1 << 31
18
19 if isinstance(normalization, (float, int)):
20 # normalize with custom value
21 signal /= normalization
22 elif callable(normalization):
23 signal /= normalization(signal)
24
25
26 def check_input(src: Tensor) -> None:
27 if not torch.is_tensor(src):
28 raise TypeError('Expected a tensor, got %s' % type(src))
29 if src.is_cuda:
30 raise TypeError('Expected a CPU based tensor, got %s' % type(src))
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py
deleted file mode 100644
--- a/torchaudio/_internal/misc_ops.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from typing import Union, Callable
-
-import torch
-from torch import Tensor
-
-
-def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:
- """Audio normalization of a tensor in-place. The normalization can be a bool,
- a number, or a callable that takes the audio tensor as an input. SoX uses
- 32-bit signed integers internally, thus bool normalizes based on that assumption.
- """
-
- if not normalization:
- return
-
- if isinstance(normalization, bool):
- normalization = 1 << 31
-
- if isinstance(normalization, (float, int)):
- # normalize with custom value
- signal /= normalization
- elif callable(normalization):
- signal /= normalization(signal)
-
-
-def check_input(src: Tensor) -> None:
- if not torch.is_tensor(src):
- raise TypeError('Expected a tensor, got %s' % type(src))
- if src.is_cuda:
- raise TypeError('Expected a CPU based tensor, got %s' % type(src))
| {"golden_diff": "diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py\ndeleted file mode 100644\n--- a/torchaudio/_internal/misc_ops.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-from typing import Union, Callable\n-\n-import torch\n-from torch import Tensor\n-\n-\n-def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n- \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n- a number, or a callable that takes the audio tensor as an input. SoX uses\n- 32-bit signed integers internally, thus bool normalizes based on that assumption.\n- \"\"\"\n-\n- if not normalization:\n- return\n-\n- if isinstance(normalization, bool):\n- normalization = 1 << 31\n-\n- if isinstance(normalization, (float, int)):\n- # normalize with custom value\n- signal /= normalization\n- elif callable(normalization):\n- signal /= normalization(signal)\n-\n-\n-def check_input(src: Tensor) -> None:\n- if not torch.is_tensor(src):\n- raise TypeError('Expected a tensor, got %s' % type(src))\n- if src.is_cuda:\n- raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "issue": "Remove unused module\n[`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module.\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Callable\n\nimport torch\nfrom torch import Tensor\n\n\ndef normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n a number, or a callable that takes the audio tensor as an input. SoX uses\n 32-bit signed integers internally, thus bool normalizes based on that assumption.\n \"\"\"\n\n if not normalization:\n return\n\n if isinstance(normalization, bool):\n normalization = 1 << 31\n\n if isinstance(normalization, (float, int)):\n # normalize with custom value\n signal /= normalization\n elif callable(normalization):\n signal /= normalization(signal)\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_internal/misc_ops.py"}], "after_files": [{"content": null, "path": "torchaudio/_internal/misc_ops.py"}]} | 631 | 300 |
gh_patches_debug_691 | rasdani/github-patches | git_diff | ivy-llc__ivy-15263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eigh
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.numpy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 from_zero_dim_arrays_to_scalar,
6 )
7
8
9 @to_ivy_arrays_and_back
10 @from_zero_dim_arrays_to_scalar
11 def eigvalsh(a, /, UPLO="L"):
12 return ivy.eigvalsh(a, UPLO=UPLO)
13
14
15 @to_ivy_arrays_and_back
16 def eig(a):
17 return ivy.eig(a)
18
19
20 @from_zero_dim_arrays_to_scalar
21 def eigh(a, /, UPLO="L"):
22 return ivy.eigh(a, UPLO=UPLO)
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
@@ -17,6 +17,7 @@
return ivy.eig(a)
+@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def eigh(a, /, UPLO="L"):
return ivy.eigh(a, UPLO=UPLO)
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,7 @@\n return ivy.eig(a)\n \n \n+@to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "issue": "eigh\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]} | 449 | 140 |
gh_patches_debug_53306 | rasdani/github-patches | git_diff | privacyidea__privacyidea-2418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update requirements for upcoming version 3.5
Push requirements to newest versions according to https://github.com/privacyidea/privacyidea/wiki/Development-workflow#requirements
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import print_function
3 from setuptools import setup, find_packages
4 import os
5 import stat
6 import sys
7
8 #VERSION = "2.1dev4"
9 VERSION = "3.4"
10
11 # Taken from kennethreitz/requests/setup.py
12 package_directory = os.path.realpath(os.path.dirname(__file__))
13
14
15 def get_file_contents(file_path):
16 """Get the context of the file using full path name."""
17 content = ""
18 try:
19 full_path = os.path.join(package_directory, file_path)
20 content = open(full_path, 'r').read()
21 except:
22 print("### could not open file {0!r}".format(file_path), file=sys.stderr)
23 return content
24
25
26 def get_file_list(file_path):
27 full_path = os.path.join(package_directory, file_path)
28 file_list = os.listdir(full_path)
29 # now we need to add the path to the files
30 return [file_path + f for f in file_list]
31
32
33 install_requires = ["beautifulsoup4[lxml]>=4.3.2",
34 "cbor2>=5.0.1",
35 "configobj>=5.0.6",
36 "croniter>=0.3.8",
37 "cryptography>=2.4.2",
38 "defusedxml>=0.4.1",
39 "ecdsa>=0.13.3",
40 "Flask>=0.10.1",
41 "Flask-Babel>=0.9",
42 "Flask-Migrate>=1.2.0",
43 "Flask-Script>=2.0.5",
44 "Flask-SQLAlchemy>=2.0",
45 "Flask-Versioned>=0.9.4",
46 "future>=0.18.2;python_version<'3.0'",
47 "huey[redis]>=1.11.0",
48 "ldap3>=2.6",
49 "netaddr>=0.7.12",
50 "oauth2client>=2.0.1",
51 "passlib[bcrypt]>=1.7.0",
52 "Pillow>=6.2.1",
53 "PyJWT>=1.3.0",
54 "PyMySQL>=0.6.6",
55 "pyOpenSSL>=17.5",
56 "pyrad>=2.0",
57 "python-dateutil>=2.7.3",
58 "python-gnupg>=0.4.4",
59 "PyYAML>=5.1",
60 "qrcode>=6.1",
61 "requests>=2.7.0",
62 "smpplib>=2.0",
63 "SQLAlchemy>=1.3.0",
64 "sqlsoup>=0.9.0"]
65
66
67 def get_man_pages(dir):
68 """
69 Get man pages in a directory.
70 :param dir:
71 :return: list of file names
72 """
73 files = os.listdir(dir)
74 r_files = []
75 for file in files:
76 if file.endswith(".1"):
77 r_files.append(dir + "/" + file)
78 return r_files
79
80
81 def get_scripts(dir):
82 """
83 Get files that are executable
84 :param dir:
85 :return: list of file names
86 """
87 files = os.listdir(dir)
88 r_files = []
89 for file in files:
90 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
91 r_files.append(dir + "/" + file)
92 return r_files
93
94
95 setup(
96 name='privacyIDEA',
97 version=VERSION,
98 description='privacyIDEA: identity, multifactor authentication (OTP), '
99 'authorization, audit',
100 author='privacyidea.org',
101 license='AGPLv3',
102 author_email='[email protected]',
103 url='http://www.privacyidea.org',
104 keywords='OTP, two factor authentication, management, security',
105 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',
106 packages=find_packages(),
107 scripts=["pi-manage"] + get_scripts("tools"),
108 extras_require={
109 'doc': ["Sphinx>=1.3.1",
110 "sphinxcontrib-httpdomain>=1.3.0",
111 "sphinxcontrib-plantuml>=0.18"],
112 'test': ["mock>=2.0.0",
113 "pytest>=3.6.0",
114 "pytest-cov>=2.5.1",
115 "responses>=0.9.0"],
116 'postgres': ['psycopg2>=2.8.3']
117 },
118 install_requires=install_requires,
119 include_package_data=True,
120 data_files=[('etc/privacyidea/',
121 ['deploy/apache/privacyideaapp.wsgi',
122 'deploy/privacyidea/dictionary']),
123 ('share/man/man1', get_man_pages("tools")),
124 ('lib/privacyidea/migrations',
125 ["migrations/alembic.ini",
126 "migrations/env.py",
127 "migrations/README",
128 "migrations/script.py.mako"]),
129 ('lib/privacyidea/migrations/versions',
130 get_file_list("migrations/versions/")),
131 ('lib/privacyidea/', ['requirements.txt'])
132 ],
133 classifiers=["Framework :: Flask",
134 "License :: OSI Approved :: "
135 "GNU Affero General Public License v3",
136 "Programming Language :: Python",
137 "Development Status :: 5 - Production/Stable",
138 "Topic :: Internet",
139 "Topic :: Security",
140 "Topic :: System ::"
141 " Systems Administration :: Authentication/Directory",
142 'Programming Language :: Python',
143 'Programming Language :: Python :: 2',
144 'Programming Language :: Python :: 2.7',
145 'Programming Language :: Python :: 3',
146 'Programming Language :: Python :: 3.5',
147 'Programming Language :: Python :: 3.6',
148 'Programming Language :: Python :: 3.7',
149 'Programming Language :: Python :: 3.8'
150 ],
151 zip_safe=False,
152 long_description=get_file_contents('README.rst')
153 )
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,7 @@
"oauth2client>=2.0.1",
"passlib[bcrypt]>=1.7.0",
"Pillow>=6.2.1",
+ "pydash>=4.7.4",
"PyJWT>=1.3.0",
"PyMySQL>=0.6.6",
"pyOpenSSL>=17.5",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,6 +50,7 @@\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n+ \"pydash>=4.7.4\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n", "issue": "Update requirements for upcoming version 3.5\nPush requirements to newest versions according to https://github.com/privacyidea/privacyidea/wiki/Development-workflow#requirements\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION = \"2.1dev4\"\nVERSION = \"3.4\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print(\"### could not open file {0!r}\".format(file_path), file=sys.stderr)\n return content\n\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [file_path + f for f in file_list]\n\n\ninstall_requires = [\"beautifulsoup4[lxml]>=4.3.2\",\n \"cbor2>=5.0.1\",\n \"configobj>=5.0.6\",\n \"croniter>=0.3.8\",\n \"cryptography>=2.4.2\",\n \"defusedxml>=0.4.1\",\n \"ecdsa>=0.13.3\",\n \"Flask>=0.10.1\",\n \"Flask-Babel>=0.9\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-Script>=2.0.5\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Versioned>=0.9.4\",\n \"future>=0.18.2;python_version<'3.0'\",\n \"huey[redis]>=1.11.0\",\n \"ldap3>=2.6\",\n \"netaddr>=0.7.12\",\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n \"pyrad>=2.0\",\n \"python-dateutil>=2.7.3\",\n \"python-gnupg>=0.4.4\",\n \"PyYAML>=5.1\",\n \"qrcode>=6.1\",\n \"requests>=2.7.0\",\n \"smpplib>=2.0\",\n \"SQLAlchemy>=1.3.0\",\n \"sqlsoup>=0.9.0\"]\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'doc': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\",\n \"sphinxcontrib-plantuml>=0.18\"],\n 'test': [\"mock>=2.0.0\",\n \"pytest>=3.6.0\",\n \"pytest-cov>=2.5.1\",\n \"responses>=0.9.0\"],\n 'postgres': ['psycopg2>=2.8.3']\n },\n install_requires=install_requires,\n 
include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\")),\n ('lib/privacyidea/', ['requirements.txt'])\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\",\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION = \"2.1dev4\"\nVERSION = \"3.4\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print(\"### could not open file {0!r}\".format(file_path), file=sys.stderr)\n return content\n\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [file_path + f for f in file_list]\n\n\ninstall_requires = [\"beautifulsoup4[lxml]>=4.3.2\",\n \"cbor2>=5.0.1\",\n \"configobj>=5.0.6\",\n \"croniter>=0.3.8\",\n \"cryptography>=2.4.2\",\n \"defusedxml>=0.4.1\",\n \"ecdsa>=0.13.3\",\n \"Flask>=0.10.1\",\n \"Flask-Babel>=0.9\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-Script>=2.0.5\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Versioned>=0.9.4\",\n \"future>=0.18.2;python_version<'3.0'\",\n \"huey[redis]>=1.11.0\",\n \"ldap3>=2.6\",\n \"netaddr>=0.7.12\",\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n \"pydash>=4.7.4\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n \"pyrad>=2.0\",\n \"python-dateutil>=2.7.3\",\n \"python-gnupg>=0.4.4\",\n \"PyYAML>=5.1\",\n \"qrcode>=6.1\",\n \"requests>=2.7.0\",\n \"smpplib>=2.0\",\n \"SQLAlchemy>=1.3.0\",\n \"sqlsoup>=0.9.0\"]\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n 
name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'doc': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\",\n \"sphinxcontrib-plantuml>=0.18\"],\n 'test': [\"mock>=2.0.0\",\n \"pytest>=3.6.0\",\n \"pytest-cov>=2.5.1\",\n \"responses>=0.9.0\"],\n 'postgres': ['psycopg2>=2.8.3']\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\")),\n ('lib/privacyidea/', ['requirements.txt'])\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\",\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]} | 1,965 | 113 |
gh_patches_debug_3635 | rasdani/github-patches | git_diff | ansible__ansible-lint-1625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive: async jobs
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and master branch are affected too -->
##### Summary
<!--- Explain the problem briefly below -->
A `command` module task that is run as an async job is incorrectly treated as a normal sync task.
For async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task.
Ansible-lint does not understand this and complains for rule `no-changed-when` for the `command` task.
Example:
```yaml
---
- name: Asynchronous long task
command: alongtask.sh
async: 1000
poll: 0
register: job_sleeper
- name: Wait for asynchronous job to end
async_status:
jid: '{{ job_sleeper.ansible_job_id }}'
register: job_result
until: job_result.finished
retries: 100
delay: 10
changed_when: [....]
```
Note how the `changed_when` is given in the `async_status` task and not in the `command` task.
##### Issue Type
- Bug Report
##### Ansible and Ansible Lint details
<!--- Paste verbatim output between triple backticks -->
```console (paste below)
ansible --version
2.9.21
ansible-lint --version
5.0.8
```
- ansible installation method: pip
- ansible-lint installation method: pip
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
EL7.9 all updated
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
```yaml
---
- name: Asynchronous yum task
command: alongtask.sh
async: 1000
poll: 0
register: job_sleeper
- name: Wait for asynchronous job to end
async_status:
jid: '{{ job_sleeper.ansible_job_id }}'
register: job_result
until: job_result.finished
retries: 100
delay: 10
changed_when: [....]
```
<!--- Paste example playbooks or commands between triple backticks below -->
```console (paste below)
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### Desired Behaviour
<!--- Describe what you expected to happen when running the steps above -->
Ansible-lint should not detect `no-changed-when` for `command` module task run as async job since the `changed_when` cannot be given to the `command` module task itself.
It should detect that there is a `changed_when` in the following `async_status` task.
##### Actual Behaviour
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
Ansible-lint detects false positive `no-changed-when` for `command` module task run as async job even though `changed_when` cannot be correctly given for an async task - the `changed_when` is given for the subsequent `async_status` module task.
<!--- Paste verbatim command output between triple backticks -->
```paste below
```
[minimum complete verifiable example]: http://stackoverflow.com/help/mcve
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ansiblelint/rules/CommandHasChangesCheckRule.py`
Content:
```
1 # Copyright (c) 2016 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from typing import TYPE_CHECKING, Any, Dict, Union
22
23 from ansiblelint.rules import AnsibleLintRule
24
25 if TYPE_CHECKING:
26 from typing import Optional
27
28 from ansiblelint.file_utils import Lintable
29
30
31 class CommandHasChangesCheckRule(AnsibleLintRule):
32 id = 'no-changed-when'
33 shortdesc = 'Commands should not change things if nothing needs doing'
34 description = (
35 'Commands should either read information (and thus set '
36 '``changed_when``) or not do something if it has already been '
37 'done (using creates/removes) or only do it if another '
38 'check has a particular result (``when``)'
39 )
40 severity = 'HIGH'
41 tags = ['command-shell', 'idempotency']
42 version_added = 'historic'
43
44 _commands = ['command', 'shell', 'raw']
45
46 def matchtask(
47 self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
48 ) -> Union[bool, str]:
49 if task["__ansible_action_type__"] == 'task':
50 if task["action"]["__ansible_module__"] in self._commands:
51 return (
52 'changed_when' not in task
53 and 'when' not in task
54 and 'creates' not in task['action']
55 and 'removes' not in task['action']
56 )
57 return False
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py
--- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py
+++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py
@@ -53,5 +53,6 @@
and 'when' not in task
and 'creates' not in task['action']
and 'removes' not in task['action']
+ and not ('async' in task and task.get('poll') == 0)
)
return False
| {"golden_diff": "diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n--- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n+++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n@@ -53,5 +53,6 @@\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n+ and not ('async' in task and task.get('poll') == 0)\n )\n return False\n", "issue": "False positive: async jobs\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and master branch are affected too -->\r\n\r\n##### Summary\r\n<!--- Explain the problem briefly below -->\r\nA `command` module task that is run as an async job is incorrectly treated as a normal sync task.\r\n\r\nFor async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task.\r\n\r\nAnsible-lint does not understand this and complains for rule `no-changed-when` for the `command` task.\r\n\r\nExample:\r\n```yaml\r\n---\r\n- name: Asynchronous long task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\nNote how the `changed_when` is given in the `async_status` task and not in the `command` task.\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between triple backticks -->\r\n```console (paste below)\r\nansible --version\r\n2.9.21\r\n\r\nansible-lint --version\r\n5.0.8\r\n\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nEL7.9 all updated\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n```yaml\r\n---\r\n- name: Asynchronous yum task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\n<!--- Paste example playbooks or commands between triple backticks below -->\r\n```console (paste below)\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### Desired Behaviour\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nAnsible-lint should not detect `no-changed-when` for `command` module task run as async job since the `changed_when` cannot be given to the `command` module task itself.\r\n\r\nIt should detect that there is a `changed_when` in the following `async_status` task.\r\n\r\n##### Actual Behaviour\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\nAnsible-lint detects false positive `no-changed-when` for `command` module task run as async job even though `changed_when` cannot be correctly given for an async task - the `changed_when` is given for the subsequent `async_status` module task.\r\n\r\n<!--- Paste verbatim command output between triple backticks -->\r\n```paste below\r\n\r\n```\r\n\r\n\r\n[minimum complete verifiable example]: http://stackoverflow.com/help/mcve\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandHasChangesCheckRule(AnsibleLintRule):\n id = 'no-changed-when'\n shortdesc = 'Commands should not change things if nothing needs doing'\n description = (\n 'Commands should either read information (and thus set '\n '``changed_when``) or not do something if it has already been '\n 'done (using creates/removes) or only do it if another '\n 'check has a particular result (``when``)'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idempotency']\n version_added = 'historic'\n\n _commands = ['command', 'shell', 'raw']\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n if task[\"__ansible_action_type__\"] == 'task':\n if task[\"action\"][\"__ansible_module__\"] in self._commands:\n return (\n 'changed_when' not in task\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n )\n return False\n", "path": "src/ansiblelint/rules/CommandHasChangesCheckRule.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 
\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandHasChangesCheckRule(AnsibleLintRule):\n id = 'no-changed-when'\n shortdesc = 'Commands should not change things if nothing needs doing'\n description = (\n 'Commands should either read information (and thus set '\n '``changed_when``) or not do something if it has already been '\n 'done (using creates/removes) or only do it if another '\n 'check has a particular result (``when``)'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idempotency']\n version_added = 'historic'\n\n _commands = ['command', 'shell', 'raw']\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n if task[\"__ansible_action_type__\"] == 'task':\n if task[\"action\"][\"__ansible_module__\"] in self._commands:\n return (\n 'changed_when' not in task\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n and not ('async' in task and task.get('poll') == 0)\n )\n return False\n", "path": "src/ansiblelint/rules/CommandHasChangesCheckRule.py"}]} | 1,675 | 129 |
gh_patches_debug_42365 | rasdani/github-patches | git_diff | scrapy__scrapy-3660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document LogFormatter
Currently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. This class should be properly documented, both its API members and a small section introducing it on the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html).
The responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/logformatter.py`
Content:
```
1 import os
2 import logging
3
4 from twisted.python.failure import Failure
5
6 from scrapy.utils.request import referer_str
7
8 SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s"
9 DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s"
10 CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
11
12
13 class LogFormatter(object):
14 """Class for generating log messages for different actions.
15
16 All methods must return a dictionary listing the parameters ``level``,
17 ``msg`` and ``args`` which are going to be used for constructing the log
18 message when calling logging.log.
19
20 Dictionary keys for the method outputs:
21 * ``level`` should be the log level for that action, you can use those
22 from the python logging library: logging.DEBUG, logging.INFO,
23 logging.WARNING, logging.ERROR and logging.CRITICAL.
24
25 * ``msg`` should be a string that can contain different formatting
26 placeholders. This string, formatted with the provided ``args``, is
27 going to be the log message for that action.
28
29 * ``args`` should be a tuple or dict with the formatting placeholders
30 for ``msg``. The final log message is computed as output['msg'] %
31 output['args'].
32 """
33
34 def crawled(self, request, response, spider):
35 request_flags = ' %s' % str(request.flags) if request.flags else ''
36 response_flags = ' %s' % str(response.flags) if response.flags else ''
37 return {
38 'level': logging.DEBUG,
39 'msg': CRAWLEDMSG,
40 'args': {
41 'status': response.status,
42 'request': request,
43 'request_flags' : request_flags,
44 'referer': referer_str(request),
45 'response_flags': response_flags,
46 # backward compatibility with Scrapy logformatter below 1.4 version
47 'flags': response_flags
48 }
49 }
50
51 def scraped(self, item, response, spider):
52 if isinstance(response, Failure):
53 src = response.getErrorMessage()
54 else:
55 src = response
56 return {
57 'level': logging.DEBUG,
58 'msg': SCRAPEDMSG,
59 'args': {
60 'src': src,
61 'item': item,
62 }
63 }
64
65 def dropped(self, item, exception, response, spider):
66 return {
67 'level': logging.WARNING,
68 'msg': DROPPEDMSG,
69 'args': {
70 'exception': exception,
71 'item': item,
72 }
73 }
74
75 @classmethod
76 def from_crawler(cls, crawler):
77 return cls()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py
--- a/scrapy/logformatter.py
+++ b/scrapy/logformatter.py
@@ -12,26 +12,40 @@
class LogFormatter(object):
"""Class for generating log messages for different actions.
-
- All methods must return a dictionary listing the parameters ``level``,
- ``msg`` and ``args`` which are going to be used for constructing the log
- message when calling logging.log.
+
+ All methods must return a dictionary listing the parameters ``level``, ``msg``
+ and ``args`` which are going to be used for constructing the log message when
+ calling ``logging.log``.
Dictionary keys for the method outputs:
- * ``level`` should be the log level for that action, you can use those
- from the python logging library: logging.DEBUG, logging.INFO,
- logging.WARNING, logging.ERROR and logging.CRITICAL.
- * ``msg`` should be a string that can contain different formatting
- placeholders. This string, formatted with the provided ``args``, is
- going to be the log message for that action.
+ * ``level`` is the log level for that action, you can use those from the
+ `python logging library <https://docs.python.org/3/library/logging.html>`_ :
+ ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``
+ and ``logging.CRITICAL``.
+ * ``msg`` should be a string that can contain different formatting placeholders.
+ This string, formatted with the provided ``args``, is going to be the long message
+ for that action.
+ * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.
+ The final log message is computed as ``msg % args``.
- * ``args`` should be a tuple or dict with the formatting placeholders
- for ``msg``. The final log message is computed as output['msg'] %
- output['args'].
- """
+ Here is an example on how to create a custom log formatter to lower the severity level of
+ the log message when an item is dropped from the pipeline::
+ class PoliteLogFormatter(logformatter.LogFormatter):
+ def dropped(self, item, exception, response, spider):
+ return {
+ 'level': logging.INFO, # lowering the level from logging.WARNING
+ 'msg': u"Dropped: %(exception)s" + os.linesep + "%(item)s",
+ 'args': {
+ 'exception': exception,
+ 'item': item,
+ }
+ }
+ """
+
def crawled(self, request, response, spider):
+ """Logs a message when the crawler finds a webpage."""
request_flags = ' %s' % str(request.flags) if request.flags else ''
response_flags = ' %s' % str(response.flags) if response.flags else ''
return {
@@ -40,7 +54,7 @@
'args': {
'status': response.status,
'request': request,
- 'request_flags' : request_flags,
+ 'request_flags': request_flags,
'referer': referer_str(request),
'response_flags': response_flags,
# backward compatibility with Scrapy logformatter below 1.4 version
@@ -49,6 +63,7 @@
}
def scraped(self, item, response, spider):
+ """Logs a message when an item is scraped by a spider."""
if isinstance(response, Failure):
src = response.getErrorMessage()
else:
@@ -63,6 +78,7 @@
}
def dropped(self, item, exception, response, spider):
+ """Logs a message when an item is dropped while it is passing through the item pipeline."""
return {
'level': logging.WARNING,
'msg': DROPPEDMSG,
| {"golden_diff": "diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py\n--- a/scrapy/logformatter.py\n+++ b/scrapy/logformatter.py\n@@ -12,26 +12,40 @@\n \n class LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n-\n- All methods must return a dictionary listing the parameters ``level``,\n- ``msg`` and ``args`` which are going to be used for constructing the log\n- message when calling logging.log.\n+ \n+ All methods must return a dictionary listing the parameters ``level``, ``msg``\n+ and ``args`` which are going to be used for constructing the log message when\n+ calling ``logging.log``.\n \n Dictionary keys for the method outputs:\n- * ``level`` should be the log level for that action, you can use those\n- from the python logging library: logging.DEBUG, logging.INFO,\n- logging.WARNING, logging.ERROR and logging.CRITICAL.\n \n- * ``msg`` should be a string that can contain different formatting\n- placeholders. This string, formatted with the provided ``args``, is\n- going to be the log message for that action.\n+ * ``level`` is the log level for that action, you can use those from the\n+ `python logging library <https://docs.python.org/3/library/logging.html>`_ :\n+ ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``\n+ and ``logging.CRITICAL``.\n+ * ``msg`` should be a string that can contain different formatting placeholders.\n+ This string, formatted with the provided ``args``, is going to be the long message\n+ for that action.\n+ * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.\n+ The final log message is computed as ``msg % args``.\n \n- * ``args`` should be a tuple or dict with the formatting placeholders\n- for ``msg``. The final log message is computed as output['msg'] %\n- output['args'].\n- \"\"\"\n+ Here is an example on how to create a custom log formatter to lower the severity level of\n+ the log message when an item is dropped from the pipeline::\n \n+ class PoliteLogFormatter(logformatter.LogFormatter):\n+ def dropped(self, item, exception, response, spider):\n+ return {\n+ 'level': logging.INFO, # lowering the level from logging.WARNING\n+ 'msg': u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\",\n+ 'args': {\n+ 'exception': exception,\n+ 'item': item,\n+ }\n+ }\n+ \"\"\"\n+ \n def crawled(self, request, response, spider):\n+ \"\"\"Logs a message when the crawler finds a webpage.\"\"\"\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n@@ -40,7 +54,7 @@\n 'args': {\n 'status': response.status,\n 'request': request,\n- 'request_flags' : request_flags,\n+ 'request_flags': request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n@@ -49,6 +63,7 @@\n }\n \n def scraped(self, item, response, spider):\n+ \"\"\"Logs a message when an item is scraped by a spider.\"\"\"\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n@@ -63,6 +78,7 @@\n }\n \n def dropped(self, item, exception, response, spider):\n+ \"\"\"Logs a message when an item is dropped while it is passing through the item pipeline.\"\"\"\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n", "issue": "Document LogFormatter\nCurrently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. 
This class should be properly documented, both its API members and a small section introducing it on the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html).\r\n\r\nThe responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point.\n", "before_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters ``level``,\n ``msg`` and ``args`` which are going to be used for constructing the log\n message when calling logging.log.\n\n Dictionary keys for the method outputs:\n * ``level`` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * ``msg`` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided ``args``, is\n going to be the log message for that action.\n\n * ``args`` should be a tuple or dict with the formatting placeholders\n for ``msg``. The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}], "after_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n \n All methods must return a dictionary listing the parameters ``level``, ``msg``\n and ``args`` which are going to be used for constructing the log message when\n calling ``logging.log``.\n\n Dictionary keys for the method outputs:\n\n * ``level`` is the log level for that action, you can use those from the\n `python logging library 
<https://docs.python.org/3/library/logging.html>`_ :\n ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``\n and ``logging.CRITICAL``.\n * ``msg`` should be a string that can contain different formatting placeholders.\n This string, formatted with the provided ``args``, is going to be the long message\n for that action.\n * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.\n The final log message is computed as ``msg % args``.\n\n Here is an example on how to create a custom log formatter to lower the severity level of\n the log message when an item is dropped from the pipeline::\n\n class PoliteLogFormatter(logformatter.LogFormatter):\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.INFO, # lowering the level from logging.WARNING\n 'msg': u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\",\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n \"\"\"\n \n def crawled(self, request, response, spider):\n \"\"\"Logs a message when the crawler finds a webpage.\"\"\"\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags': request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n \"\"\"Logs a message when an item is scraped by a spider.\"\"\"\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n \"\"\"Logs a message when an item is dropped while it is passing through the item pipeline.\"\"\"\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}]} | 1,115 | 858 |
gh_patches_debug_32933 | rasdani/github-patches | git_diff | kserve__kserve-524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sample of image_transformer does not work
/kind bug
The sample under docs/samples/transformer/image_transformer is broken; there's a Python error in it.
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
It's due to PR #492: kfmodel and kfserver are refactored now, but the sample still inherits from transformer, which no longer exists. Also, some other symbols need to be renamed.
**What did you expect to happen:**
Sample still works
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/samples/transformer/image_transformer/image_transformer/__main__.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import kfserving
16 import argparse
17 from .image_transformer import ImageTransformer
18
19 DEFAULT_MODEL_NAME = "model"
20
21 parser = argparse.ArgumentParser(parents=[kfserving.server.parser])
22 parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
23 help='The name that the model is served under.')
24 parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)
25
26 args, _ = parser.parse_known_args()
27
28 if __name__ == "__main__":
29 transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,
30 protocol=args.protocol)
31 kfserver = kfserving.KFServer()
32 kfserver.start(models=[transformer])
33
```
Path: `docs/samples/transformer/image_transformer/image_transformer/image_transformer.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import kfserving
16 from typing import List, Dict
17 from kfserving.transformer import Transformer
18 from PIL import Image
19 import torchvision.transforms as transforms
20 import logging
21 import io
22 import numpy as np
23 import base64
24
25 logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)
26
27 transform = transforms.Compose(
28 [transforms.ToTensor(),
29 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
30
31
32 def image_transform(instance):
33 byte_array = base64.b64decode(instance['image_bytes']['b64'])
34 image = Image.open(io.BytesIO(byte_array))
35 a = np.asarray(image)
36 im = Image.fromarray(a)
37 res = transform(im)
38 logging.info(res)
39 return res.tolist()
40
41
42 class ImageTransformer(Transformer):
43
44 def preprocess(self, inputs: Dict) -> Dict:
45 return {'instances': [image_transform(instance) for instance in inputs['instances']]}
46
47 def postprocess(self, inputs: List) -> List:
48 return inputs
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py
--- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py
+++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py
@@ -18,7 +18,7 @@
DEFAULT_MODEL_NAME = "model"
-parser = argparse.ArgumentParser(parents=[kfserving.server.parser])
+parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
help='The name that the model is served under.')
parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)
@@ -26,7 +26,6 @@
args, _ = parser.parse_known_args()
if __name__ == "__main__":
- transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,
- protocol=args.protocol)
+ transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)
kfserver = kfserving.KFServer()
kfserver.start(models=[transformer])
diff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
--- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
+++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
@@ -14,7 +14,6 @@
import kfserving
from typing import List, Dict
-from kfserving.transformer import Transformer
from PIL import Image
import torchvision.transforms as transforms
import logging
@@ -39,7 +38,10 @@
return res.tolist()
-class ImageTransformer(Transformer):
+class ImageTransformer(kfserving.KFModel):
+ def __init__(self, name: str, predictor_host: str):
+ super().__init__(name)
+ self.predictor_host = predictor_host
def preprocess(self, inputs: Dict) -> Dict:
return {'instances': [image_transform(instance) for instance in inputs['instances']]}
| {"golden_diff": "diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n@@ -18,7 +18,7 @@\n \n DEFAULT_MODEL_NAME = \"model\"\n \n-parser = argparse.ArgumentParser(parents=[kfserving.server.parser])\n+parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\n parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\n parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n@@ -26,7 +26,6 @@\n args, _ = parser.parse_known_args()\n \n if __name__ == \"__main__\":\n- transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n- protocol=args.protocol)\n+ transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\ndiff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n@@ -14,7 +14,6 @@\n \n import kfserving\n from typing import List, Dict\n-from kfserving.transformer import Transformer\n from PIL import Image\n import torchvision.transforms as transforms\n import logging\n@@ -39,7 +38,10 @@\n return res.tolist()\n \n \n-class ImageTransformer(Transformer):\n+class ImageTransformer(kfserving.KFModel):\n+ def __init__(self, name: str, predictor_host: str):\n+ super().__init__(name)\n+ self.predictor_host = predictor_host\n \n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n", "issue": "Sample of image_transformer does not work\n/kind bug\r\nSample under docs/samples/transformer/image_transformer is broken, there's python error in it.\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nIt's due to PR #492, kfmodel and kfserver is refactored now but the sample still inherit from transformer which does not exist now. 
Also some other symbols need be renamed.\r\n\r\n**What did you expect to happen:**\r\nSample still works\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nimport argparse\nfrom .image_transformer import ImageTransformer\n\nDEFAULT_MODEL_NAME = \"model\"\n\nparser = argparse.ArgumentParser(parents=[kfserving.server.parser])\nparser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\nparser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n protocol=args.protocol)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\n", "path": "docs/samples/transformer/image_transformer/image_transformer/__main__.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nfrom typing import List, Dict\nfrom kfserving.transformer import Transformer\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport logging\nimport io\nimport numpy as np\nimport base64\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image_bytes']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n a = np.asarray(image)\n im = Image.fromarray(a)\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nclass ImageTransformer(Transformer):\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: List) -> List:\n return inputs\n", "path": "docs/samples/transformer/image_transformer/image_transformer/image_transformer.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nimport argparse\nfrom .image_transformer import ImageTransformer\n\nDEFAULT_MODEL_NAME = \"model\"\n\nparser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\nparser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\nparser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\n", "path": "docs/samples/transformer/image_transformer/image_transformer/__main__.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nfrom typing import List, Dict\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport logging\nimport io\nimport numpy as np\nimport base64\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image_bytes']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n a = np.asarray(image)\n im = Image.fromarray(a)\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nclass ImageTransformer(kfserving.KFModel):\n def __init__(self, name: str, predictor_host: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: List) -> List:\n return inputs\n", "path": "docs/samples/transformer/image_transformer/image_transformer/image_transformer.py"}]} | 1,203 | 487 |
gh_patches_debug_53989 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide length of TableOfContents
Currently, you can only iterate over `TableOfContents`. I would also like to know its length.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/toc.py`
Content:
```
1 # coding: utf-8
2
3 """
4 Deals with generating the per-page table of contents.
5
6 For the sake of simplicity we use an existing markdown extension to generate
7 an HTML table of contents, and then parse that into the underlying data.
8
9 The steps we take to generate a table of contents are:
10
11 * Pre-process the markdown, injecting a [TOC] marker.
12 * Generate HTML from markdown.
13 * Post-process the HTML, spliting the content and the table of contents.
14 * Parse table of contents HTML into the underlying data structure.
15 """
16
17 from __future__ import unicode_literals
18
19 try: # pragma: no cover
20 from html.parser import HTMLParser # noqa
21 except ImportError: # pragma: no cover
22 from HTMLParser import HTMLParser # noqa
23
24
25 class TableOfContents(object):
26 """
27 Represents the table of contents for a given page.
28 """
29 def __init__(self, html):
30 self.items = _parse_html_table_of_contents(html)
31
32 def __iter__(self):
33 return iter(self.items)
34
35 def __str__(self):
36 return ''.join([str(item) for item in self])
37
38
39 class AnchorLink(object):
40 """
41 A single entry in the table of contents.
42 """
43 def __init__(self, title, url):
44 self.title, self.url = title, url
45 self.children = []
46
47 def __str__(self):
48 return self.indent_print()
49
50 def indent_print(self, depth=0):
51 indent = ' ' * depth
52 ret = '%s%s - %s\n' % (indent, self.title, self.url)
53 for item in self.children:
54 ret += item.indent_print(depth + 1)
55 return ret
56
57
58 class TOCParser(HTMLParser):
59
60 def __init__(self):
61 HTMLParser.__init__(self)
62 self.links = []
63
64 self.in_anchor = False
65 self.attrs = None
66 self.title = ''
67
68 # Prior to Python3.4 no convert_charrefs keyword existed.
69 # However, in Python3.5 the default was changed to True.
70 # We need the False behavior in all versions but can only
71 # set it if it exists.
72 if hasattr(self, 'convert_charrefs'):
73 self.convert_charrefs = False
74
75 def handle_starttag(self, tag, attrs):
76
77 if not self.in_anchor:
78 if tag == 'a':
79 self.in_anchor = True
80 self.attrs = dict(attrs)
81
82 def handle_endtag(self, tag):
83 if tag == 'a':
84 self.in_anchor = False
85
86 def handle_data(self, data):
87
88 if self.in_anchor:
89 self.title += data
90
91 def handle_charref(self, ref):
92 self.handle_entityref("#" + ref)
93
94 def handle_entityref(self, ref):
95 self.handle_data("&%s;" % ref)
96
97
98 def _parse_html_table_of_contents(html):
99 """
100 Given a table of contents string that has been automatically generated by
101 the markdown library, parse it into a tree of AnchorLink instances.
102
103 Returns a list of all the parent AnchorLink instances.
104 """
105 lines = html.splitlines()[2:-2]
106 parents = []
107 ret = []
108 for line in lines:
109 parser = TOCParser()
110 parser.feed(line)
111 if parser.title:
112 try:
113 href = parser.attrs['href']
114 except KeyError:
115 continue
116 title = parser.title
117 nav = AnchorLink(title, href)
118 # Add the item to its parent if required. If it is a topmost
119 # item then instead append it to our return value.
120 if parents:
121 parents[-1].children.append(nav)
122 else:
123 ret.append(nav)
124 # If this item has children, store it as the current parent
125 if line.endswith('<ul>'):
126 parents.append(nav)
127 elif line.startswith('</ul>'):
128 if parents:
129 parents.pop()
130
131 # For the table of contents, always mark the first element as active
132 if ret:
133 ret[0].active = True
134
135 return ret
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/toc.py b/mkdocs/toc.py
--- a/mkdocs/toc.py
+++ b/mkdocs/toc.py
@@ -32,6 +32,9 @@
def __iter__(self):
return iter(self.items)
+ def __len__(self):
+ return len(self.items)
+
def __str__(self):
return ''.join([str(item) for item in self])
| {"golden_diff": "diff --git a/mkdocs/toc.py b/mkdocs/toc.py\n--- a/mkdocs/toc.py\n+++ b/mkdocs/toc.py\n@@ -32,6 +32,9 @@\n def __iter__(self):\n return iter(self.items)\n \n+ def __len__(self):\n+ return len(self.items)\n+\n def __str__(self):\n return ''.join([str(item) for item in self])\n", "issue": "Provide length of TableOfContents\nCurrently, you can only iter over `TableOfContents`. I would like to know the length of it.\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the per-page table of contents.\n\nFor the sake of simplicity we use an existing markdown extension to generate\nan HTML table of contents, and then parse that into the underlying data.\n\nThe steps we take to generate a table of contents are:\n\n* Pre-process the markdown, injecting a [TOC] marker.\n* Generate HTML from markdown.\n* Post-process the HTML, spliting the content and the table of contents.\n* Parse table of contents HTML into the underlying data structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\ntry: # pragma: no cover\n from html.parser import HTMLParser # noqa\nexcept ImportError: # pragma: no cover\n from HTMLParser import HTMLParser # noqa\n\n\nclass TableOfContents(object):\n \"\"\"\n Represents the table of contents for a given page.\n \"\"\"\n def __init__(self, html):\n self.items = _parse_html_table_of_contents(html)\n\n def __iter__(self):\n return iter(self.items)\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n\nclass AnchorLink(object):\n \"\"\"\n A single entry in the table of contents.\n \"\"\"\n def __init__(self, title, url):\n self.title, self.url = title, url\n self.children = []\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n ret = '%s%s - %s\\n' % (indent, self.title, self.url)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n\nclass TOCParser(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n self.in_anchor = False\n self.attrs = None\n self.title = ''\n\n # Prior to Python3.4 no convert_charrefs keyword existed.\n # However, in Python3.5 the default was changed to True.\n # We need the False behavior in all versions but can only\n # set it if it exists.\n if hasattr(self, 'convert_charrefs'):\n self.convert_charrefs = False\n\n def handle_starttag(self, tag, attrs):\n\n if not self.in_anchor:\n if tag == 'a':\n self.in_anchor = True\n self.attrs = dict(attrs)\n\n def handle_endtag(self, tag):\n if tag == 'a':\n self.in_anchor = False\n\n def handle_data(self, data):\n\n if self.in_anchor:\n self.title += data\n\n def handle_charref(self, ref):\n self.handle_entityref(\"#\" + ref)\n\n def handle_entityref(self, ref):\n self.handle_data(\"&%s;\" % ref)\n\n\ndef _parse_html_table_of_contents(html):\n \"\"\"\n Given a table of contents string that has been automatically generated by\n the markdown library, parse it into a tree of AnchorLink instances.\n\n Returns a list of all the parent AnchorLink instances.\n \"\"\"\n lines = html.splitlines()[2:-2]\n parents = []\n ret = []\n for line in lines:\n parser = TOCParser()\n parser.feed(line)\n if parser.title:\n try:\n href = parser.attrs['href']\n except KeyError:\n continue\n title = parser.title\n nav = AnchorLink(title, href)\n # Add the item to its parent if required. 
If it is a topmost\n # item then instead append it to our return value.\n if parents:\n parents[-1].children.append(nav)\n else:\n ret.append(nav)\n # If this item has children, store it as the current parent\n if line.endswith('<ul>'):\n parents.append(nav)\n elif line.startswith('</ul>'):\n if parents:\n parents.pop()\n\n # For the table of contents, always mark the first element as active\n if ret:\n ret[0].active = True\n\n return ret\n", "path": "mkdocs/toc.py"}], "after_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the per-page table of contents.\n\nFor the sake of simplicity we use an existing markdown extension to generate\nan HTML table of contents, and then parse that into the underlying data.\n\nThe steps we take to generate a table of contents are:\n\n* Pre-process the markdown, injecting a [TOC] marker.\n* Generate HTML from markdown.\n* Post-process the HTML, spliting the content and the table of contents.\n* Parse table of contents HTML into the underlying data structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\ntry: # pragma: no cover\n from html.parser import HTMLParser # noqa\nexcept ImportError: # pragma: no cover\n from HTMLParser import HTMLParser # noqa\n\n\nclass TableOfContents(object):\n \"\"\"\n Represents the table of contents for a given page.\n \"\"\"\n def __init__(self, html):\n self.items = _parse_html_table_of_contents(html)\n\n def __iter__(self):\n return iter(self.items)\n\n def __len__(self):\n return len(self.items)\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n\nclass AnchorLink(object):\n \"\"\"\n A single entry in the table of contents.\n \"\"\"\n def __init__(self, title, url):\n self.title, self.url = title, url\n self.children = []\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n ret = '%s%s - %s\\n' % (indent, self.title, self.url)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n\nclass TOCParser(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n self.in_anchor = False\n self.attrs = None\n self.title = ''\n\n # Prior to Python3.4 no convert_charrefs keyword existed.\n # However, in Python3.5 the default was changed to True.\n # We need the False behavior in all versions but can only\n # set it if it exists.\n if hasattr(self, 'convert_charrefs'):\n self.convert_charrefs = False\n\n def handle_starttag(self, tag, attrs):\n\n if not self.in_anchor:\n if tag == 'a':\n self.in_anchor = True\n self.attrs = dict(attrs)\n\n def handle_endtag(self, tag):\n if tag == 'a':\n self.in_anchor = False\n\n def handle_data(self, data):\n\n if self.in_anchor:\n self.title += data\n\n def handle_charref(self, ref):\n self.handle_entityref(\"#\" + ref)\n\n def handle_entityref(self, ref):\n self.handle_data(\"&%s;\" % ref)\n\n\ndef _parse_html_table_of_contents(html):\n \"\"\"\n Given a table of contents string that has been automatically generated by\n the markdown library, parse it into a tree of AnchorLink instances.\n\n Returns a list of all the parent AnchorLink instances.\n \"\"\"\n lines = html.splitlines()[2:-2]\n parents = []\n ret = []\n for line in lines:\n parser = TOCParser()\n parser.feed(line)\n if parser.title:\n try:\n href = parser.attrs['href']\n except KeyError:\n continue\n title = parser.title\n nav = AnchorLink(title, href)\n # Add the item to its parent if required. 
If it is a topmost\n # item then instead append it to our return value.\n if parents:\n parents[-1].children.append(nav)\n else:\n ret.append(nav)\n # If this item has children, store it as the current parent\n if line.endswith('<ul>'):\n parents.append(nav)\n elif line.startswith('</ul>'):\n if parents:\n parents.pop()\n\n # For the table of contents, always mark the first element as active\n if ret:\n ret[0].active = True\n\n return ret\n", "path": "mkdocs/toc.py"}]} | 1,468 | 98 |
gh_patches_debug_47845 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About page requires login
**Describe the bug**
Accessing the "About this server" link (https://bookwyrm.social/about) redirects to login
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://bookwyrm.social/about
2. redirected to login instead of seeing an about page (the URL is login/?next=/about)
**Expected behavior**
Access to information about this site / server
**Desktop (please complete the following information):**
- OS: linux
- Browser firefox
- Version 85 (developer edition)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/landing.py`
Content:
```
1 ''' non-interactive pages '''
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.db.models import Avg, Max
5 from django.template.response import TemplateResponse
6 from django.utils import timezone
7 from django.utils.decorators import method_decorator
8 from django.views import View
9
10 from bookwyrm import forms, models
11 from bookwyrm.settings import PAGE_LENGTH
12 from .helpers import get_activity_feed
13
14
15 # pylint: disable= no-self-use
16 @method_decorator(login_required, name='dispatch')
17 class About(View):
18 ''' create invites '''
19 def get(self, request):
20 ''' more information about the instance '''
21 data = {
22 'title': 'About',
23 }
24 return TemplateResponse(request, 'about.html', data)
25
26 class Home(View):
27 ''' discover page or home feed depending on auth '''
28 def get(self, request):
29 ''' this is the same as the feed on the home tab '''
30 if request.user.is_authenticated:
31 feed_view = Feed.as_view()
32 return feed_view(request, 'home')
33 discover_view = Discover.as_view()
34 return discover_view(request)
35
36 class Discover(View):
37 ''' preview of recently reviewed books '''
38 def get(self, request):
39 ''' tiled book activity page '''
40 books = models.Edition.objects.filter(
41 review__published_date__isnull=False,
42 review__user__local=True,
43 review__privacy__in=['public', 'unlisted'],
44 ).exclude(
45 cover__exact=''
46 ).annotate(
47 Max('review__published_date')
48 ).order_by('-review__published_date__max')[:6]
49
50 ratings = {}
51 for book in books:
52 reviews = models.Review.objects.filter(
53 book__in=book.parent_work.editions.all()
54 )
55 reviews = get_activity_feed(
56 request.user, ['public', 'unlisted'], queryset=reviews)
57 ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']
58 data = {
59 'title': 'Discover',
60 'register_form': forms.RegisterForm(),
61 'books': list(set(books)),
62 'ratings': ratings
63 }
64 return TemplateResponse(request, 'discover.html', data)
65
66
67 @method_decorator(login_required, name='dispatch')
68 class Feed(View):
69 ''' activity stream '''
70 def get(self, request, tab):
71 ''' user's homepage with activity feed '''
72 try:
73 page = int(request.GET.get('page', 1))
74 except ValueError:
75 page = 1
76
77 suggested_books = get_suggested_books(request.user)
78
79 if tab == 'home':
80 activities = get_activity_feed(
81 request.user, ['public', 'unlisted', 'followers'],
82 following_only=True)
83 elif tab == 'local':
84 activities = get_activity_feed(
85 request.user, ['public', 'followers'], local_only=True)
86 else:
87 activities = get_activity_feed(
88 request.user, ['public', 'followers'])
89 paginated = Paginator(activities, PAGE_LENGTH)
90
91 goal = models.AnnualGoal.objects.filter(
92 user=request.user, year=timezone.now().year
93 ).first()
94 data = {
95 'title': 'Updates Feed',
96 'user': request.user,
97 'suggested_books': suggested_books,
98 'activities': paginated.page(page),
99 'tab': tab,
100 'goal': goal,
101 'goal_form': forms.GoalForm(),
102 }
103 return TemplateResponse(request, 'feed.html', data)
104
105
106 def get_suggested_books(user, max_books=5):
107 ''' helper to get a user's recent books '''
108 book_count = 0
109 preset_shelves = [
110 ('reading', max_books), ('read', 2), ('to-read', max_books)
111 ]
112 suggested_books = []
113 for (preset, shelf_max) in preset_shelves:
114 limit = shelf_max if shelf_max < (max_books - book_count) \
115 else max_books - book_count
116 shelf = user.shelf_set.get(identifier=preset)
117
118 shelf_books = shelf.shelfbook_set.order_by(
119 '-updated_date'
120 ).all()[:limit]
121 if not shelf_books:
122 continue
123 shelf_preview = {
124 'name': shelf.name,
125 'books': [s.book for s in shelf_books]
126 }
127 suggested_books.append(shelf_preview)
128 book_count += len(shelf_preview['books'])
129 return suggested_books
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py
--- a/bookwyrm/views/landing.py
+++ b/bookwyrm/views/landing.py
@@ -13,7 +13,6 @@
# pylint: disable= no-self-use
-@method_decorator(login_required, name='dispatch')
class About(View):
''' create invites '''
def get(self, request):
| {"golden_diff": "diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py\n--- a/bookwyrm/views/landing.py\n+++ b/bookwyrm/views/landing.py\n@@ -13,7 +13,6 @@\n \n \n # pylint: disable= no-self-use\n-@method_decorator(login_required, name='dispatch')\n class About(View):\n ''' create invites '''\n def get(self, request):\n", "issue": "About page requires login\n**Describe the bug**\r\nAccessing the \"About this server\" link (https://bookwyrm.social/about) redirects to login\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://bookwyrm.social/about\r\n2. redirected to login instead of seeing an about page (the URL is login/?next=/about)\r\n\r\n**Expected behavior**\r\nAccess to information about this site / server\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux\r\n - Browser firefox\r\n - Version 85 (developer edition)\r\n\n", "before_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 
'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py"}], "after_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 
'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py"}]} | 1,591 | 89 |
gh_patches_debug_16896 | rasdani/github-patches | git_diff | webkom__lego-1069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong penalty count in email
The counter in the penalty email is still wrong:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/feed/feed_handlers/penalty_handler.py`
Content:
```
1 from lego.apps.feed.activities import Activity
2 from lego.apps.feed.feed_handlers.base_handler import BaseHandler
3 from lego.apps.feed.feed_manager import feed_manager
4 from lego.apps.feed.feeds.notification_feed import NotificationFeed
5 from lego.apps.feed.registry import register_handler
6 from lego.apps.feed.verbs import PenaltyVerb
7 from lego.apps.users.models import Penalty
8 from lego.apps.users.notifications import PenaltyNotification
9
10
11 class PenaltyHandler(BaseHandler):
12 model = Penalty
13 manager = feed_manager
14
15 def get_activity(self, penalty):
16 return Activity(
17 actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,
18 time=penalty.created_at, extra_context={
19 'reason': penalty.reason,
20 'weight': penalty.weight,
21 'total': penalty.user.number_of_penalties()
22 }
23 )
24
25 def handle_create(self, penalty):
26 activity = self.get_activity(penalty)
27 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
28
29 # Send Notification
30 notification = PenaltyNotification(penalty.user, penalty=penalty)
31 notification.notify()
32
33 def handle_update(self, penalty):
34 activity = self.get_activity(penalty)
35 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
36
37 def handle_delete(self, penalty):
38 activity = self.get_activity(penalty)
39 self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])
40
41
42 register_handler(PenaltyHandler)
43
```
Path: `lego/apps/users/notifications.py`
Content:
```
1 from lego.apps.notifications.constants import PENALTY_CREATION
2 from lego.apps.notifications.notification import Notification
3
4
5 class PenaltyNotification(Notification):
6
7 name = PENALTY_CREATION
8
9 def generate_mail(self):
10 penalty = self.kwargs['penalty']
11
12 return self._delay_mail(
13 to_email=self.user.email,
14 context={
15 'name': self.user.full_name,
16 'weight': penalty.weight,
17 'event': penalty.source_event.title,
18 'reason': penalty.reason,
19 'total': self.user.number_of_penalties()
20 },
21 subject=f'Du har fått en ny prikk',
22 plain_template='users/email/penalty.txt',
23 html_template='users/email/penalty.html',
24 )
25
26 def generate_push(self):
27 penalty = self.kwargs['penalty']
28
29 return self._delay_push(
30 template='users/push/penalty.txt', context={
31 'weight': penalty.weight,
32 'event': penalty.source_event.title,
33 }, instance=penalty
34 )
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py
--- a/lego/apps/feed/feed_handlers/penalty_handler.py
+++ b/lego/apps/feed/feed_handlers/penalty_handler.py
@@ -18,7 +18,6 @@
time=penalty.created_at, extra_context={
'reason': penalty.reason,
'weight': penalty.weight,
- 'total': penalty.user.number_of_penalties()
}
)
diff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py
--- a/lego/apps/users/notifications.py
+++ b/lego/apps/users/notifications.py
@@ -16,7 +16,6 @@
'weight': penalty.weight,
'event': penalty.source_event.title,
'reason': penalty.reason,
- 'total': self.user.number_of_penalties()
},
subject=f'Du har fått en ny prikk',
plain_template='users/email/penalty.txt',
| {"golden_diff": "diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py\n--- a/lego/apps/feed/feed_handlers/penalty_handler.py\n+++ b/lego/apps/feed/feed_handlers/penalty_handler.py\n@@ -18,7 +18,6 @@\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n- 'total': penalty.user.number_of_penalties()\n }\n )\n \ndiff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py\n--- a/lego/apps/users/notifications.py\n+++ b/lego/apps/users/notifications.py\n@@ -16,7 +16,6 @@\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n- 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n", "issue": "Wrong penalty count in email\nThe counter in the penalty email is still wrong:\r\n\r\n\r\n\n", "before_files": [{"content": "from lego.apps.feed.activities import Activity\nfrom lego.apps.feed.feed_handlers.base_handler import BaseHandler\nfrom lego.apps.feed.feed_manager import feed_manager\nfrom lego.apps.feed.feeds.notification_feed import NotificationFeed\nfrom lego.apps.feed.registry import register_handler\nfrom lego.apps.feed.verbs import PenaltyVerb\nfrom lego.apps.users.models import Penalty\nfrom lego.apps.users.notifications import PenaltyNotification\n\n\nclass PenaltyHandler(BaseHandler):\n model = Penalty\n manager = feed_manager\n\n def get_activity(self, penalty):\n return Activity(\n actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n 'total': penalty.user.number_of_penalties()\n }\n )\n\n def handle_create(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n # Send Notification\n notification = PenaltyNotification(penalty.user, penalty=penalty)\n notification.notify()\n\n def handle_update(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n def handle_delete(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n\nregister_handler(PenaltyHandler)\n", "path": "lego/apps/feed/feed_handlers/penalty_handler.py"}, {"content": "from lego.apps.notifications.constants import PENALTY_CREATION\nfrom lego.apps.notifications.notification import Notification\n\n\nclass PenaltyNotification(Notification):\n\n name = PENALTY_CREATION\n\n def generate_mail(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n 'name': self.user.full_name,\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n html_template='users/email/penalty.html',\n )\n\n def generate_push(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_push(\n template='users/push/penalty.txt', context={\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n }, instance=penalty\n )\n", "path": "lego/apps/users/notifications.py"}], "after_files": [{"content": "from lego.apps.feed.activities import Activity\nfrom lego.apps.feed.feed_handlers.base_handler import 
BaseHandler\nfrom lego.apps.feed.feed_manager import feed_manager\nfrom lego.apps.feed.feeds.notification_feed import NotificationFeed\nfrom lego.apps.feed.registry import register_handler\nfrom lego.apps.feed.verbs import PenaltyVerb\nfrom lego.apps.users.models import Penalty\nfrom lego.apps.users.notifications import PenaltyNotification\n\n\nclass PenaltyHandler(BaseHandler):\n model = Penalty\n manager = feed_manager\n\n def get_activity(self, penalty):\n return Activity(\n actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n }\n )\n\n def handle_create(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n # Send Notification\n notification = PenaltyNotification(penalty.user, penalty=penalty)\n notification.notify()\n\n def handle_update(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n def handle_delete(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n\nregister_handler(PenaltyHandler)\n", "path": "lego/apps/feed/feed_handlers/penalty_handler.py"}, {"content": "from lego.apps.notifications.constants import PENALTY_CREATION\nfrom lego.apps.notifications.notification import Notification\n\n\nclass PenaltyNotification(Notification):\n\n name = PENALTY_CREATION\n\n def generate_mail(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n 'name': self.user.full_name,\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n html_template='users/email/penalty.html',\n )\n\n def generate_push(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_push(\n template='users/push/penalty.txt', context={\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n }, instance=penalty\n )\n", "path": "lego/apps/users/notifications.py"}]} | 1,064 | 231 |
gh_patches_debug_24988 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1681 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting a Careeropportunity in the dashboard does not actually delete it
When you try to delete a career opportunity in the dashboard, it is not actually deleted.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/careeropportunity/dashboard/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect, render
6 from django.utils import timezone
7 from guardian.decorators import permission_required
8
9 from apps.careeropportunity.forms import AddCareerOpportunityForm
10 from apps.careeropportunity.models import CareerOpportunity
11 from apps.dashboard.tools import get_base_context, has_access
12
13
14 @login_required
15 @permission_required('careeropportunity.view_careeropportunity', return_403=True)
16 def index(request):
17
18 if not has_access(request):
19 raise PermissionDenied
20
21 context = get_base_context(request)
22
23 # "cops" is short for "careeropportunities" which is a fucking long word
24 # "cop" is short for "careeropportunity" which also is a fucking long word
25 cops = CareerOpportunity.objects.all()
26 context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
27 context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
28
29 return render(request, 'careeropportunity/dashboard/index.html', context)
30
31
32 @login_required
33 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
34 def detail(request, opportunity_id=None):
35
36 if not has_access(request):
37 raise PermissionDenied
38
39 context = get_base_context(request)
40 cop = None
41 if opportunity_id:
42 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
43 context['cop'] = cop
44 context['form'] = AddCareerOpportunityForm(instance=cop)
45 else:
46 context['form'] = AddCareerOpportunityForm()
47
48 if request.method == 'POST':
49 if cop:
50 form = AddCareerOpportunityForm(data=request.POST, instance=cop)
51 else:
52 form = AddCareerOpportunityForm(data=request.POST)
53
54 if form.is_valid():
55 form.save()
56 messages.success(request, 'La til ny karrieremulighet')
57 return redirect(index)
58 else:
59 context['form'] = form
60 messages.error(request,
61 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for å se hva som gikk galt.')
62
63 return render(request, 'careeropportunity/dashboard/detail.html', context)
64
65
66 @login_required
67 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
68 def delete(request, opportunity_id=None):
69 if not has_access(request):
70 raise PermissionDenied
71
72 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
73 cop.delete()
74 messages.success(request, 'Slettet karrieremuligheten')
75 return redirect(index)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py
--- a/apps/careeropportunity/dashboard/views.py
+++ b/apps/careeropportunity/dashboard/views.py
@@ -1,4 +1,6 @@
# -*- encoding: utf-8 -*-
+import logging
+
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
@@ -32,6 +34,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def detail(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
@@ -66,6 +70,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def delete(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
| {"golden_diff": "diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py\n--- a/apps/careeropportunity/dashboard/views.py\n+++ b/apps/careeropportunity/dashboard/views.py\n@@ -1,4 +1,6 @@\n # -*- encoding: utf-8 -*-\n+import logging\n+\n from django.contrib import messages\n from django.contrib.auth.decorators import login_required\n from django.core.exceptions import PermissionDenied\n@@ -32,6 +34,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def detail(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n \n if not has_access(request):\n raise PermissionDenied\n@@ -66,6 +70,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def delete(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n", "issue": "Deleting a Careeropportunity in the dashboard does not actually delete\nWhen trying to delete a career opportunity in the dashboard, it does not actually delete it.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}]} | 1,060 | 269 |
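The patch in the record above instantiates a logger inside each view with `logging.getLogger(__name__)`. A minimal sketch of the more usual module-level variant is shown below; the view name and message mirror the record, but the refactor itself is only an illustration, not part of the merged change.

```python
# Sketch only: one module-level logger shared by every view in the file,
# instead of calling logging.getLogger(__name__) inside each function.
import logging

logger = logging.getLogger(__name__)  # resolved once, at import time


def delete(request, opportunity_id=None):
    # Lazy %-style arguments: the message is only formatted if DEBUG records are emitted.
    logger.debug('Deleting careeropportunity with id: %s', opportunity_id)
```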
gh_patches_debug_5191 | rasdani/github-patches | git_diff | nf-core__tools-1333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Space missing in tip message for --fix files_unchanged
<!--
# nf-core/tools bug report
Hi there!
Thanks for telling us about a problem with the nf-core/tools package.
Please delete this text and anything that's not relevant from the template below:
-->
## Description of the bug
a space is missing before `--fix files_unchanged`
```
Tip: Some of these linting errors can automatically be resolved with the
following command:
nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged
```
## Steps to reproduce
https://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100
## Expected behaviour
<!-- A clear and concise description of what you expected to happen. -->
## System
- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->
- Executor: <!-- [e.g. slurm, local, awsbatch...] -->
- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->
- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->
- Python version: <!-- [e.g. 3.7, 3.8...] -->
## Nextflow Installation
- Version: <!-- [e.g. 19.10.0] -->
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/lint_utils.py`
Content:
```
1 import rich
2 from rich.console import Console
3 from rich.table import Table
4 import logging
5
6 import nf_core.utils
7
8 log = logging.getLogger(__name__)
9
10 # Create a console used by all lint tests
11 console = Console(force_terminal=nf_core.utils.rich_force_colors())
12
13
14 def print_joint_summary(lint_obj, module_lint_obj):
15 """Print a joint summary of the general pipe lint tests and the module lint tests"""
16 nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)
17 nbr_ignored = len(lint_obj.ignored)
18 nbr_fixed = len(lint_obj.fixed)
19 nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)
20 nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)
21
22 def _s(some_length):
23 return "" if some_length == 1 else "s"
24
25 summary_colour = "red" if nbr_failed > 0 else "green"
26 table = Table(box=rich.box.ROUNDED, style=summary_colour)
27 table.add_column(f"LINT RESULTS SUMMARY".format(nbr_passed), no_wrap=True)
28 table.add_row(r"[green][✔] {:>3} Test{} Passed".format(nbr_passed, _s(nbr_passed)))
29 if nbr_fixed:
30 table.add_row(r"[bright blue][?] {:>3} Test{} Fixed".format(nbr_fixed, _s(nbr_fixed)))
31 table.add_row(r"[grey58][?] {:>3} Test{} Ignored".format(nbr_ignored, _s(nbr_ignored)))
32 table.add_row(r"[yellow][!] {:>3} Test Warning{}".format(nbr_warned, _s(nbr_warned)))
33 table.add_row(r"[red][✗] {:>3} Test{} Failed".format(nbr_failed, _s(nbr_failed)))
34 console.print(table)
35
36
37 def print_fixes(lint_obj, module_lint_obj):
38 """Prints available and applied fixes"""
39
40 if len(lint_obj.could_fix):
41 fix_cmd = "nf-core lint {}--fix {}".format(
42 "" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
43 )
44 console.print(
45 f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue] {fix_cmd}\n"
46 )
47 if len(lint_obj.fix):
48 console.print(
49 "Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'."
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py
--- a/nf_core/lint_utils.py
+++ b/nf_core/lint_utils.py
@@ -38,7 +38,7 @@
"""Prints available and applied fixes"""
if len(lint_obj.could_fix):
- fix_cmd = "nf-core lint {}--fix {}".format(
+ fix_cmd = "nf-core lint {} --fix {}".format(
"" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
)
console.print(
| {"golden_diff": "diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py\n--- a/nf_core/lint_utils.py\n+++ b/nf_core/lint_utils.py\n@@ -38,7 +38,7 @@\n \"\"\"Prints available and applied fixes\"\"\"\n \n if len(lint_obj.could_fix):\n- fix_cmd = \"nf-core lint {}--fix {}\".format(\n+ fix_cmd = \"nf-core lint {} --fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n", "issue": "Space missing in tip message for --fix files_unchanged\n<!--\r\n# nf-core/tools bug report\r\n\r\nHi there!\r\n\r\nThanks for telling us about a problem with the nf-core/tools package.\r\nPlease delete this text and anything that's not relevant from the template below:\r\n-->\r\n\r\n## Description of the bug\r\n\r\na space is missing before `--fix files_unchanged`\r\n\r\n```\r\nTip: Some of these linting errors can automatically be resolved with the \r\nfollowing command:\r\n\r\n nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nhttps://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100\r\n\r\n## Expected behaviour\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## System\r\n\r\n- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->\r\n- Executor: <!-- [e.g. slurm, local, awsbatch...] -->\r\n- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->\r\n- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->\r\n- Python version: <!-- [e.g. 3.7, 3.8...] -->\r\n\r\n## Nextflow Installation\r\n\r\n- Version: <!-- [e.g. 19.10.0] -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] 
{:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {}--fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}], "after_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] {:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {} --fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}]} | 1,252 | 141 |
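The one-character fix above restores the missing space in the format string. A small, hypothetical alternative is to assemble the suggested command from a list of arguments and join them, which makes this class of spacing bug harder to reintroduce; `build_fix_cmd` below is illustrative only and is not how nf-core/tools implements it.

```python
# Hypothetical helper: build the lint command from parts rather than one format
# string, so a dropped space cannot silently merge two tokens.
def build_fix_cmd(wf_path, could_fix):
    parts = ["nf-core", "lint"]
    if wf_path != ".":
        parts += ["--dir", wf_path]
    for test_name in could_fix:
        parts += ["--fix", test_name]
    return " ".join(parts)


print(build_fix_cmd("/home/runner/work/rnavar/rnavar", ["files_unchanged"]))
# nf-core lint --dir /home/runner/work/rnavar/rnavar --fix files_unchanged
```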
gh_patches_debug_15169 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pending import csv lines displayed under "Successful" title until tried
Importing a CSV into Bookwyrm shows titles being "successfully imported" but they do not show up in the library.
Here's screenshots of the import results, neither the successful nor the failed imports seem to show up:


[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/import_data.py`
Content:
```
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = models.ImportJob.objects.get(id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84 try:
85 task = app.AsyncResult(job.task_id)
86 except ValueError:
87 task = None
88 items = job.items.order_by("index").all()
89 failed_items = [i for i in items if i.fail_reason]
90 items = [i for i in items if not i.fail_reason]
91 return TemplateResponse(
92 request,
93 "import_status.html",
94 {"job": job, "items": items, "failed_items": failed_items, "task": task},
95 )
96
97 def post(self, request, job_id):
98 """retry lines from an import"""
99 job = get_object_or_404(models.ImportJob, id=job_id)
100 items = []
101 for item in request.POST.getlist("import_item"):
102 items.append(get_object_or_404(models.ImportItem, id=item))
103
104 importer = Importer()
105 job = importer.create_retry_job(
106 request.user,
107 job,
108 items,
109 )
110 importer.start_import(job)
111 return redirect("/import/%d" % job.id)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -78,13 +78,15 @@
def get(self, request, job_id):
"""status of an import job"""
- job = models.ImportJob.objects.get(id=job_id)
+ job = get_object_or_404(models.ImportJob, id=job_id)
if job.user != request.user:
raise PermissionDenied
+
try:
task = app.AsyncResult(job.task_id)
except ValueError:
task = None
+
items = job.items.order_by("index").all()
failed_items = [i for i in items if i.fail_reason]
items = [i for i in items if not i.fail_reason]
| {"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -78,13 +78,15 @@\n \n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n- job = models.ImportJob.objects.get(id=job_id)\n+ job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n+\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n+\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n", "issue": "Pending import csv lines displayed under \"Successful\" title until tried\nImporting a CSV into Bookwyrm shows titles being \"successfully imported\" but they do not show up in the library.\r\n\r\nHere's screenshots of the import results, neither the successful nor the failed imports seem to show up:\r\n\r\n\r\n\r\n\r\n[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)\r\n\r\n\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = models.ImportJob.objects.get(id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n items = 
job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}], "after_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an 
import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]} | 1,347 | 186 |
gh_patches_debug_23873 | rasdani/github-patches | git_diff | cloudtools__troposphere-186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid json generated with SecurityGroupIngress
Ref: https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/EC2InstanceWithSecurityGroupSample.template
Invalid format generated:
``` json
"SecurityGroupIngress": [
{
"Properties": {
"CidrIp": "0.0.0.0/0",
"FromPort": "0",
"IpProtocol": "-1",
"ToPort": "65535"
},
"Type": "AWS::EC2::SecurityGroupIngress"
}
]
```
With the above template AWS will complain:
```
Encountered unsupported property Type
```
Correct format:
``` json
"SecurityGroupIngress": [
{
"CidrIp": "0.0.0.0/0",
"FromPort": "0",
"IpProtocol": "-1",
"ToPort": "65535"
}
]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/RedshiftClusterInVpc.py`
Content:
```
1 # Converted from Redshift.template located at:
2 # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
3
4 from troposphere import Template, Parameter, Ref, Equals
5 from troposphere import If, Output, Join, GetAtt
6 from troposphere.redshift import Cluster, ClusterParameterGroup
7 from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup
8 from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment
9 from troposphere.ec2 import SecurityGroup, SecurityGroupIngress
10
11
12 t = Template()
13
14 t.add_version("2010-09-09")
15
16 t.add_description(
17 "AWS CloudFormation Sample Template: Redshift cluster in a VPC")
18
19 dbname = t.add_parameter(Parameter(
20 "DatabaseName",
21 Description="The name of the first database to be created when the "
22 "redshift cluster is created",
23 Type="String",
24 Default="defaultdb",
25 AllowedPattern="([a-z]|[0-9])+",
26 ))
27
28 clustertype = t.add_parameter(Parameter(
29 "ClusterType",
30 Description="The type of the cluster",
31 Type="String",
32 Default="single-node",
33 AllowedValues=[
34 "single-node",
35 "multi-mode"
36 ],
37 ))
38
39 numberofnodes = t.add_parameter(Parameter(
40 "NumberOfNodes",
41 Description="The number of compute nodes in the redshift cluster. "
42 "When cluster type is specified as: 1) single-node, the NumberOfNodes "
43 "parameter should be specified as 1, 2) multi-node, the NumberOfNodes "
44 "parameter should be greater than 1",
45 Type="Number",
46 Default="1",
47 ))
48
49 nodetype = t.add_parameter(Parameter(
50 "NodeType",
51 Description="The node type to be provisioned for the redshift cluster",
52 Type="String",
53 Default="dw2.large",
54 ))
55
56 masterusername = t.add_parameter(Parameter(
57 "MasterUsername",
58 Description="The user name associated with the master user account for "
59 "the redshift cluster that is being created",
60 Type="String",
61 Default="defaultuser",
62 AllowedPattern="([a-z])([a-z]|[0-9])*",
63 NoEcho=True,
64 ))
65
66 masteruserpassword = t.add_parameter(Parameter(
67 "MasterUserPassword",
68 Description="The password associated with the master user account for the "
69 "redshift cluster that is being created.",
70 Type="String",
71 NoEcho=True,
72 ))
73
74 conditions = {
75 "IsMultiNodeCluster": Equals(
76 Ref("ClusterType"),
77 "multi-mode"
78 ),
79 }
80
81 for k in conditions:
82 t.add_condition(k, conditions[k])
83
84 redshiftcluster = t.add_resource(Cluster(
85 "RedshiftCluster",
86 ClusterType=Ref("ClusterType"),
87 NumberOfNodes=If("IsMultiNodeCluster",
88 Ref("NumberOfNodes"), Ref("AWS::NoValue")),
89 NodeType=Ref("NodeType"),
90 DBName=Ref("DatabaseName"),
91 MasterUsername=Ref("MasterUsername"),
92 MasterUserPassword=Ref("MasterUserPassword"),
93 ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
94 VpcSecurityGroupIds=Ref("SecurityGroup"),
95 ClusterSubnetGroupName=Ref("RedshiftClusterSubnetGroup"),
96 ))
97
98 amazonredshiftparameter1 = AmazonRedshiftParameter(
99 "AmazonRedshiftParameter1",
100 ParameterName="enable_user_activity_logging",
101 ParameterValue="true",
102 )
103
104 redshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(
105 "RedshiftClusterParameterGroup",
106 Description="Cluster parameter group",
107 ParameterGroupFamily="redshift-1.0",
108 Parameters=[amazonredshiftparameter1],
109 ))
110
111 redshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(
112 "RedshiftClusterSubnetGroup",
113 Description="Cluster subnet group",
114 SubnetIds=Ref("Subnet"),
115 ))
116
117 vpc = t.add_resource(VPC(
118 "VPC",
119 CidrBlock="10.0.0.0/16",
120 ))
121
122 subnet = t.add_resource(Subnet(
123 "Subnet",
124 CidrBlock="10.0.0.0/24",
125 VpcId=Ref("VPC"),
126 ))
127
128 internetgateway = t.add_resource(InternetGateway(
129 "InternetGateway",
130 ))
131
132 gatewayattachment = t.add_resource(VPCGatewayAttachment(
133 "GatewayAttachment",
134 VpcId=Ref("VPC"),
135 InternetGatewayId=Ref("InternetGateway"),
136 ))
137
138 securitygroupingress1 = SecurityGroupIngress(
139 "SecurityGroupIngress1",
140 CidrIp="10.0.0.0/16",
141 FromPort="80",
142 ToPort="80",
143 IpProtocol="tcp",
144 )
145
146 securitygroup = t.add_resource(SecurityGroup(
147 "SecurityGroup",
148 GroupDescription="Security Group",
149 SecurityGroupIngress=[securitygroupingress1],
150 VpcId=Ref("VPC"),
151 ))
152
153 t.add_output(Output(
154 "ClusterEndpoint",
155 Value=Join(":", [GetAtt(redshiftcluster, "Endpoint.Address"),
156 GetAtt(redshiftcluster, "Endpoint.Port")]),
157 ))
158
159 print(t.to_json())
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/RedshiftClusterInVpc.py b/examples/RedshiftClusterInVpc.py
--- a/examples/RedshiftClusterInVpc.py
+++ b/examples/RedshiftClusterInVpc.py
@@ -6,7 +6,7 @@
from troposphere.redshift import Cluster, ClusterParameterGroup
from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup
from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment
-from troposphere.ec2 import SecurityGroup, SecurityGroupIngress
+from troposphere.ec2 import SecurityGroup, SecurityGroupRule
t = Template()
@@ -135,18 +135,18 @@
InternetGatewayId=Ref("InternetGateway"),
))
-securitygroupingress1 = SecurityGroupIngress(
- "SecurityGroupIngress1",
- CidrIp="10.0.0.0/16",
- FromPort="80",
- ToPort="80",
- IpProtocol="tcp",
-)
-
securitygroup = t.add_resource(SecurityGroup(
"SecurityGroup",
GroupDescription="Security Group",
- SecurityGroupIngress=[securitygroupingress1],
+ SecurityGroupIngress=[
+ SecurityGroupRule(
+ "SecurityGroupIngress1",
+ CidrIp="10.0.0.0/16",
+ FromPort="80",
+ ToPort="80",
+ IpProtocol="tcp",
+ )
+ ],
VpcId=Ref("VPC"),
))
| {"golden_diff": "diff --git a/examples/RedshiftClusterInVpc.py b/examples/RedshiftClusterInVpc.py\n--- a/examples/RedshiftClusterInVpc.py\n+++ b/examples/RedshiftClusterInVpc.py\n@@ -6,7 +6,7 @@\n from troposphere.redshift import Cluster, ClusterParameterGroup\n from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\n from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\n-from troposphere.ec2 import SecurityGroup, SecurityGroupIngress\n+from troposphere.ec2 import SecurityGroup, SecurityGroupRule\n \n \n t = Template()\n@@ -135,18 +135,18 @@\n InternetGatewayId=Ref(\"InternetGateway\"),\n ))\n \n-securitygroupingress1 = SecurityGroupIngress(\n- \"SecurityGroupIngress1\",\n- CidrIp=\"10.0.0.0/16\",\n- FromPort=\"80\",\n- ToPort=\"80\",\n- IpProtocol=\"tcp\",\n-)\n-\n securitygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n- SecurityGroupIngress=[securitygroupingress1],\n+ SecurityGroupIngress=[\n+ SecurityGroupRule(\n+ \"SecurityGroupIngress1\",\n+ CidrIp=\"10.0.0.0/16\",\n+ FromPort=\"80\",\n+ ToPort=\"80\",\n+ IpProtocol=\"tcp\",\n+ )\n+ ],\n VpcId=Ref(\"VPC\"),\n ))\n", "issue": "Invalid json generated with SecurityGroupIngress\nRef: https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/EC2InstanceWithSecurityGroupSample.template\n\nInvalid format generated:\n\n``` json\n\"SecurityGroupIngress\": [\n {\n \"Properties\": {\n \"CidrIp\": \"0.0.0.0/0\",\n \"FromPort\": \"0\",\n \"IpProtocol\": \"-1\",\n \"ToPort\": \"65535\"\n },\n \"Type\": \"AWS::EC2::SecurityGroupIngress\"\n }\n ]\n```\n\nWith the above template AWS will complain:\n\n```\nEncountered unsupported property Type\n```\n\nCorrect format:\n\n``` json\n\"SecurityGroupIngress\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"FromPort\": \"0\",\n \"IpProtocol\": \"-1\",\n \"ToPort\": \"65535\"\n }\n ]\n```\n\n", "before_files": [{"content": "# Converted from Redshift.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import Template, Parameter, Ref, Equals\nfrom troposphere import If, Output, Join, GetAtt\nfrom troposphere.redshift import Cluster, ClusterParameterGroup\nfrom troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\nfrom troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupIngress\n\n\nt = Template()\n\nt.add_version(\"2010-09-09\")\n\nt.add_description(\n \"AWS CloudFormation Sample Template: Redshift cluster in a VPC\")\n\ndbname = t.add_parameter(Parameter(\n \"DatabaseName\",\n Description=\"The name of the first database to be created when the \"\n \"redshift cluster is created\",\n Type=\"String\",\n Default=\"defaultdb\",\n AllowedPattern=\"([a-z]|[0-9])+\",\n))\n\nclustertype = t.add_parameter(Parameter(\n \"ClusterType\",\n Description=\"The type of the cluster\",\n Type=\"String\",\n Default=\"single-node\",\n AllowedValues=[\n \"single-node\",\n \"multi-mode\"\n ],\n))\n\nnumberofnodes = t.add_parameter(Parameter(\n \"NumberOfNodes\",\n Description=\"The number of compute nodes in the redshift cluster. 
\"\n \"When cluster type is specified as: 1) single-node, the NumberOfNodes \"\n \"parameter should be specified as 1, 2) multi-node, the NumberOfNodes \"\n \"parameter should be greater than 1\",\n Type=\"Number\",\n Default=\"1\",\n))\n\nnodetype = t.add_parameter(Parameter(\n \"NodeType\",\n Description=\"The node type to be provisioned for the redshift cluster\",\n Type=\"String\",\n Default=\"dw2.large\",\n))\n\nmasterusername = t.add_parameter(Parameter(\n \"MasterUsername\",\n Description=\"The user name associated with the master user account for \"\n \"the redshift cluster that is being created\",\n Type=\"String\",\n Default=\"defaultuser\",\n AllowedPattern=\"([a-z])([a-z]|[0-9])*\",\n NoEcho=True,\n))\n\nmasteruserpassword = t.add_parameter(Parameter(\n \"MasterUserPassword\",\n Description=\"The password associated with the master user account for the \"\n \"redshift cluster that is being created.\",\n Type=\"String\",\n NoEcho=True,\n))\n\nconditions = {\n \"IsMultiNodeCluster\": Equals(\n Ref(\"ClusterType\"),\n \"multi-mode\"\n ),\n}\n\nfor k in conditions:\n t.add_condition(k, conditions[k])\n\nredshiftcluster = t.add_resource(Cluster(\n \"RedshiftCluster\",\n ClusterType=Ref(\"ClusterType\"),\n NumberOfNodes=If(\"IsMultiNodeCluster\",\n Ref(\"NumberOfNodes\"), Ref(\"AWS::NoValue\")),\n NodeType=Ref(\"NodeType\"),\n DBName=Ref(\"DatabaseName\"),\n MasterUsername=Ref(\"MasterUsername\"),\n MasterUserPassword=Ref(\"MasterUserPassword\"),\n ClusterParameterGroupName=Ref(\"RedshiftClusterParameterGroup\"),\n VpcSecurityGroupIds=Ref(\"SecurityGroup\"),\n ClusterSubnetGroupName=Ref(\"RedshiftClusterSubnetGroup\"),\n))\n\namazonredshiftparameter1 = AmazonRedshiftParameter(\n \"AmazonRedshiftParameter1\",\n ParameterName=\"enable_user_activity_logging\",\n ParameterValue=\"true\",\n)\n\nredshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(\n \"RedshiftClusterParameterGroup\",\n Description=\"Cluster parameter group\",\n ParameterGroupFamily=\"redshift-1.0\",\n Parameters=[amazonredshiftparameter1],\n))\n\nredshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(\n \"RedshiftClusterSubnetGroup\",\n Description=\"Cluster subnet group\",\n SubnetIds=Ref(\"Subnet\"),\n))\n\nvpc = t.add_resource(VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n))\n\nsubnet = t.add_resource(Subnet(\n \"Subnet\",\n CidrBlock=\"10.0.0.0/24\",\n VpcId=Ref(\"VPC\"),\n))\n\ninternetgateway = t.add_resource(InternetGateway(\n \"InternetGateway\",\n))\n\ngatewayattachment = t.add_resource(VPCGatewayAttachment(\n \"GatewayAttachment\",\n VpcId=Ref(\"VPC\"),\n InternetGatewayId=Ref(\"InternetGateway\"),\n))\n\nsecuritygroupingress1 = SecurityGroupIngress(\n \"SecurityGroupIngress1\",\n CidrIp=\"10.0.0.0/16\",\n FromPort=\"80\",\n ToPort=\"80\",\n IpProtocol=\"tcp\",\n)\n\nsecuritygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n SecurityGroupIngress=[securitygroupingress1],\n VpcId=Ref(\"VPC\"),\n))\n\nt.add_output(Output(\n \"ClusterEndpoint\",\n Value=Join(\":\", [GetAtt(redshiftcluster, \"Endpoint.Address\"),\n GetAtt(redshiftcluster, \"Endpoint.Port\")]),\n))\n\nprint(t.to_json())\n", "path": "examples/RedshiftClusterInVpc.py"}], "after_files": [{"content": "# Converted from Redshift.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import Template, Parameter, Ref, Equals\nfrom troposphere import If, Output, Join, GetAtt\nfrom troposphere.redshift import Cluster, 
ClusterParameterGroup\nfrom troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\nfrom troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupRule\n\n\nt = Template()\n\nt.add_version(\"2010-09-09\")\n\nt.add_description(\n \"AWS CloudFormation Sample Template: Redshift cluster in a VPC\")\n\ndbname = t.add_parameter(Parameter(\n \"DatabaseName\",\n Description=\"The name of the first database to be created when the \"\n \"redshift cluster is created\",\n Type=\"String\",\n Default=\"defaultdb\",\n AllowedPattern=\"([a-z]|[0-9])+\",\n))\n\nclustertype = t.add_parameter(Parameter(\n \"ClusterType\",\n Description=\"The type of the cluster\",\n Type=\"String\",\n Default=\"single-node\",\n AllowedValues=[\n \"single-node\",\n \"multi-mode\"\n ],\n))\n\nnumberofnodes = t.add_parameter(Parameter(\n \"NumberOfNodes\",\n Description=\"The number of compute nodes in the redshift cluster. \"\n \"When cluster type is specified as: 1) single-node, the NumberOfNodes \"\n \"parameter should be specified as 1, 2) multi-node, the NumberOfNodes \"\n \"parameter should be greater than 1\",\n Type=\"Number\",\n Default=\"1\",\n))\n\nnodetype = t.add_parameter(Parameter(\n \"NodeType\",\n Description=\"The node type to be provisioned for the redshift cluster\",\n Type=\"String\",\n Default=\"dw2.large\",\n))\n\nmasterusername = t.add_parameter(Parameter(\n \"MasterUsername\",\n Description=\"The user name associated with the master user account for \"\n \"the redshift cluster that is being created\",\n Type=\"String\",\n Default=\"defaultuser\",\n AllowedPattern=\"([a-z])([a-z]|[0-9])*\",\n NoEcho=True,\n))\n\nmasteruserpassword = t.add_parameter(Parameter(\n \"MasterUserPassword\",\n Description=\"The password associated with the master user account for the \"\n \"redshift cluster that is being created.\",\n Type=\"String\",\n NoEcho=True,\n))\n\nconditions = {\n \"IsMultiNodeCluster\": Equals(\n Ref(\"ClusterType\"),\n \"multi-mode\"\n ),\n}\n\nfor k in conditions:\n t.add_condition(k, conditions[k])\n\nredshiftcluster = t.add_resource(Cluster(\n \"RedshiftCluster\",\n ClusterType=Ref(\"ClusterType\"),\n NumberOfNodes=If(\"IsMultiNodeCluster\",\n Ref(\"NumberOfNodes\"), Ref(\"AWS::NoValue\")),\n NodeType=Ref(\"NodeType\"),\n DBName=Ref(\"DatabaseName\"),\n MasterUsername=Ref(\"MasterUsername\"),\n MasterUserPassword=Ref(\"MasterUserPassword\"),\n ClusterParameterGroupName=Ref(\"RedshiftClusterParameterGroup\"),\n VpcSecurityGroupIds=Ref(\"SecurityGroup\"),\n ClusterSubnetGroupName=Ref(\"RedshiftClusterSubnetGroup\"),\n))\n\namazonredshiftparameter1 = AmazonRedshiftParameter(\n \"AmazonRedshiftParameter1\",\n ParameterName=\"enable_user_activity_logging\",\n ParameterValue=\"true\",\n)\n\nredshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(\n \"RedshiftClusterParameterGroup\",\n Description=\"Cluster parameter group\",\n ParameterGroupFamily=\"redshift-1.0\",\n Parameters=[amazonredshiftparameter1],\n))\n\nredshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(\n \"RedshiftClusterSubnetGroup\",\n Description=\"Cluster subnet group\",\n SubnetIds=Ref(\"Subnet\"),\n))\n\nvpc = t.add_resource(VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n))\n\nsubnet = t.add_resource(Subnet(\n \"Subnet\",\n CidrBlock=\"10.0.0.0/24\",\n VpcId=Ref(\"VPC\"),\n))\n\ninternetgateway = t.add_resource(InternetGateway(\n \"InternetGateway\",\n))\n\ngatewayattachment = t.add_resource(VPCGatewayAttachment(\n 
\"GatewayAttachment\",\n VpcId=Ref(\"VPC\"),\n InternetGatewayId=Ref(\"InternetGateway\"),\n))\n\nsecuritygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n SecurityGroupIngress=[\n SecurityGroupRule(\n \"SecurityGroupIngress1\",\n CidrIp=\"10.0.0.0/16\",\n FromPort=\"80\",\n ToPort=\"80\",\n IpProtocol=\"tcp\",\n )\n ],\n VpcId=Ref(\"VPC\"),\n))\n\nt.add_output(Output(\n \"ClusterEndpoint\",\n Value=Join(\":\", [GetAtt(redshiftcluster, \"Endpoint.Address\"),\n GetAtt(redshiftcluster, \"Endpoint.Port\")]),\n))\n\nprint(t.to_json())\n", "path": "examples/RedshiftClusterInVpc.py"}]} | 1,962 | 348 |
gh_patches_debug_10059 | rasdani/github-patches | git_diff | scrapy__scrapy-5269 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ItemLoader: support non-TextResponse
At the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.
Passing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/loader/__init__.py`
Content:
```
1 """
2 Item Loader
3
4 See documentation in docs/topics/loaders.rst
5 """
6 import itemloaders
7
8 from scrapy.item import Item
9 from scrapy.selector import Selector
10
11
12 class ItemLoader(itemloaders.ItemLoader):
13 """
14 A user-friendly abstraction to populate an :ref:`item <topics-items>` with data
15 by applying :ref:`field processors <topics-loaders-processors>` to scraped data.
16 When instantiated with a ``selector`` or a ``response`` it supports
17 data extraction from web pages using :ref:`selectors <topics-selectors>`.
18
19 :param item: The item instance to populate using subsequent calls to
20 :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,
21 or :meth:`~ItemLoader.add_value`.
22 :type item: scrapy.item.Item
23
24 :param selector: The selector to extract data from, when using the
25 :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or
26 :meth:`replace_css` method.
27 :type selector: :class:`~scrapy.selector.Selector` object
28
29 :param response: The response used to construct the selector using the
30 :attr:`default_selector_class`, unless the selector argument is given,
31 in which case this argument is ignored.
32 :type response: :class:`~scrapy.http.Response` object
33
34 If no item is given, one is instantiated automatically using the class in
35 :attr:`default_item_class`.
36
37 The item, selector, response and remaining keyword arguments are
38 assigned to the Loader context (accessible through the :attr:`context` attribute).
39
40 .. attribute:: item
41
42 The item object being parsed by this Item Loader.
43 This is mostly used as a property so, when attempting to override this
44 value, you may want to check out :attr:`default_item_class` first.
45
46 .. attribute:: context
47
48 The currently active :ref:`Context <loaders-context>` of this Item Loader.
49
50 .. attribute:: default_item_class
51
52 An :ref:`item <topics-items>` class (or factory), used to instantiate
53 items when not given in the ``__init__`` method.
54
55 .. attribute:: default_input_processor
56
57 The default input processor to use for those fields which don't specify
58 one.
59
60 .. attribute:: default_output_processor
61
62 The default output processor to use for those fields which don't specify
63 one.
64
65 .. attribute:: default_selector_class
66
67 The class used to construct the :attr:`selector` of this
68 :class:`ItemLoader`, if only a response is given in the ``__init__`` method.
69 If a selector is given in the ``__init__`` method this attribute is ignored.
70 This attribute is sometimes overridden in subclasses.
71
72 .. attribute:: selector
73
74 The :class:`~scrapy.selector.Selector` object to extract data from.
75 It's either the selector given in the ``__init__`` method or one created from
76 the response given in the ``__init__`` method using the
77 :attr:`default_selector_class`. This attribute is meant to be
78 read-only.
79 """
80
81 default_item_class = Item
82 default_selector_class = Selector
83
84 def __init__(self, item=None, selector=None, response=None, parent=None, **context):
85 if selector is None and response is not None:
86 selector = self.default_selector_class(response)
87 context.update(response=response)
88 super().__init__(item=item, selector=selector, parent=parent, **context)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py
--- a/scrapy/loader/__init__.py
+++ b/scrapy/loader/__init__.py
@@ -83,6 +83,9 @@
def __init__(self, item=None, selector=None, response=None, parent=None, **context):
if selector is None and response is not None:
- selector = self.default_selector_class(response)
+ try:
+ selector = self.default_selector_class(response)
+ except AttributeError:
+ selector = None
context.update(response=response)
super().__init__(item=item, selector=selector, parent=parent, **context)
| {"golden_diff": "diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py\n--- a/scrapy/loader/__init__.py\n+++ b/scrapy/loader/__init__.py\n@@ -83,6 +83,9 @@\n \n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n- selector = self.default_selector_class(response)\n+ try:\n+ selector = self.default_selector_class(response)\n+ except AttributeError:\n+ selector = None\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "issue": "ItemLoader: support non-TextResponse\nAt the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.\r\n\r\nPassing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.\n", "before_files": [{"content": "\"\"\"\nItem Loader\n\nSee documentation in docs/topics/loaders.rst\n\"\"\"\nimport itemloaders\n\nfrom scrapy.item import Item\nfrom scrapy.selector import Selector\n\n\nclass ItemLoader(itemloaders.ItemLoader):\n \"\"\"\n A user-friendly abstraction to populate an :ref:`item <topics-items>` with data\n by applying :ref:`field processors <topics-loaders-processors>` to scraped data.\n When instantiated with a ``selector`` or a ``response`` it supports\n data extraction from web pages using :ref:`selectors <topics-selectors>`.\n\n :param item: The item instance to populate using subsequent calls to\n :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,\n or :meth:`~ItemLoader.add_value`.\n :type item: scrapy.item.Item\n\n :param selector: The selector to extract data from, when using the\n :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or\n :meth:`replace_css` method.\n :type selector: :class:`~scrapy.selector.Selector` object\n\n :param response: The response used to construct the selector using the\n :attr:`default_selector_class`, unless the selector argument is given,\n in which case this argument is ignored.\n :type response: :class:`~scrapy.http.Response` object\n\n If no item is given, one is instantiated automatically using the class in\n :attr:`default_item_class`.\n\n The item, selector, response and remaining keyword arguments are\n assigned to the Loader context (accessible through the :attr:`context` attribute).\n\n .. attribute:: item\n\n The item object being parsed by this Item Loader.\n This is mostly used as a property so, when attempting to override this\n value, you may want to check out :attr:`default_item_class` first.\n\n .. attribute:: context\n\n The currently active :ref:`Context <loaders-context>` of this Item Loader.\n\n .. attribute:: default_item_class\n\n An :ref:`item <topics-items>` class (or factory), used to instantiate\n items when not given in the ``__init__`` method.\n\n .. attribute:: default_input_processor\n\n The default input processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_output_processor\n\n The default output processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_selector_class\n\n The class used to construct the :attr:`selector` of this\n :class:`ItemLoader`, if only a response is given in the ``__init__`` method.\n If a selector is given in the ``__init__`` method this attribute is ignored.\n This attribute is sometimes overridden in subclasses.\n\n .. 
attribute:: selector\n\n The :class:`~scrapy.selector.Selector` object to extract data from.\n It's either the selector given in the ``__init__`` method or one created from\n the response given in the ``__init__`` method using the\n :attr:`default_selector_class`. This attribute is meant to be\n read-only.\n \"\"\"\n\n default_item_class = Item\n default_selector_class = Selector\n\n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n selector = self.default_selector_class(response)\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "path": "scrapy/loader/__init__.py"}], "after_files": [{"content": "\"\"\"\nItem Loader\n\nSee documentation in docs/topics/loaders.rst\n\"\"\"\nimport itemloaders\n\nfrom scrapy.item import Item\nfrom scrapy.selector import Selector\n\n\nclass ItemLoader(itemloaders.ItemLoader):\n \"\"\"\n A user-friendly abstraction to populate an :ref:`item <topics-items>` with data\n by applying :ref:`field processors <topics-loaders-processors>` to scraped data.\n When instantiated with a ``selector`` or a ``response`` it supports\n data extraction from web pages using :ref:`selectors <topics-selectors>`.\n\n :param item: The item instance to populate using subsequent calls to\n :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,\n or :meth:`~ItemLoader.add_value`.\n :type item: scrapy.item.Item\n\n :param selector: The selector to extract data from, when using the\n :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or\n :meth:`replace_css` method.\n :type selector: :class:`~scrapy.selector.Selector` object\n\n :param response: The response used to construct the selector using the\n :attr:`default_selector_class`, unless the selector argument is given,\n in which case this argument is ignored.\n :type response: :class:`~scrapy.http.Response` object\n\n If no item is given, one is instantiated automatically using the class in\n :attr:`default_item_class`.\n\n The item, selector, response and remaining keyword arguments are\n assigned to the Loader context (accessible through the :attr:`context` attribute).\n\n .. attribute:: item\n\n The item object being parsed by this Item Loader.\n This is mostly used as a property so, when attempting to override this\n value, you may want to check out :attr:`default_item_class` first.\n\n .. attribute:: context\n\n The currently active :ref:`Context <loaders-context>` of this Item Loader.\n\n .. attribute:: default_item_class\n\n An :ref:`item <topics-items>` class (or factory), used to instantiate\n items when not given in the ``__init__`` method.\n\n .. attribute:: default_input_processor\n\n The default input processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_output_processor\n\n The default output processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_selector_class\n\n The class used to construct the :attr:`selector` of this\n :class:`ItemLoader`, if only a response is given in the ``__init__`` method.\n If a selector is given in the ``__init__`` method this attribute is ignored.\n This attribute is sometimes overridden in subclasses.\n\n .. attribute:: selector\n\n The :class:`~scrapy.selector.Selector` object to extract data from.\n It's either the selector given in the ``__init__`` method or one created from\n the response given in the ``__init__`` method using the\n :attr:`default_selector_class`. 
This attribute is meant to be\n read-only.\n \"\"\"\n\n default_item_class = Item\n default_selector_class = Selector\n\n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n try:\n selector = self.default_selector_class(response)\n except AttributeError:\n selector = None\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "path": "scrapy/loader/__init__.py"}]} | 1,269 | 146 |
gh_patches_debug_5555 | rasdani/github-patches | git_diff | getredash__redash-4638 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
error running query : ** '>' is not supported between instance of NoneType and 'int'
Issue Summary:
Database = Oracle 12c
`select count(*) from table `
throwing the following error
`error running query : ** '>' is not supported between instance of NoneType and 'int'`
Redash v9.0.0-alpha(dev)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/oracle.py`
Content:
```
1 import logging
2
3 from redash.utils import json_dumps, json_loads
4 from redash.query_runner import *
5
6 try:
7 import cx_Oracle
8
9 TYPES_MAP = {
10 cx_Oracle.DATETIME: TYPE_DATETIME,
11 cx_Oracle.CLOB: TYPE_STRING,
12 cx_Oracle.LOB: TYPE_STRING,
13 cx_Oracle.FIXED_CHAR: TYPE_STRING,
14 cx_Oracle.FIXED_NCHAR: TYPE_STRING,
15 cx_Oracle.INTERVAL: TYPE_DATETIME,
16 cx_Oracle.LONG_STRING: TYPE_STRING,
17 cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,
18 cx_Oracle.NCHAR: TYPE_STRING,
19 cx_Oracle.NUMBER: TYPE_FLOAT,
20 cx_Oracle.ROWID: TYPE_INTEGER,
21 cx_Oracle.STRING: TYPE_STRING,
22 cx_Oracle.TIMESTAMP: TYPE_DATETIME,
23 }
24
25 ENABLED = True
26 except ImportError:
27 ENABLED = False
28
29 logger = logging.getLogger(__name__)
30
31
32 class Oracle(BaseSQLQueryRunner):
33 noop_query = "SELECT 1 FROM dual"
34
35 @classmethod
36 def get_col_type(cls, col_type, scale):
37 if col_type == cx_Oracle.NUMBER:
38 return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
39 else:
40 return TYPES_MAP.get(col_type, None)
41
42 @classmethod
43 def enabled(cls):
44 return ENABLED
45
46 @classmethod
47 def configuration_schema(cls):
48 return {
49 "type": "object",
50 "properties": {
51 "user": {"type": "string"},
52 "password": {"type": "string"},
53 "host": {"type": "string"},
54 "port": {"type": "number"},
55 "servicename": {"type": "string", "title": "DSN Service Name"},
56 },
57 "required": ["servicename", "user", "password", "host", "port"],
58 "secret": ["password"],
59 }
60
61 @classmethod
62 def type(cls):
63 return "oracle"
64
65 def __init__(self, configuration):
66 super(Oracle, self).__init__(configuration)
67
68 dsn = cx_Oracle.makedsn(
69 self.configuration["host"],
70 self.configuration["port"],
71 service_name=self.configuration["servicename"],
72 )
73
74 self.connection_string = "{}/{}@{}".format(
75 self.configuration["user"], self.configuration["password"], dsn
76 )
77
78 def _get_tables(self, schema):
79 query = """
80 SELECT
81 all_tab_cols.OWNER,
82 all_tab_cols.TABLE_NAME,
83 all_tab_cols.COLUMN_NAME
84 FROM all_tab_cols
85 WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')
86 """
87
88 results, error = self.run_query(query, None)
89
90 if error is not None:
91 raise Exception("Failed getting schema.")
92
93 results = json_loads(results)
94
95 for row in results["rows"]:
96 if row["OWNER"] != None:
97 table_name = "{}.{}".format(row["OWNER"], row["TABLE_NAME"])
98 else:
99 table_name = row["TABLE_NAME"]
100
101 if table_name not in schema:
102 schema[table_name] = {"name": table_name, "columns": []}
103
104 schema[table_name]["columns"].append(row["COLUMN_NAME"])
105
106 return list(schema.values())
107
108 @classmethod
109 def _convert_number(cls, value):
110 try:
111 return int(value)
112 except:
113 return value
114
115 @classmethod
116 def output_handler(cls, cursor, name, default_type, length, precision, scale):
117 if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):
118 return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)
119
120 if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
121 return cursor.var(str, length, cursor.arraysize)
122
123 if default_type == cx_Oracle.NUMBER:
124 if scale <= 0:
125 return cursor.var(
126 cx_Oracle.STRING,
127 255,
128 outconverter=Oracle._convert_number,
129 arraysize=cursor.arraysize,
130 )
131
132 def run_query(self, query, user):
133 connection = cx_Oracle.connect(self.connection_string)
134 connection.outputtypehandler = Oracle.output_handler
135
136 cursor = connection.cursor()
137
138 try:
139 cursor.execute(query)
140 rows_count = cursor.rowcount
141 if cursor.description is not None:
142 columns = self.fetch_columns(
143 [
144 (i[0], Oracle.get_col_type(i[1], i[5]))
145 for i in cursor.description
146 ]
147 )
148 rows = [
149 dict(zip((column["name"] for column in columns), row))
150 for row in cursor
151 ]
152 data = {"columns": columns, "rows": rows}
153 error = None
154 json_data = json_dumps(data)
155 else:
156 columns = [{"name": "Row(s) Affected", "type": "TYPE_INTEGER"}]
157 rows = [{"Row(s) Affected": rows_count}]
158 data = {"columns": columns, "rows": rows}
159 json_data = json_dumps(data)
160 connection.commit()
161 except cx_Oracle.DatabaseError as err:
162 error = "Query failed. {}.".format(str(err))
163 json_data = None
164 except KeyboardInterrupt:
165 connection.cancel()
166 error = "Query cancelled by user."
167 json_data = None
168 finally:
169 connection.close()
170
171 return json_data, error
172
173
174 register(Oracle)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py
--- a/redash/query_runner/oracle.py
+++ b/redash/query_runner/oracle.py
@@ -35,7 +35,11 @@
@classmethod
def get_col_type(cls, col_type, scale):
if col_type == cx_Oracle.NUMBER:
- return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
+ if scale is None:
+ return TYPE_INTEGER
+ if scale > 0:
+ return TYPE_FLOAT
+ return TYPE_INTEGER
else:
return TYPES_MAP.get(col_type, None)
| {"golden_diff": "diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py\n--- a/redash/query_runner/oracle.py\n+++ b/redash/query_runner/oracle.py\n@@ -35,7 +35,11 @@\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n- return TYPE_FLOAT if scale > 0 else TYPE_INTEGER\n+ if scale is None:\n+ return TYPE_INTEGER\n+ if scale > 0:\n+ return TYPE_FLOAT\n+ return TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n", "issue": "error running query : ** '>' is not supported between instance of NoneType and 'int'\nIssue Summary:\r\nDatabase = Oracle 12c\r\n\r\n`select count(*) from table `\r\n\r\nthrowing the following error\r\n\r\n`error running query : ** '>' is not supported between instance of NoneType and 'int'`\r\n\r\nRedash v9.0.0-alpha(dev)\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom redash.utils import json_dumps, json_loads\nfrom redash.query_runner import *\n\ntry:\n import cx_Oracle\n\n TYPES_MAP = {\n cx_Oracle.DATETIME: TYPE_DATETIME,\n cx_Oracle.CLOB: TYPE_STRING,\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n }\n\n ENABLED = True\nexcept ImportError:\n ENABLED = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass Oracle(BaseSQLQueryRunner):\n noop_query = \"SELECT 1 FROM dual\"\n\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n return TYPE_FLOAT if scale > 0 else TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n\n @classmethod\n def enabled(cls):\n return ENABLED\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"servicename\": {\"type\": \"string\", \"title\": \"DSN Service Name\"},\n },\n \"required\": [\"servicename\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"oracle\"\n\n def __init__(self, configuration):\n super(Oracle, self).__init__(configuration)\n\n dsn = cx_Oracle.makedsn(\n self.configuration[\"host\"],\n self.configuration[\"port\"],\n service_name=self.configuration[\"servicename\"],\n )\n\n self.connection_string = \"{}/{}@{}\".format(\n self.configuration[\"user\"], self.configuration[\"password\"], dsn\n )\n\n def _get_tables(self, schema):\n query = \"\"\"\n SELECT\n all_tab_cols.OWNER,\n all_tab_cols.TABLE_NAME,\n all_tab_cols.COLUMN_NAME\n FROM all_tab_cols\n WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')\n \"\"\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n if row[\"OWNER\"] != None:\n table_name = \"{}.{}\".format(row[\"OWNER\"], row[\"TABLE_NAME\"])\n else:\n table_name = row[\"TABLE_NAME\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n 
schema[table_name][\"columns\"].append(row[\"COLUMN_NAME\"])\n\n return list(schema.values())\n\n @classmethod\n def _convert_number(cls, value):\n try:\n return int(value)\n except:\n return value\n\n @classmethod\n def output_handler(cls, cursor, name, default_type, length, precision, scale):\n if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):\n return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)\n\n if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):\n return cursor.var(str, length, cursor.arraysize)\n\n if default_type == cx_Oracle.NUMBER:\n if scale <= 0:\n return cursor.var(\n cx_Oracle.STRING,\n 255,\n outconverter=Oracle._convert_number,\n arraysize=cursor.arraysize,\n )\n\n def run_query(self, query, user):\n connection = cx_Oracle.connect(self.connection_string)\n connection.outputtypehandler = Oracle.output_handler\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n rows_count = cursor.rowcount\n if cursor.description is not None:\n columns = self.fetch_columns(\n [\n (i[0], Oracle.get_col_type(i[1], i[5]))\n for i in cursor.description\n ]\n )\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n columns = [{\"name\": \"Row(s) Affected\", \"type\": \"TYPE_INTEGER\"}]\n rows = [{\"Row(s) Affected\": rows_count}]\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n connection.commit()\n except cx_Oracle.DatabaseError as err:\n error = \"Query failed. {}.\".format(str(err))\n json_data = None\n except KeyboardInterrupt:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nregister(Oracle)\n", "path": "redash/query_runner/oracle.py"}], "after_files": [{"content": "import logging\n\nfrom redash.utils import json_dumps, json_loads\nfrom redash.query_runner import *\n\ntry:\n import cx_Oracle\n\n TYPES_MAP = {\n cx_Oracle.DATETIME: TYPE_DATETIME,\n cx_Oracle.CLOB: TYPE_STRING,\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n }\n\n ENABLED = True\nexcept ImportError:\n ENABLED = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass Oracle(BaseSQLQueryRunner):\n noop_query = \"SELECT 1 FROM dual\"\n\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n if scale is None:\n return TYPE_INTEGER\n if scale > 0:\n return TYPE_FLOAT\n return TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n\n @classmethod\n def enabled(cls):\n return ENABLED\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"servicename\": {\"type\": \"string\", \"title\": \"DSN Service Name\"},\n },\n \"required\": [\"servicename\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"oracle\"\n\n def __init__(self, configuration):\n super(Oracle, 
self).__init__(configuration)\n\n dsn = cx_Oracle.makedsn(\n self.configuration[\"host\"],\n self.configuration[\"port\"],\n service_name=self.configuration[\"servicename\"],\n )\n\n self.connection_string = \"{}/{}@{}\".format(\n self.configuration[\"user\"], self.configuration[\"password\"], dsn\n )\n\n def _get_tables(self, schema):\n query = \"\"\"\n SELECT\n all_tab_cols.OWNER,\n all_tab_cols.TABLE_NAME,\n all_tab_cols.COLUMN_NAME\n FROM all_tab_cols\n WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')\n \"\"\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n if row[\"OWNER\"] != None:\n table_name = \"{}.{}\".format(row[\"OWNER\"], row[\"TABLE_NAME\"])\n else:\n table_name = row[\"TABLE_NAME\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n schema[table_name][\"columns\"].append(row[\"COLUMN_NAME\"])\n\n return list(schema.values())\n\n @classmethod\n def _convert_number(cls, value):\n try:\n return int(value)\n except:\n return value\n\n @classmethod\n def output_handler(cls, cursor, name, default_type, length, precision, scale):\n if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):\n return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)\n\n if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):\n return cursor.var(str, length, cursor.arraysize)\n\n if default_type == cx_Oracle.NUMBER:\n if scale <= 0:\n return cursor.var(\n cx_Oracle.STRING,\n 255,\n outconverter=Oracle._convert_number,\n arraysize=cursor.arraysize,\n )\n\n def run_query(self, query, user):\n connection = cx_Oracle.connect(self.connection_string)\n connection.outputtypehandler = Oracle.output_handler\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n rows_count = cursor.rowcount\n if cursor.description is not None:\n columns = self.fetch_columns(\n [\n (i[0], Oracle.get_col_type(i[1], i[5]))\n for i in cursor.description\n ]\n )\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n columns = [{\"name\": \"Row(s) Affected\", \"type\": \"TYPE_INTEGER\"}]\n rows = [{\"Row(s) Affected\": rows_count}]\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n connection.commit()\n except cx_Oracle.DatabaseError as err:\n error = \"Query failed. {}.\".format(str(err))\n json_data = None\n except KeyboardInterrupt:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nregister(Oracle)\n", "path": "redash/query_runner/oracle.py"}]} | 1,982 | 140 |
gh_patches_debug_29456 | rasdani/github-patches | git_diff | oppia__oppia-7287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show skill mastery values in the topic viewer
Add a skill tab in the topic viewer that will show skill mastery for all skills in that topic (once we have enough skill mastery information for those skills).
Milestone 3.2 in @sophiewu6's GSoC project
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/controllers/topic_viewer.py`
Content:
```
1 # Copyright 2018 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the topic viewer page."""
16
17 from constants import constants
18 from core.controllers import acl_decorators
19 from core.controllers import base
20 from core.domain import story_fetchers
21 from core.domain import topic_fetchers
22 import feconf
23
24
25 class TopicViewerPage(base.BaseHandler):
26 """Renders the topic viewer page."""
27
28 @acl_decorators.can_access_topic_viewer_page
29 def get(self, _):
30 """Handles GET requests."""
31
32 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
33 raise self.PageNotFoundException
34
35 self.render_template('dist/topic-viewer-page.mainpage.html')
36
37
38 class TopicPageDataHandler(base.BaseHandler):
39 """Manages the data that needs to be displayed to a learner on the topic
40 viewer page.
41 """
42 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
43
44 @acl_decorators.can_access_topic_viewer_page
45 def get(self, topic_name):
46 """Handles GET requests."""
47
48 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
49 raise self.PageNotFoundException
50
51 topic = topic_fetchers.get_topic_by_name(topic_name)
52 canonical_story_ids = topic.get_canonical_story_ids(
53 include_only_published=True)
54 additional_story_ids = topic.get_additional_story_ids(
55 include_only_published=True)
56 canonical_story_summaries = [
57 story_fetchers.get_story_summary_by_id(
58 canonical_story_id) for canonical_story_id
59 in canonical_story_ids]
60
61 additional_story_summaries = [
62 story_fetchers.get_story_summary_by_id(
63 additional_story_id) for additional_story_id
64 in additional_story_ids]
65
66 canonical_story_dicts = [
67 summary.to_human_readable_dict() for summary
68 in canonical_story_summaries]
69
70 additional_story_dicts = [
71 summary.to_human_readable_dict() for summary
72 in additional_story_summaries]
73
74 uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
75 subtopics = topic.get_all_subtopics()
76
77 self.values.update({
78 'topic_id': topic.id,
79 'topic_name': topic.name,
80 'canonical_story_dicts': canonical_story_dicts,
81 'additional_story_dicts': additional_story_dicts,
82 'uncategorized_skill_ids': uncategorized_skill_ids,
83 'subtopics': subtopics
84 })
85 self.render_json(self.values)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py
--- a/core/controllers/topic_viewer.py
+++ b/core/controllers/topic_viewer.py
@@ -17,6 +17,7 @@
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
+from core.domain import skill_services
from core.domain import story_fetchers
from core.domain import topic_fetchers
import feconf
@@ -74,12 +75,26 @@
uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
subtopics = topic.get_all_subtopics()
+ assigned_skill_ids = topic.get_all_skill_ids()
+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(
+ topic.id, assigned_skill_ids)
+
+ if self.user_id:
+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
+ self.user_id, assigned_skill_ids)
+ else:
+ degrees_of_mastery = {}
+ for skill_id in assigned_skill_ids:
+ degrees_of_mastery[skill_id] = None
+
self.values.update({
'topic_id': topic.id,
'topic_name': topic.name,
'canonical_story_dicts': canonical_story_dicts,
'additional_story_dicts': additional_story_dicts,
'uncategorized_skill_ids': uncategorized_skill_ids,
- 'subtopics': subtopics
+ 'subtopics': subtopics,
+ 'degrees_of_mastery': degrees_of_mastery,
+ 'skill_descriptions': skill_descriptions
})
self.render_json(self.values)
| {"golden_diff": "diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py\n--- a/core/controllers/topic_viewer.py\n+++ b/core/controllers/topic_viewer.py\n@@ -17,6 +17,7 @@\n from constants import constants\n from core.controllers import acl_decorators\n from core.controllers import base\n+from core.domain import skill_services\n from core.domain import story_fetchers\n from core.domain import topic_fetchers\n import feconf\n@@ -74,12 +75,26 @@\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n \n+ assigned_skill_ids = topic.get_all_skill_ids()\n+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(\n+ topic.id, assigned_skill_ids)\n+\n+ if self.user_id:\n+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(\n+ self.user_id, assigned_skill_ids)\n+ else:\n+ degrees_of_mastery = {}\n+ for skill_id in assigned_skill_ids:\n+ degrees_of_mastery[skill_id] = None\n+\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n- 'subtopics': subtopics\n+ 'subtopics': subtopics,\n+ 'degrees_of_mastery': degrees_of_mastery,\n+ 'skill_descriptions': skill_descriptions\n })\n self.render_json(self.values)\n", "issue": "Show skill mastery values in the topic viewer\nAdd a skill tab in the topic viewer that will show skill mastery of all skills in that topic (Once we have enough skill mastery information for the skill)\r\n\r\nMilestone 3.2 in @sophiewu6 's GSoC project\n", "before_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import story_fetchers\nfrom core.domain import topic_fetchers\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_fetchers.get_topic_by_name(topic_name)\n canonical_story_ids = topic.get_canonical_story_ids(\n include_only_published=True)\n additional_story_ids = topic.get_additional_story_ids(\n 
include_only_published=True)\n canonical_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in canonical_story_ids]\n\n additional_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}], "after_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import skill_services\nfrom core.domain import story_fetchers\nfrom core.domain import topic_fetchers\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_fetchers.get_topic_by_name(topic_name)\n canonical_story_ids = topic.get_canonical_story_ids(\n include_only_published=True)\n additional_story_ids = topic.get_additional_story_ids(\n include_only_published=True)\n canonical_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in canonical_story_ids]\n\n additional_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = 
topic.get_all_subtopics()\n\n assigned_skill_ids = topic.get_all_skill_ids()\n skill_descriptions = skill_services.get_skill_descriptions_by_ids(\n topic.id, assigned_skill_ids)\n\n if self.user_id:\n degrees_of_mastery = skill_services.get_multi_user_skill_mastery(\n self.user_id, assigned_skill_ids)\n else:\n degrees_of_mastery = {}\n for skill_id in assigned_skill_ids:\n degrees_of_mastery[skill_id] = None\n\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics,\n 'degrees_of_mastery': degrees_of_mastery,\n 'skill_descriptions': skill_descriptions\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}]} | 1,098 | 340 |
gh_patches_debug_34814 | rasdani/github-patches | git_diff | dynaconf__dynaconf-825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] Support multidoc yaml files
**Is your feature request related to a problem? Please describe.**
Sometimes it can be difficult or impossible to pass multiple files with config fragments. YAML supports multiple documents in one file, and `safe_load_all` from the PyYAML API loads them accordingly. It is a standard YAML feature; it would be nice to support it and make it usable in cases where passing one file (composed from several files) is easier.
**Describe the solution you'd like**
Support `safe_load_all` as a YAML loader.
**Describe alternatives you've considered**
Passing multiple files will do the job; however, it is not always straightforward.
**Additional context**
I have prepared a patch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/loaders/yaml_loader.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 from pathlib import Path
5 from typing import TextIO
6 from warnings import warn
7
8 from dynaconf import default_settings
9 from dynaconf.constants import YAML_EXTENSIONS
10 from dynaconf.loaders.base import BaseLoader
11 from dynaconf.utils import object_merge
12 from dynaconf.utils.parse_conf import try_to_encode
13 from dynaconf.vendor.ruamel import yaml
14
15 # Add support for Dynaconf Lazy values to YAML dumper
16 yaml.SafeDumper.yaml_representers[
17 None
18 ] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(
19 self, try_to_encode(data)
20 )
21
22
23 def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
24 """
25 Reads and loads in to "obj" a single key or all keys from source file.
26
27 :param obj: the settings instance
28 :param env: settings current env default='development'
29 :param silent: if errors should raise
30 :param key: if defined load a single key, else load all in env
31 :param filename: Optional custom filename to load
32 :return: None
33 """
34 # Resolve the loaders
35 # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
36 # Possible values are `safe_load, full_load, unsafe_load, load`
37 yaml_reader = getattr(
38 yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
39 )
40 if yaml_reader.__name__ == "unsafe_load": # pragma: no cover
41 warn(
42 "yaml.unsafe_load is deprecated."
43 " Please read https://msg.pyyaml.org/load for full details."
44 " Try to use full_load or safe_load."
45 )
46
47 loader = BaseLoader(
48 obj=obj,
49 env=env,
50 identifier="yaml",
51 extensions=YAML_EXTENSIONS,
52 file_reader=yaml_reader,
53 string_reader=yaml_reader,
54 validate=validate,
55 )
56 loader.load(
57 filename=filename,
58 key=key,
59 silent=silent,
60 )
61
62
63 def write(settings_path, settings_data, merge=True):
64 """Write data to a settings file.
65
66 :param settings_path: the filepath
67 :param settings_data: a dictionary with data
68 :param merge: boolean if existing file should be merged with new data
69 :param stdout: boolean if should output to stdout instead of file
70 """
71 settings_path = Path(settings_path)
72 if settings_path.exists() and merge: # pragma: no cover
73 with open(
74 str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
75 ) as open_file:
76 object_merge(yaml.safe_load(open_file), settings_data)
77
78 with open(
79 str(settings_path),
80 "w",
81 encoding=default_settings.ENCODING_FOR_DYNACONF,
82 ) as open_file:
83 yaml.dump(
84 settings_data,
85 open_file,
86 Dumper=yaml.dumper.SafeDumper,
87 explicit_start=True,
88 indent=2,
89 default_flow_style=False,
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py
--- a/dynaconf/loaders/yaml_loader.py
+++ b/dynaconf/loaders/yaml_loader.py
@@ -20,6 +20,41 @@
)
+class AllLoader(BaseLoader):
+ """YAML Loader to load multi doc files"""
+
+ @staticmethod
+ def _assign_data(data, source_file, content):
+ """Helper to iterate through all docs in a file"""
+ content = tuple(content)
+ if len(content) == 1:
+ data[source_file] = content[0]
+ elif len(content) > 1:
+ for i, doc in enumerate(content):
+ data[f"{source_file}[{i}]"] = doc
+
+ def get_source_data(self, files):
+ data = {}
+ for source_file in files:
+ if source_file.endswith(self.extensions):
+ try:
+ with open(source_file, **self.opener_params) as open_file:
+ content = self.file_reader(open_file)
+ self.obj._loaded_files.append(source_file)
+ self._assign_data(data, source_file, content)
+ except OSError as e:
+ if ".local." not in source_file:
+ warn(
+ f"{self.identifier}_loader: {source_file} "
+ f":{str(e)}"
+ )
+ else:
+ # for tests it is possible to pass string
+ content = self.string_reader(source_file)
+ self._assign_data(data, source_file, content)
+ return data
+
+
def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
"""
Reads and loads in to "obj" a single key or all keys from source file.
@@ -33,7 +68,8 @@
"""
# Resolve the loaders
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
- # Possible values are `safe_load, full_load, unsafe_load, load`
+ # Possible values are:
+ # `safe_load, full_load, unsafe_load, load, safe_load_all`
yaml_reader = getattr(
yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
)
@@ -44,7 +80,11 @@
" Try to use full_load or safe_load."
)
- loader = BaseLoader(
+ _loader = BaseLoader
+ if yaml_reader.__name__.endswith("_all"):
+ _loader = AllLoader
+
+ loader = _loader(
obj=obj,
env=env,
identifier="yaml",
| {"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -20,6 +20,41 @@\n )\n \n \n+class AllLoader(BaseLoader):\n+ \"\"\"YAML Loader to load multi doc files\"\"\"\n+\n+ @staticmethod\n+ def _assign_data(data, source_file, content):\n+ \"\"\"Helper to iterate through all docs in a file\"\"\"\n+ content = tuple(content)\n+ if len(content) == 1:\n+ data[source_file] = content[0]\n+ elif len(content) > 1:\n+ for i, doc in enumerate(content):\n+ data[f\"{source_file}[{i}]\"] = doc\n+\n+ def get_source_data(self, files):\n+ data = {}\n+ for source_file in files:\n+ if source_file.endswith(self.extensions):\n+ try:\n+ with open(source_file, **self.opener_params) as open_file:\n+ content = self.file_reader(open_file)\n+ self.obj._loaded_files.append(source_file)\n+ self._assign_data(data, source_file, content)\n+ except OSError as e:\n+ if \".local.\" not in source_file:\n+ warn(\n+ f\"{self.identifier}_loader: {source_file} \"\n+ f\":{str(e)}\"\n+ )\n+ else:\n+ # for tests it is possible to pass string\n+ content = self.string_reader(source_file)\n+ self._assign_data(data, source_file, content)\n+ return data\n+\n+\n def load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n@@ -33,7 +68,8 @@\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n- # Possible values are `safe_load, full_load, unsafe_load, load`\n+ # Possible values are:\n+ # `safe_load, full_load, unsafe_load, load, safe_load_all`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n@@ -44,7 +80,11 @@\n \" Try to use full_load or safe_load.\"\n )\n \n- loader = BaseLoader(\n+ _loader = BaseLoader\n+ if yaml_reader.__name__.endswith(\"_all\"):\n+ _loader = AllLoader\n+\n+ loader = _loader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n", "issue": "[RFC] Support multidoc yaml files\n**Is your feature request related to a problem? Please describe.**\r\nSometimes it can be difficult or impossible to pass multiple files with config fragments. yaml support multiple documents in one file and `safe_load_all` from pyaml api loads that accordingly. 
It is standard yaml feature, it would be nice to support it and make in usable in cases when passing one file (composited from more files) would be easier.\r\n\r\n**Describe the solution you'd like**\r\nSupport `safe_load_all` as yaml loader.\r\n\r\n**Describe alternatives you've considered**\r\nPassing multiple files will do the work, however it doesn't have to be always straightforward.\r\n\r\n**Additional context**\r\nI have prepared a patch\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\nfrom warnings import warn\n\nfrom dynaconf import default_settings\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils.parse_conf import try_to_encode\nfrom dynaconf.vendor.ruamel import yaml\n\n# Add support for Dynaconf Lazy values to YAML dumper\nyaml.SafeDumper.yaml_representers[\n None\n] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(\n self, try_to_encode(data)\n)\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are `safe_load, full_load, unsafe_load, load`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n if yaml_reader.__name__ == \"unsafe_load\": # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader,\n validate=validate,\n )\n loader.load(\n filename=filename,\n key=key,\n silent=silent,\n )\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n :param stdout: boolean if should output to stdout instead of file\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n with open(\n str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF\n ) as open_file:\n object_merge(yaml.safe_load(open_file), settings_data)\n\n with open(\n str(settings_path),\n \"w\",\n encoding=default_settings.ENCODING_FOR_DYNACONF,\n ) as open_file:\n yaml.dump(\n settings_data,\n open_file,\n Dumper=yaml.dumper.SafeDumper,\n explicit_start=True,\n indent=2,\n default_flow_style=False,\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\nfrom warnings import warn\n\nfrom dynaconf import default_settings\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils.parse_conf import try_to_encode\nfrom 
dynaconf.vendor.ruamel import yaml\n\n# Add support for Dynaconf Lazy values to YAML dumper\nyaml.SafeDumper.yaml_representers[\n None\n] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(\n self, try_to_encode(data)\n)\n\n\nclass AllLoader(BaseLoader):\n \"\"\"YAML Loader to load multi doc files\"\"\"\n\n @staticmethod\n def _assign_data(data, source_file, content):\n \"\"\"Helper to iterate through all docs in a file\"\"\"\n content = tuple(content)\n if len(content) == 1:\n data[source_file] = content[0]\n elif len(content) > 1:\n for i, doc in enumerate(content):\n data[f\"{source_file}[{i}]\"] = doc\n\n def get_source_data(self, files):\n data = {}\n for source_file in files:\n if source_file.endswith(self.extensions):\n try:\n with open(source_file, **self.opener_params) as open_file:\n content = self.file_reader(open_file)\n self.obj._loaded_files.append(source_file)\n self._assign_data(data, source_file, content)\n except OSError as e:\n if \".local.\" not in source_file:\n warn(\n f\"{self.identifier}_loader: {source_file} \"\n f\":{str(e)}\"\n )\n else:\n # for tests it is possible to pass string\n content = self.string_reader(source_file)\n self._assign_data(data, source_file, content)\n return data\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are:\n # `safe_load, full_load, unsafe_load, load, safe_load_all`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n if yaml_reader.__name__ == \"unsafe_load\": # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n _loader = BaseLoader\n if yaml_reader.__name__.endswith(\"_all\"):\n _loader = AllLoader\n\n loader = _loader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader,\n validate=validate,\n )\n loader.load(\n filename=filename,\n key=key,\n silent=silent,\n )\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n :param stdout: boolean if should output to stdout instead of file\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n with open(\n str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF\n ) as open_file:\n object_merge(yaml.safe_load(open_file), settings_data)\n\n with open(\n str(settings_path),\n \"w\",\n encoding=default_settings.ENCODING_FOR_DYNACONF,\n ) as open_file:\n yaml.dump(\n settings_data,\n open_file,\n Dumper=yaml.dumper.SafeDumper,\n explicit_start=True,\n indent=2,\n default_flow_style=False,\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]} | 1,259 | 601 |
gh_patches_debug_9478 | rasdani/github-patches | git_diff | bridgecrewio__checkov-548 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add new check: API Gateway V2 should have access logging enabled
AccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html
Terraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3 from checkov.common.models.consts import ANY_VALUE
4
5
6 class APIGatewayAccessLogging(BaseResourceValueCheck):
7
8 def __init__(self):
9 name = "Ensure API Gateway has Access Logging enabled"
10 id = "CKV_AWS_76"
11 supported_resources = ['aws_api_gateway_stage']
12 categories = [CheckCategories.LOGGING]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def get_inspected_key(self):
16 return "access_log_settings/[0]/destination_arn"
17
18 def get_expected_value(self):
19 return ANY_VALUE
20
21
22 check = APIGatewayAccessLogging()
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
@@ -8,7 +8,7 @@
def __init__(self):
name = "Ensure API Gateway has Access Logging enabled"
id = "CKV_AWS_76"
- supported_resources = ['aws_api_gateway_stage']
+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n@@ -8,7 +8,7 @@\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n- supported_resources = ['aws_api_gateway_stage']\n+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n", "issue": "Add new check: API Gateway V2 should have access logging enabled \nAccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html\r\n\r\nTerraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass APIGatewayAccessLogging(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n supported_resources = ['aws_api_gateway_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"access_log_settings/[0]/destination_arn\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = APIGatewayAccessLogging()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py"}], "after_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass APIGatewayAccessLogging(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"access_log_settings/[0]/destination_arn\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = APIGatewayAccessLogging()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py"}]} | 541 | 166 |
gh_patches_debug_1431 | rasdani/github-patches | git_diff | pyca__cryptography-4077 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utils.int_from_bytes gives incorrect answers when passed "builtins.bytes" in python 2.7
```
$ mkvirtualenv repro
$ python --version
Python 2.7.12
$ pip install cryptography future
$ python
from cryptography import utils
from builtins import bytes
x = bytes.fromhex('deadbeef')
y = utils.int_from_bytes(x, 'big')
hex(y)
'0x6227deadbeef27'
```
The reason this happens is that `int_from_bytes` (in py27 mode) casts the passed-in value to `bytes`, which, in py27 mode, is an alias for `str`. Passing a `builtins.bytes` value to `str` somewhat insanely wraps the string with `b'` and `'`. These then get parsed by the rest of `int_from_bytes` as if they were part of the original byte string.
I think this is particularly unfortunate since all the "cryptography" functions say they accept and return `bytes` in their docstrings. Ideally it'd be compatible with all three definitions of `bytes`: the py27 alias to `str`, the one from "future", and the py3 one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/utils.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import sys
11 import warnings
12
13
14 # We use a UserWarning subclass, instead of DeprecationWarning, because CPython
15 # decided deprecation warnings should be invisble by default.
16 class CryptographyDeprecationWarning(UserWarning):
17 pass
18
19
20 # Several APIs were deprecated with no specific end-of-life date because of the
21 # ubiquity of their use. They should not be removed until we agree on when that
22 # cycle ends.
23 PersistentlyDeprecated = CryptographyDeprecationWarning
24 DeprecatedIn21 = CryptographyDeprecationWarning
25
26
27 def _check_bytes(name, value):
28 if not isinstance(value, bytes):
29 raise TypeError("{0} must be bytes".format(name))
30
31
32 def read_only_property(name):
33 return property(lambda self: getattr(self, name))
34
35
36 def register_interface(iface):
37 def register_decorator(klass):
38 verify_interface(iface, klass)
39 iface.register(klass)
40 return klass
41 return register_decorator
42
43
44 def register_interface_if(predicate, iface):
45 def register_decorator(klass):
46 if predicate:
47 verify_interface(iface, klass)
48 iface.register(klass)
49 return klass
50 return register_decorator
51
52
53 if hasattr(int, "from_bytes"):
54 int_from_bytes = int.from_bytes
55 else:
56 def int_from_bytes(data, byteorder, signed=False):
57 assert byteorder == 'big'
58 assert not signed
59
60 # call bytes() on data to allow the use of bytearrays
61 return int(bytes(data).encode('hex'), 16)
62
63
64 if hasattr(int, "to_bytes"):
65 def int_to_bytes(integer, length=None):
66 return integer.to_bytes(
67 length or (integer.bit_length() + 7) // 8 or 1, 'big'
68 )
69 else:
70 def int_to_bytes(integer, length=None):
71 hex_string = '%x' % integer
72 if length is None:
73 n = len(hex_string)
74 else:
75 n = length * 2
76 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
77
78
79 class InterfaceNotImplemented(Exception):
80 pass
81
82
83 if hasattr(inspect, "signature"):
84 signature = inspect.signature
85 else:
86 signature = inspect.getargspec
87
88
89 def verify_interface(iface, klass):
90 for method in iface.__abstractmethods__:
91 if not hasattr(klass, method):
92 raise InterfaceNotImplemented(
93 "{0} is missing a {1!r} method".format(klass, method)
94 )
95 if isinstance(getattr(iface, method), abc.abstractproperty):
96 # Can't properly verify these yet.
97 continue
98 sig = signature(getattr(iface, method))
99 actual = signature(getattr(klass, method))
100 if sig != actual:
101 raise InterfaceNotImplemented(
102 "{0}.{1}'s signature differs from the expected. Expected: "
103 "{2!r}. Received: {3!r}".format(
104 klass, method, sig, actual
105 )
106 )
107
108
109 # No longer needed as of 2.2, but retained because we have external consumers
110 # who use it.
111 def bit_length(x):
112 return x.bit_length()
113
114
115 class _DeprecatedValue(object):
116 def __init__(self, value, message, warning_class):
117 self.value = value
118 self.message = message
119 self.warning_class = warning_class
120
121
122 class _ModuleWithDeprecations(object):
123 def __init__(self, module):
124 self.__dict__["_module"] = module
125
126 def __getattr__(self, attr):
127 obj = getattr(self._module, attr)
128 if isinstance(obj, _DeprecatedValue):
129 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
130 obj = obj.value
131 return obj
132
133 def __setattr__(self, attr, value):
134 setattr(self._module, attr, value)
135
136 def __delattr__(self, attr):
137 obj = getattr(self._module, attr)
138 if isinstance(obj, _DeprecatedValue):
139 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
140
141 delattr(self._module, attr)
142
143 def __dir__(self):
144 return ["_module"] + dir(self._module)
145
146
147 def deprecated(value, module_name, message, warning_class):
148 module = sys.modules[module_name]
149 if not isinstance(module, _ModuleWithDeprecations):
150 sys.modules[module_name] = _ModuleWithDeprecations(module)
151 return _DeprecatedValue(value, message, warning_class)
152
153
154 def cached_property(func):
155 cached_name = "_cached_{0}".format(func)
156 sentinel = object()
157
158 def inner(instance):
159 cache = getattr(instance, cached_name, sentinel)
160 if cache is not sentinel:
161 return cache
162 result = func(instance)
163 setattr(instance, cached_name, result)
164 return result
165 return property(inner)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -57,8 +57,7 @@
assert byteorder == 'big'
assert not signed
- # call bytes() on data to allow the use of bytearrays
- return int(bytes(data).encode('hex'), 16)
+ return int(binascii.hexlify(data), 16)
if hasattr(int, "to_bytes"):
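
A quick way to exercise the patched fallback against the three flavours of `bytes` mentioned in the issue (a sketch assuming a Python 2.7 interpreter, with the `future` package installed only for the last call):

```python
import binascii

def int_from_bytes(data, byteorder, signed=False):
    # Same body as the patched py2 fallback above.
    assert byteorder == 'big'
    assert not signed
    return int(binascii.hexlify(data), 16)

print(hex(int_from_bytes(b'\xde\xad\xbe\xef', 'big')))             # 0xdeadbeef
print(hex(int_from_bytes(bytearray(b'\xde\xad\xbe\xef'), 'big')))  # 0xdeadbeef

from builtins import bytes as newbytes  # future's backport, as in the report
print(hex(int_from_bytes(newbytes.fromhex('deadbeef'), 'big')))    # 0xdeadbeef
```

`binascii.hexlify()` accepts `str`, `bytearray` and `future`'s `bytes` alike, so no `str()` coercion (and no stray `b'`/`'` framing) can creep in; on some Python 2 builds the printed values may carry an `L` suffix.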
| {"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -57,8 +57,7 @@\n assert byteorder == 'big'\n assert not signed\n \n- # call bytes() on data to allow the use of bytearrays\n- return int(bytes(data).encode('hex'), 16)\n+ return int(binascii.hexlify(data), 16)\n \n \n if hasattr(int, \"to_bytes\"):\n", "issue": "utils.int_from_bytes gives incorrect answers when passed \"builtins.bytes\" in python 2.7\n```\r\n$ mkvirtualenv repro\r\n$ python --version\r\nPython 2.7.12\r\n$ pip install cryptography future\r\n$ python\r\n\r\nfrom cryptography import utils\r\nfrom builtins import bytes\r\nx = bytes.fromhex('deadbeef')\r\ny = utils.int_from_bytes(x, 'big')\r\nhex(y)\r\n'0x6227deadbeef27'\r\n```\r\n\r\nThe reason this happens is that `int_from_bytes` (in py27 mode) casts the passed-in value to `bytes`, which, in py27 mode, is an alias for `str`. Passing a `builtins.bytes` value to `str` somewhat insanely wraps the string with `b'` and `'`. These then get parsed by the rest of `int_from_bytes` as if they were part of the original byte string.\r\n\r\nI think this is particularly unfortunate since all the \"cryptography\" functions say they accept and return `bytes` in their docstrings. Ideally it'd be compatible with all three definitions of `bytes`: the py27 alias to `str`, the one from \"future\", and the py3 one.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. 
They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n # call bytes() on data to allow the use of bytearrays\n return int(bytes(data).encode('hex'), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n", "path": "src/cryptography/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. 
They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n return int(binascii.hexlify(data), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n", "path": "src/cryptography/utils.py"}]} | 2,003 | 111 |
gh_patches_debug_113 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[META 576] Sanitize `*auth*` instead of `authorization`
[](https://github.com/elastic/apm/issues/576)
[](https://github.com/elastic/apm/issues/577)
Sanitize `*auth*` instead of `authorization`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/conf/constants.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import decimal
32 import re
33 from collections import namedtuple
34
35
36 def _starmatch_to_regex(pattern):
37 """
38 This is a duplicate of starmatch_to_regex() in utils/__init__.py
39
40 Duplication to avoid circular imports
41 """
42 options = re.DOTALL
43 # check if we are case sensitive
44 if pattern.startswith("(?-i)"):
45 pattern = pattern[5:]
46 else:
47 options |= re.IGNORECASE
48 i, n = 0, len(pattern)
49 res = []
50 while i < n:
51 c = pattern[i]
52 i = i + 1
53 if c == "*":
54 res.append(".*")
55 else:
56 res.append(re.escape(c))
57 return re.compile(r"(?:%s)\Z" % "".join(res), options)
58
59
60 EVENTS_API_PATH = "intake/v2/events"
61 AGENT_CONFIG_PATH = "config/v1/agents"
62 SERVER_INFO_PATH = ""
63
64 TRACE_CONTEXT_VERSION = 0
65 TRACEPARENT_HEADER_NAME = "traceparent"
66 TRACEPARENT_LEGACY_HEADER_NAME = "elastic-apm-traceparent"
67 TRACESTATE_HEADER_NAME = "tracestate"
68
69 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
70
71 KEYWORD_MAX_LENGTH = 1024
72
73 HTTP_WITH_BODY = {"POST", "PUT", "PATCH", "DELETE"}
74
75 MASK = "[REDACTED]"
76
77 EXCEPTION_CHAIN_MAX_DEPTH = 50
78
79 ERROR = "error"
80 TRANSACTION = "transaction"
81 SPAN = "span"
82 METRICSET = "metricset"
83
84 LABEL_RE = re.compile('[.*"]')
85
86 HARDCODED_PROCESSORS = ["elasticapm.processors.add_context_lines_to_frames"]
87
88 BASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [
89 "password",
90 "passwd",
91 "pwd",
92 "secret",
93 "*key",
94 "*token*",
95 "*session*",
96 "*credit*",
97 "*card*",
98 "authorization",
99 "set-cookie",
100 ]
101
102 BASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]
103
104 OUTCOME = namedtuple("OUTCOME", ["SUCCESS", "FAILURE", "UNKNOWN"])(
105 SUCCESS="success", FAILURE="failure", UNKNOWN="unknown"
106 )
107
108 try:
109 # Python 2
110 LABEL_TYPES = (bool, int, long, float, decimal.Decimal)
111 except NameError:
112 # Python 3
113 LABEL_TYPES = (bool, int, float, decimal.Decimal)
114
115 TRACESTATE = namedtuple("TRACESTATE", ["SAMPLE_RATE"])(SAMPLE_RATE="s")
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py
--- a/elasticapm/conf/constants.py
+++ b/elasticapm/conf/constants.py
@@ -95,7 +95,7 @@
"*session*",
"*credit*",
"*card*",
- "authorization",
+ "*auth*",
"set-cookie",
]
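
To see what the broadened pattern now catches, the glob-to-regex helper from `constants.py` above can be exercised on a few header names (a standalone sketch that inlines the same conversion logic; the header names are only illustrative):

```python
import re

def starmatch_to_regex(pattern):
    # Same conversion as _starmatch_to_regex() above: '*' -> '.*',
    # case-insensitive unless the pattern starts with '(?-i)'.
    options = re.DOTALL
    if pattern.startswith("(?-i)"):
        pattern = pattern[5:]
    else:
        options |= re.IGNORECASE
    res = [".*" if c == "*" else re.escape(c) for c in pattern]
    return re.compile(r"(?:%s)\Z" % "".join(res), options)

auth = starmatch_to_regex("*auth*")
for name in ("authorization", "Authorization", "X-Auth-Token",
             "proxy-authorization", "content-type"):
    print(name, bool(auth.match(name)))
# Everything except "content-type" matches, so Authorization,
# Proxy-Authorization and custom *-Auth-* headers are all sanitized.
```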
| {"golden_diff": "diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py\n--- a/elasticapm/conf/constants.py\n+++ b/elasticapm/conf/constants.py\n@@ -95,7 +95,7 @@\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n- \"authorization\",\n+ \"*auth*\",\n \"set-cookie\",\n ]\n", "issue": "[META 576] Sanitize `*auth*` instead of `authorization`\n[](https://github.com/elastic/apm/issues/576)\n\n[](https://github.com/elastic/apm/issues/577)\n\nSanitize `*auth*` instead of `authorization`\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n 
\"*session*\",\n \"*credit*\",\n \"*card*\",\n \"authorization\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n", "path": "elasticapm/conf/constants.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = 
[\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n \"*auth*\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n", "path": "elasticapm/conf/constants.py"}]} | 1,584 | 84 |
gh_patches_debug_12070 | rasdani/github-patches | git_diff | Kinto__kinto-2011 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DELETE /v1/accounts raises a 500
```
$ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin
HTTP/1.1 500 Internal Server Error
Access-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length
Content-Length: 177
Content-Type: application/json
Date: Mon, 28 Jan 2019 20:45:56 GMT
Via: 1.1 alproxy
X-Content-Type-Options: nosniff
```
```
File "/home/natim/kinto/kinto/kinto/plugins/accounts/views.py", line 221, in on_account_changed
username = request.matchdict["id"]
KeyError: 'id'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/accounts/views.py`
Content:
```
1 import colander
2 from pyramid import httpexceptions
3 from pyramid.decorator import reify
4 from pyramid.security import Authenticated, Everyone
5 from pyramid.settings import aslist
6 from pyramid.events import subscriber
7
8 from kinto.views import NameGenerator
9 from kinto.core import resource, utils
10 from kinto.core.errors import raise_invalid, http_error
11 from kinto.core.events import ResourceChanged, ACTIONS
12
13 from .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME
14
15
16 def _extract_posted_body_id(request):
17 try:
18 # Anonymous creation with POST.
19 return request.json["data"]["id"]
20 except (ValueError, KeyError):
21 # Bad POST data.
22 if request.method.lower() == "post":
23 error_details = {"name": "data.id", "description": "data.id in body: Required"}
24 raise_invalid(request, **error_details)
25 # Anonymous GET
26 error_msg = "Cannot read accounts."
27 raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)
28
29
30 class AccountIdGenerator(NameGenerator):
31 """Allow @ signs in account IDs."""
32
33 regexp = r"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$"
34
35
36 class AccountSchema(resource.ResourceSchema):
37 password = colander.SchemaNode(colander.String())
38
39
40 @resource.register()
41 class Account(resource.Resource):
42
43 schema = AccountSchema
44
45 def __init__(self, request, context):
46 # Store if current user is administrator (before accessing get_parent_id())
47 allowed_from_settings = request.registry.settings.get("account_write_principals", [])
48 context.is_administrator = (
49 len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0
50 )
51 # Shortcut to check if current is anonymous (before get_parent_id()).
52 context.is_anonymous = Authenticated not in request.effective_principals
53
54 super().__init__(request, context)
55
56 # Overwrite the current principal set by Resource.
57 if self.model.current_principal == Everyone or context.is_administrator:
58 # Creation is anonymous, but author with write perm is this:
59 self.model.current_principal = f"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}"
60
61 @reify
62 def id_generator(self):
63 # This generator is used for ID validation.
64 return AccountIdGenerator()
65
66 def get_parent_id(self, request):
67 # The whole challenge here is that we want to isolate what
68 # authenticated users can list, but give access to everything to
69 # administrators.
70 # Plus when anonymous create accounts, we have to set their parent id
71 # to the same value they would obtain when authenticated.
72 if self.context.is_administrator:
73 if self.context.on_plural_endpoint:
74 # Accounts created by admin should have userid as parent.
75 if request.method.lower() == "post":
76 return _extract_posted_body_id(request)
77 else:
78 # Admin see all accounts.
79 return "*"
80 else:
81 # No pattern matching for admin on single record.
82 return request.matchdict["id"]
83
84 if not self.context.is_anonymous:
85 # Authenticated users see their own account only.
86 return request.selected_userid
87
88 # Anonymous creation with PUT.
89 if "id" in request.matchdict:
90 return request.matchdict["id"]
91
92 return _extract_posted_body_id(request)
93
94 def plural_post(self):
95 result = super(Account, self).plural_post()
96 if self.context.is_anonymous and self.request.response.status_code == 200:
97 error_details = {"message": "Account ID %r already exists" % result["data"]["id"]}
98 raise http_error(httpexceptions.HTTPForbidden(), **error_details)
99 return result
100
101 def process_object(self, new, old=None):
102 new = super(Account, self).process_object(new, old)
103
104 new["password"] = hash_password(new["password"])
105
106 # Administrators can reach other accounts and anonymous have no
107 # selected_userid. So do not try to enforce.
108 if self.context.is_administrator or self.context.is_anonymous:
109 return new
110
111 # Do not let accounts be created without usernames.
112 if self.model.id_field not in new:
113 error_details = {"name": "data.id", "description": "Accounts must have an ID."}
114 raise_invalid(self.request, **error_details)
115
116 # Otherwise, we force the id to match the authenticated username.
117 if new[self.model.id_field] != self.request.selected_userid:
118 error_details = {
119 "name": "data.id",
120 "description": "Username and account ID do not match.",
121 }
122 raise_invalid(self.request, **error_details)
123
124 return new
125
126
127 # Clear cache on account change
128 @subscriber(
129 ResourceChanged, for_resources=("account",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE)
130 )
131 def on_account_changed(event):
132 request = event.request
133 cache = request.registry.cache
134 settings = request.registry.settings
135 # Extract username and password from current user
136 username = request.matchdict["id"]
137 hmac_secret = settings["userid_hmac_secret"]
138 cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))
139 # Delete cache
140 cache.delete(cache_key)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py
--- a/kinto/plugins/accounts/views.py
+++ b/kinto/plugins/accounts/views.py
@@ -132,9 +132,11 @@
request = event.request
cache = request.registry.cache
settings = request.registry.settings
- # Extract username and password from current user
- username = request.matchdict["id"]
hmac_secret = settings["userid_hmac_secret"]
- cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))
- # Delete cache
- cache.delete(cache_key)
+
+ for obj in event.impacted_objects:
+ # Extract username and password from current user
+ username = obj["old"]["id"]
+ cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))
+ # Delete cache
+ cache.delete(cache_key)
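
A rough illustration of why iterating `event.impacted_objects` fixes the plural DELETE (the event and object shapes below are simplified stand-ins, not kinto's real classes): the per-object payload still carries each account's id, whereas `request.matchdict` has no `"id"` on `/v1/accounts`.

```python
class FakeEvent(object):
    def __init__(self, impacted_objects):
        self.impacted_objects = impacted_objects

def usernames_to_invalidate(event):
    # Mirrors the patched subscriber: one cache key per impacted account.
    return [obj["old"]["id"] for obj in event.impacted_objects]

event = FakeEvent([{"old": {"id": "alice"}}, {"old": {"id": "bob"}}])
print(usernames_to_invalidate(event))   # ['alice', 'bob']
```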
| {"golden_diff": "diff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py\n--- a/kinto/plugins/accounts/views.py\n+++ b/kinto/plugins/accounts/views.py\n@@ -132,9 +132,11 @@\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n- # Extract username and password from current user\n- username = request.matchdict[\"id\"]\n hmac_secret = settings[\"userid_hmac_secret\"]\n- cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n- # Delete cache\n- cache.delete(cache_key)\n+\n+ for obj in event.impacted_objects:\n+ # Extract username and password from current user\n+ username = obj[\"old\"][\"id\"]\n+ cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n+ # Delete cache\n+ cache.delete(cache_key)\n", "issue": "DELETE /v1/accounts raises a 500\n```\r\n$ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin\r\nHTTP/1.1 500 Internal Server Error\r\nAccess-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length\r\nContent-Length: 177\r\nContent-Type: application/json\r\nDate: Mon, 28 Jan 2019 20:45:56 GMT\r\nVia: 1.1 alproxy\r\nX-Content-Type-Options: nosniff\r\n```\r\n\r\n```\r\n File \"/home/natim/kinto/kinto/kinto/plugins/accounts/views.py\", line 221, in on_account_changed\r\n username = request.matchdict[\"id\"]\r\nKeyError: 'id'\r\n```\nDELETE /v1/accounts raises a 500\n```\r\n$ http DELETE https://natim.alwaysdata.net/v1/accounts --auth admin:admin\r\nHTTP/1.1 500 Internal Server Error\r\nAccess-Control-Expose-Headers: Retry-After, Alert, Backoff, Content-Length\r\nContent-Length: 177\r\nContent-Type: application/json\r\nDate: Mon, 28 Jan 2019 20:45:56 GMT\r\nVia: 1.1 alproxy\r\nX-Content-Type-Options: nosniff\r\n```\r\n\r\n```\r\n File \"/home/natim/kinto/kinto/kinto/plugins/accounts/views.py\", line 221, in on_account_changed\r\n username = request.matchdict[\"id\"]\r\nKeyError: 'id'\r\n```\n", "before_files": [{"content": "import colander\nfrom pyramid import httpexceptions\nfrom pyramid.decorator import reify\nfrom pyramid.security import Authenticated, Everyone\nfrom pyramid.settings import aslist\nfrom pyramid.events import subscriber\n\nfrom kinto.views import NameGenerator\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid, http_error\nfrom kinto.core.events import ResourceChanged, ACTIONS\n\nfrom .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME\n\n\ndef _extract_posted_body_id(request):\n try:\n # Anonymous creation with POST.\n return request.json[\"data\"][\"id\"]\n except (ValueError, KeyError):\n # Bad POST data.\n if request.method.lower() == \"post\":\n error_details = {\"name\": \"data.id\", \"description\": \"data.id in body: Required\"}\n raise_invalid(request, **error_details)\n # Anonymous GET\n error_msg = \"Cannot read accounts.\"\n raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)\n\n\nclass AccountIdGenerator(NameGenerator):\n \"\"\"Allow @ signs in account IDs.\"\"\"\n\n regexp = r\"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$\"\n\n\nclass AccountSchema(resource.ResourceSchema):\n password = colander.SchemaNode(colander.String())\n\n\[email protected]()\nclass Account(resource.Resource):\n\n schema = AccountSchema\n\n def __init__(self, request, context):\n # Store if current user is administrator (before accessing get_parent_id())\n allowed_from_settings = request.registry.settings.get(\"account_write_principals\", [])\n context.is_administrator = (\n 
len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0\n )\n # Shortcut to check if current is anonymous (before get_parent_id()).\n context.is_anonymous = Authenticated not in request.effective_principals\n\n super().__init__(request, context)\n\n # Overwrite the current principal set by Resource.\n if self.model.current_principal == Everyone or context.is_administrator:\n # Creation is anonymous, but author with write perm is this:\n self.model.current_principal = f\"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}\"\n\n @reify\n def id_generator(self):\n # This generator is used for ID validation.\n return AccountIdGenerator()\n\n def get_parent_id(self, request):\n # The whole challenge here is that we want to isolate what\n # authenticated users can list, but give access to everything to\n # administrators.\n # Plus when anonymous create accounts, we have to set their parent id\n # to the same value they would obtain when authenticated.\n if self.context.is_administrator:\n if self.context.on_plural_endpoint:\n # Accounts created by admin should have userid as parent.\n if request.method.lower() == \"post\":\n return _extract_posted_body_id(request)\n else:\n # Admin see all accounts.\n return \"*\"\n else:\n # No pattern matching for admin on single record.\n return request.matchdict[\"id\"]\n\n if not self.context.is_anonymous:\n # Authenticated users see their own account only.\n return request.selected_userid\n\n # Anonymous creation with PUT.\n if \"id\" in request.matchdict:\n return request.matchdict[\"id\"]\n\n return _extract_posted_body_id(request)\n\n def plural_post(self):\n result = super(Account, self).plural_post()\n if self.context.is_anonymous and self.request.response.status_code == 200:\n error_details = {\"message\": \"Account ID %r already exists\" % result[\"data\"][\"id\"]}\n raise http_error(httpexceptions.HTTPForbidden(), **error_details)\n return result\n\n def process_object(self, new, old=None):\n new = super(Account, self).process_object(new, old)\n\n new[\"password\"] = hash_password(new[\"password\"])\n\n # Administrators can reach other accounts and anonymous have no\n # selected_userid. 
So do not try to enforce.\n if self.context.is_administrator or self.context.is_anonymous:\n return new\n\n # Do not let accounts be created without usernames.\n if self.model.id_field not in new:\n error_details = {\"name\": \"data.id\", \"description\": \"Accounts must have an ID.\"}\n raise_invalid(self.request, **error_details)\n\n # Otherwise, we force the id to match the authenticated username.\n if new[self.model.id_field] != self.request.selected_userid:\n error_details = {\n \"name\": \"data.id\",\n \"description\": \"Username and account ID do not match.\",\n }\n raise_invalid(self.request, **error_details)\n\n return new\n\n\n# Clear cache on account change\n@subscriber(\n ResourceChanged, for_resources=(\"account\",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE)\n)\ndef on_account_changed(event):\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n # Extract username and password from current user\n username = request.matchdict[\"id\"]\n hmac_secret = settings[\"userid_hmac_secret\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n # Delete cache\n cache.delete(cache_key)\n", "path": "kinto/plugins/accounts/views.py"}], "after_files": [{"content": "import colander\nfrom pyramid import httpexceptions\nfrom pyramid.decorator import reify\nfrom pyramid.security import Authenticated, Everyone\nfrom pyramid.settings import aslist\nfrom pyramid.events import subscriber\n\nfrom kinto.views import NameGenerator\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid, http_error\nfrom kinto.core.events import ResourceChanged, ACTIONS\n\nfrom .utils import hash_password, ACCOUNT_CACHE_KEY, ACCOUNT_POLICY_NAME\n\n\ndef _extract_posted_body_id(request):\n try:\n # Anonymous creation with POST.\n return request.json[\"data\"][\"id\"]\n except (ValueError, KeyError):\n # Bad POST data.\n if request.method.lower() == \"post\":\n error_details = {\"name\": \"data.id\", \"description\": \"data.id in body: Required\"}\n raise_invalid(request, **error_details)\n # Anonymous GET\n error_msg = \"Cannot read accounts.\"\n raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)\n\n\nclass AccountIdGenerator(NameGenerator):\n \"\"\"Allow @ signs in account IDs.\"\"\"\n\n regexp = r\"^[a-zA-Z0-9][.@a-zA-Z0-9_-]*$\"\n\n\nclass AccountSchema(resource.ResourceSchema):\n password = colander.SchemaNode(colander.String())\n\n\[email protected]()\nclass Account(resource.Resource):\n\n schema = AccountSchema\n\n def __init__(self, request, context):\n # Store if current user is administrator (before accessing get_parent_id())\n allowed_from_settings = request.registry.settings.get(\"account_write_principals\", [])\n context.is_administrator = (\n len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0\n )\n # Shortcut to check if current is anonymous (before get_parent_id()).\n context.is_anonymous = Authenticated not in request.effective_principals\n\n super().__init__(request, context)\n\n # Overwrite the current principal set by Resource.\n if self.model.current_principal == Everyone or context.is_administrator:\n # Creation is anonymous, but author with write perm is this:\n self.model.current_principal = f\"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}\"\n\n @reify\n def id_generator(self):\n # This generator is used for ID validation.\n return AccountIdGenerator()\n\n def get_parent_id(self, request):\n # The whole challenge here is that we want to isolate 
what\n # authenticated users can list, but give access to everything to\n # administrators.\n # Plus when anonymous create accounts, we have to set their parent id\n # to the same value they would obtain when authenticated.\n if self.context.is_administrator:\n if self.context.on_plural_endpoint:\n # Accounts created by admin should have userid as parent.\n if request.method.lower() == \"post\":\n return _extract_posted_body_id(request)\n else:\n # Admin see all accounts.\n return \"*\"\n else:\n # No pattern matching for admin on single record.\n return request.matchdict[\"id\"]\n\n if not self.context.is_anonymous:\n # Authenticated users see their own account only.\n return request.selected_userid\n\n # Anonymous creation with PUT.\n if \"id\" in request.matchdict:\n return request.matchdict[\"id\"]\n\n return _extract_posted_body_id(request)\n\n def plural_post(self):\n result = super(Account, self).plural_post()\n if self.context.is_anonymous and self.request.response.status_code == 200:\n error_details = {\"message\": \"Account ID %r already exists\" % result[\"data\"][\"id\"]}\n raise http_error(httpexceptions.HTTPForbidden(), **error_details)\n return result\n\n def process_object(self, new, old=None):\n new = super(Account, self).process_object(new, old)\n\n new[\"password\"] = hash_password(new[\"password\"])\n\n # Administrators can reach other accounts and anonymous have no\n # selected_userid. So do not try to enforce.\n if self.context.is_administrator or self.context.is_anonymous:\n return new\n\n # Do not let accounts be created without usernames.\n if self.model.id_field not in new:\n error_details = {\"name\": \"data.id\", \"description\": \"Accounts must have an ID.\"}\n raise_invalid(self.request, **error_details)\n\n # Otherwise, we force the id to match the authenticated username.\n if new[self.model.id_field] != self.request.selected_userid:\n error_details = {\n \"name\": \"data.id\",\n \"description\": \"Username and account ID do not match.\",\n }\n raise_invalid(self.request, **error_details)\n\n return new\n\n\n# Clear cache on account change\n@subscriber(\n ResourceChanged, for_resources=(\"account\",), for_actions=(ACTIONS.UPDATE, ACTIONS.DELETE)\n)\ndef on_account_changed(event):\n request = event.request\n cache = request.registry.cache\n settings = request.registry.settings\n hmac_secret = settings[\"userid_hmac_secret\"]\n\n for obj in event.impacted_objects:\n # Extract username and password from current user\n username = obj[\"old\"][\"id\"]\n cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format(username))\n # Delete cache\n cache.delete(cache_key)\n", "path": "kinto/plugins/accounts/views.py"}]} | 2,037 | 199 |
gh_patches_debug_26194 | rasdani/github-patches | git_diff | streamlink__streamlink-95 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Connectcast stream fails with "invalid url"
Attempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error:
`error: Unable to open URL: (Invalid URL '': No schema supplied. Perhaps you mean http://?)`
Similarly, using `http://connectcast.tv/streamname` for the url also fails.
Running on Windows, built with python 3.5.0rc2
--- END ISSUE ---
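
The quoted error is the one `requests` raises when asked to fetch an empty URL, which suggests the playback address the plugin scrapes out of the page is coming back empty before it is fetched. A minimal reproduction of just that error (assumes the `requests` package; the exact wording varies slightly between requests versions):

```python
import requests

try:
    requests.get("")  # an empty URL, as when the scraped manifest address is blank
except requests.exceptions.RequestException as exc:
    # On the requests versions of that era this is a MissingSchema error:
    # Invalid URL '': No schema supplied. Perhaps you mean http://?
    print(exc)
```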
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/connectcast.py`
Content:
```
1 import re
2 import json
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http, validate
6 from streamlink.stream import HDSStream
7
8 SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf"
9
10 _url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/")
11 _manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*")
12
13
14 class ConnectCast(Plugin):
15 @classmethod
16 def can_handle_url(self, url):
17 return _url_re.match(url)
18
19 def _get_streams(self):
20 res = http.get(self.url)
21 match = _manifest_re.search(res.text)
22 manifest = match.group(1)
23 streams = {}
24 streams.update(
25 HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)
26 )
27
28 return streams
29
30 __plugin__ = ConnectCast
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py
--- a/src/streamlink/plugins/connectcast.py
+++ b/src/streamlink/plugins/connectcast.py
@@ -3,13 +3,11 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
-from streamlink.stream import HDSStream
-
-SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf"
-
-_url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/")
-_manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*")
+from streamlink.stream import RTMPStream
+_url_re = re.compile(r"http(?:s)?://connectcast.tv/(\w+)?")
+_stream_re = re.compile(r'<video src="mp4:(.*?)"')
+_stream_url = "http://connectcast.tv/channel/stream/{channel}"
class ConnectCast(Plugin):
@classmethod
@@ -17,14 +15,15 @@
return _url_re.match(url)
def _get_streams(self):
- res = http.get(self.url)
- match = _manifest_re.search(res.text)
- manifest = match.group(1)
- streams = {}
- streams.update(
- HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)
- )
-
- return streams
+ url_match = _url_re.match(self.url)
+ stream_url = _stream_url.format(channel=url_match.group(1))
+ res = self.session.http.get(stream_url)
+ match = _stream_re.search(res.content)
+ if match:
+ params = dict(rtmp="rtmp://stream.connectcast.tv/live",
+ playpath=match.group(1),
+ live=True)
+
+ return dict(live=RTMPStream(self.session, params))
__plugin__ = ConnectCast
| {"golden_diff": "diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py\n--- a/src/streamlink/plugins/connectcast.py\n+++ b/src/streamlink/plugins/connectcast.py\n@@ -3,13 +3,11 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, validate\n-from streamlink.stream import HDSStream\n-\n-SWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n-\n-_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n-_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n+from streamlink.stream import RTMPStream\n \n+_url_re = re.compile(r\"http(?:s)?://connectcast.tv/(\\w+)?\")\n+_stream_re = re.compile(r'<video src=\"mp4:(.*?)\"')\n+_stream_url = \"http://connectcast.tv/channel/stream/{channel}\"\n \n class ConnectCast(Plugin):\n @classmethod\n@@ -17,14 +15,15 @@\n return _url_re.match(url)\n \n def _get_streams(self):\n- res = http.get(self.url)\n- match = _manifest_re.search(res.text)\n- manifest = match.group(1)\n- streams = {}\n- streams.update(\n- HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n- )\n- \n- return streams\n+ url_match = _url_re.match(self.url)\n+ stream_url = _stream_url.format(channel=url_match.group(1))\n+ res = self.session.http.get(stream_url)\n+ match = _stream_re.search(res.content)\n+ if match:\n+ params = dict(rtmp=\"rtmp://stream.connectcast.tv/live\",\n+ playpath=match.group(1),\n+ live=True)\n+\n+ return dict(live=RTMPStream(self.session, params))\n \n __plugin__ = ConnectCast\n", "issue": "Connectcast stream fails with \"invalid url\"\nAttempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error:\n`error: Unable to open URL: (Invalid URL '': No schema supplied. 
Perhaps you mean http://?)`\n\nSimilarly, using `http://connectcast.tv/streamname` for the url also fails.\n\nRunning on Windows, built with python 3.5.0rc2\n\n", "before_files": [{"content": "import re\nimport json\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import HDSStream\n\nSWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n\n_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n\n\nclass ConnectCast(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _manifest_re.search(res.text)\n manifest = match.group(1)\n streams = {}\n streams.update(\n HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n )\n \n return streams\n\n__plugin__ = ConnectCast\n", "path": "src/streamlink/plugins/connectcast.py"}], "after_files": [{"content": "import re\nimport json\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"http(?:s)?://connectcast.tv/(\\w+)?\")\n_stream_re = re.compile(r'<video src=\"mp4:(.*?)\"')\n_stream_url = \"http://connectcast.tv/channel/stream/{channel}\"\n\nclass ConnectCast(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n url_match = _url_re.match(self.url)\n stream_url = _stream_url.format(channel=url_match.group(1))\n res = self.session.http.get(stream_url)\n match = _stream_re.search(res.content)\n if match:\n params = dict(rtmp=\"rtmp://stream.connectcast.tv/live\",\n playpath=match.group(1),\n live=True)\n\n return dict(live=RTMPStream(self.session, params))\n\n__plugin__ = ConnectCast\n", "path": "src/streamlink/plugins/connectcast.py"}]} | 605 | 426 |
gh_patches_debug_3637 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3246 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
#3491 [mB] add video embed to interactive event
**URL:** https://meinberlin-dev.liqd.net/projekte/design-project/
**device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)*
**Comment/Question:**
*Just to confirm, the live stream field should appear just when the project is published? Cause, I can't select the live stream section before being published, otherwise all good*
<img width="1361" alt="Screenshot 2020-11-10 at 16 03 41" src="https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png">
<img width="1389" alt="Screenshot 2020-11-10 at 16 04 07" src="https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png">
<img width="1330" alt="Screenshot 2020-11-10 at 16 04 24" src="https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/livequestions/dashboard.py`
Content:
```
1 from django.urls import reverse
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import components
6
7 from . import views
8
9
10 class LiveStreamComponent(DashboardComponent):
11 identifier = 'live_stream'
12 weight = 20
13 label = _('Live Stream')
14
15 def is_effective(self, module):
16 module_app = module.phases[0].content().app
17 return (module_app == 'meinberlin_livequestions' and
18 not module.project.is_draft)
19
20 def get_progress(self, module):
21 return 0, 0
22
23 def get_base_url(self, module):
24 return reverse('a4dashboard:livequestions-livestream', kwargs={
25 'module_slug': module.slug,
26 })
27
28 def get_urls(self):
29 return [(
30 r'^modules/(?P<module_slug>[-\w_]+)/livestream/$',
31 views.LiveStreamDashboardView.as_view(component=self),
32 'livequestions-livestream'
33 )]
34
35
36 components.register_module(LiveStreamComponent())
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py
--- a/meinberlin/apps/livequestions/dashboard.py
+++ b/meinberlin/apps/livequestions/dashboard.py
@@ -14,8 +14,7 @@
def is_effective(self, module):
module_app = module.phases[0].content().app
- return (module_app == 'meinberlin_livequestions' and
- not module.project.is_draft)
+ return (module_app == 'meinberlin_livequestions')
def get_progress(self, module):
return 0, 0
| {"golden_diff": "diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py\n--- a/meinberlin/apps/livequestions/dashboard.py\n+++ b/meinberlin/apps/livequestions/dashboard.py\n@@ -14,8 +14,7 @@\n \n def is_effective(self, module):\n module_app = module.phases[0].content().app\n- return (module_app == 'meinberlin_livequestions' and\n- not module.project.is_draft)\n+ return (module_app == 'meinberlin_livequestions')\n \n def get_progress(self, module):\n return 0, 0\n", "issue": "#3491 [mB] add video embed to interactive event \n**URL:** https://meinberlin-dev.liqd.net/projekte/design-project/\r\n**device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)*\r\n**Comment/Question:** \r\n*Just to confirm, the live stream field should appear just when the project is published? Cause, I can't select the live stream section before being published, otherwise all good* \r\n\r\n<img width=\"1361\" alt=\"Screenshot 2020-11-10 at 16 03 41\" src=\"https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png\">\r\n<img width=\"1389\" alt=\"Screenshot 2020-11-10 at 16 04 07\" src=\"https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png\">\r\n<img width=\"1330\" alt=\"Screenshot 2020-11-10 at 16 04 24\" src=\"https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png\">\r\n\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass LiveStreamComponent(DashboardComponent):\n identifier = 'live_stream'\n weight = 20\n label = _('Live Stream')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return (module_app == 'meinberlin_livequestions' and\n not module.project.is_draft)\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse('a4dashboard:livequestions-livestream', kwargs={\n 'module_slug': module.slug,\n })\n\n def get_urls(self):\n return [(\n r'^modules/(?P<module_slug>[-\\w_]+)/livestream/$',\n views.LiveStreamDashboardView.as_view(component=self),\n 'livequestions-livestream'\n )]\n\n\ncomponents.register_module(LiveStreamComponent())\n", "path": "meinberlin/apps/livequestions/dashboard.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass LiveStreamComponent(DashboardComponent):\n identifier = 'live_stream'\n weight = 20\n label = _('Live Stream')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return (module_app == 'meinberlin_livequestions')\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse('a4dashboard:livequestions-livestream', kwargs={\n 'module_slug': module.slug,\n })\n\n def get_urls(self):\n return [(\n r'^modules/(?P<module_slug>[-\\w_]+)/livestream/$',\n views.LiveStreamDashboardView.as_view(component=self),\n 'livequestions-livestream'\n )]\n\n\ncomponents.register_module(LiveStreamComponent())\n", "path": "meinberlin/apps/livequestions/dashboard.py"}]} | 962 | 141 |
gh_patches_debug_11143 | rasdani/github-patches | git_diff | qtile__qtile-2811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set version using importlib.metadata
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
If you are suggesting a new feature/enhancement please instead post it on the
discussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas
-->
# Issue description
Currently, if setuptools is not installed on the system running qtile, it will run into issues upon start.
An Arch user reported this downstream: https://bugs.archlinux.org/task/71804
Apart from also guarding against `ModuleNotFoundError` I think it could be a great idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) instead for newer python versions.
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
# Qtile version
0.18.1
# Stack traces
Copied verbatim from the issue reported downstream:
```
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 9, in <module>
import pkg_resources
ModuleNotFoundError: No module named 'pkg_resources'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/qtile", line 33, in <module>
sys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')())
File "/usr/bin/qtile", line 25, in importlib_load_entry_point
return next(matches).load()
File "/usr/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 11, in <module>
except (pkg_resources.DistributionNotFound, ImportError):
NameError: name 'pkg_resources' is not defined
```
# Configuration
not important for this issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/main.py`
Content:
```
1 import argparse
2 import logging
3 import sys
4
5 from libqtile.log_utils import init_log
6 from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top
7
8 try:
9 import pkg_resources
10 VERSION = pkg_resources.require("qtile")[0].version
11 except (pkg_resources.DistributionNotFound, ImportError):
12 VERSION = 'dev'
13
14
15 def main():
16 parent_parser = argparse.ArgumentParser(add_help=False)
17 parent_parser.add_argument(
18 '-l', '--log-level',
19 default='WARNING',
20 dest='log_level',
21 type=str.upper,
22 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
23 help='Set qtile log level'
24 )
25
26 main_parser = argparse.ArgumentParser(
27 prog='qtile',
28 description='A full-featured, pure-Python tiling window manager.',
29 )
30 main_parser.add_argument(
31 '-v', '--version',
32 action='version',
33 version=VERSION,
34 )
35
36 subparsers = main_parser.add_subparsers()
37 start.add_subcommand(subparsers, [parent_parser])
38 shell.add_subcommand(subparsers, [parent_parser])
39 top.add_subcommand(subparsers, [parent_parser])
40 run_cmd.add_subcommand(subparsers, [parent_parser])
41 cmd_obj.add_subcommand(subparsers, [parent_parser])
42 check.add_subcommand(subparsers, [parent_parser])
43 migrate.add_subcommand(subparsers, [parent_parser])
44
45 # `qtile help` should print help
46 def print_help(options):
47 main_parser.print_help()
48 help_ = subparsers.add_parser("help", help="Print help information and exit")
49 help_.set_defaults(func=print_help)
50
51 options = main_parser.parse_args()
52 try:
53 log_level = getattr(logging, options.log_level)
54 init_log(log_level=log_level, log_color=sys.stdout.isatty())
55 options.func(options)
56 except AttributeError:
57 main_parser.print_usage()
58 print("")
59 print("Did you mean:")
60 print(" ".join(sys.argv + ['start']))
61 sys.exit(1)
62
63
64 if __name__ == "__main__":
65 main()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py
--- a/libqtile/scripts/main.py
+++ b/libqtile/scripts/main.py
@@ -6,10 +6,16 @@
from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top
try:
- import pkg_resources
- VERSION = pkg_resources.require("qtile")[0].version
-except (pkg_resources.DistributionNotFound, ImportError):
- VERSION = 'dev'
+ # Python>3.7 can get the version from importlib
+ from importlib.metadata import distribution
+ VERSION = distribution("qtile").version
+except ModuleNotFoundError:
+ try:
+ # pkg_resources is required for 3.7
+ import pkg_resources
+ VERSION = pkg_resources.require("qtile")[0].version
+ except (pkg_resources.DistributionNotFound, ModuleNotFoundError):
+ VERSION = 'dev'
def main():
| {"golden_diff": "diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py\n--- a/libqtile/scripts/main.py\n+++ b/libqtile/scripts/main.py\n@@ -6,10 +6,16 @@\n from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n \n try:\n- import pkg_resources\n- VERSION = pkg_resources.require(\"qtile\")[0].version\n-except (pkg_resources.DistributionNotFound, ImportError):\n- VERSION = 'dev'\n+ # Python>3.7 can get the version from importlib\n+ from importlib.metadata import distribution\n+ VERSION = distribution(\"qtile\").version\n+except ModuleNotFoundError:\n+ try:\n+ # pkg_resources is required for 3.7\n+ import pkg_resources\n+ VERSION = pkg_resources.require(\"qtile\")[0].version\n+ except (pkg_resources.DistributionNotFound, ModuleNotFoundError):\n+ VERSION = 'dev'\n \n \n def main():\n", "issue": "Set version using importlib.metadata\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n\r\nIf you are suggesting a new feature/enhancement please instead post it on the\r\ndiscussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas\r\n-->\r\n\r\n# Issue description\r\n\r\nCurrently, if setuptools is not installed on the system running qtile, it will run into issues upon start.\r\nAn Arch user reported this downstream: https://bugs.archlinux.org/task/71804\r\n\r\nApart from also guarding against `ModuleNotFoundError` I think it could be a great idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) instead for newer python versions.\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. 
\"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\n# Qtile version\r\n\r\n0.18.1\r\n\r\n# Stack traces\r\n\r\nCopied verbatim from the issue reported downstream:\r\n\r\n```\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 9, in <module>\r\nimport pkg_resources\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\nFile \"/usr/bin/qtile\", line 33, in <module>\r\nsys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')())\r\nFile \"/usr/bin/qtile\", line 25, in importlib_load_entry_point\r\nreturn next(matches).load()\r\nFile \"/usr/lib/python3.9/importlib/metadata.py\", line 77, in load\r\nmodule = import_module(match.group('module'))\r\nFile \"/usr/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\nreturn _bootstrap._gcd_import(name[level:], package, level)\r\nFile \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\nFile \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\nFile \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\nFile \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\nFile \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\nFile \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 11, in <module>\r\nexcept (pkg_resources.DistributionNotFound, ImportError):\r\nNameError: name 'pkg_resources' is not defined\r\n```\r\n\r\n# Configuration\r\n\r\nnot important for this issue\n", "before_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef main():\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n main_parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n main_parser.add_argument(\n '-v', '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = main_parser.add_subparsers()\n start.add_subcommand(subparsers, [parent_parser])\n shell.add_subcommand(subparsers, [parent_parser])\n top.add_subcommand(subparsers, [parent_parser])\n run_cmd.add_subcommand(subparsers, [parent_parser])\n cmd_obj.add_subcommand(subparsers, [parent_parser])\n check.add_subcommand(subparsers, [parent_parser])\n migrate.add_subcommand(subparsers, [parent_parser])\n\n # `qtile help` should print help\n def print_help(options):\n main_parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = main_parser.parse_args()\n try:\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n except AttributeError:\n main_parser.print_usage()\n 
print(\"\")\n print(\"Did you mean:\")\n print(\" \".join(sys.argv + ['start']))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "libqtile/scripts/main.py"}], "after_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n\ntry:\n # Python>3.7 can get the version from importlib\n from importlib.metadata import distribution\n VERSION = distribution(\"qtile\").version\nexcept ModuleNotFoundError:\n try:\n # pkg_resources is required for 3.7\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\n except (pkg_resources.DistributionNotFound, ModuleNotFoundError):\n VERSION = 'dev'\n\n\ndef main():\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n main_parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n main_parser.add_argument(\n '-v', '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = main_parser.add_subparsers()\n start.add_subcommand(subparsers, [parent_parser])\n shell.add_subcommand(subparsers, [parent_parser])\n top.add_subcommand(subparsers, [parent_parser])\n run_cmd.add_subcommand(subparsers, [parent_parser])\n cmd_obj.add_subcommand(subparsers, [parent_parser])\n check.add_subcommand(subparsers, [parent_parser])\n migrate.add_subcommand(subparsers, [parent_parser])\n\n # `qtile help` should print help\n def print_help(options):\n main_parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = main_parser.parse_args()\n try:\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n except AttributeError:\n main_parser.print_usage()\n print(\"\")\n print(\"Did you mean:\")\n print(\" \".join(sys.argv + ['start']))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "libqtile/scripts/main.py"}]} | 1,520 | 215 |
gh_patches_debug_11452 | rasdani/github-patches | git_diff | cupy__cupy-1138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cupy.random.permutation() overwrites its argument.
`cupy.random.permutation()` overwrites its argument.
This is incompatible with `numpy.random.permutation`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/random/permutations.py`
Content:
```
1 from cupy.random import generator
2 import six
3
4
5 def shuffle(a):
6 """Shuffles an array.
7
8 Args:
9 a (cupy.ndarray): The array to be shuffled.
10
11 .. seealso:: :func:`numpy.random.shuffle`
12
13 """
14 rs = generator.get_random_state()
15 return rs.shuffle(a)
16
17
18 def permutation(a):
19 """Returns a permuted range or shuffles an array."""
20 if isinstance(a, six.integer_types):
21 rs = generator.get_random_state()
22 return rs.permutation(a)
23 else:
24 return shuffle(a)
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py
--- a/cupy/random/permutations.py
+++ b/cupy/random/permutations.py
@@ -16,9 +16,20 @@
def permutation(a):
- """Returns a permuted range or shuffles an array."""
+ """Returns a permuted range or a permutation of an array.
+
+ Args:
+ a (int or cupy.ndarray): The range or the array to be shuffled.
+
+ Returns:
+ cupy.ndarray: If `a` is an integer, it is permutation range between 0
+ and `a` - 1.
+ Otherwise, it is a permutation of `a`.
+
+ .. seealso:: :func:`numpy.random.permutation`
+ """
+ rs = generator.get_random_state()
if isinstance(a, six.integer_types):
- rs = generator.get_random_state()
return rs.permutation(a)
else:
- return shuffle(a)
+ return a[rs.permutation(len(a))]
| {"golden_diff": "diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py\n--- a/cupy/random/permutations.py\n+++ b/cupy/random/permutations.py\n@@ -16,9 +16,20 @@\n \n \n def permutation(a):\n- \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n+ \"\"\"Returns a permuted range or a permutation of an array.\n+\n+ Args:\n+ a (int or cupy.ndarray): The range or the array to be shuffled.\n+\n+ Returns:\n+ cupy.ndarray: If `a` is an integer, it is permutation range between 0\n+ and `a` - 1.\n+ Otherwise, it is a permutation of `a`.\n+\n+ .. seealso:: :func:`numpy.random.permutation`\n+ \"\"\"\n+ rs = generator.get_random_state()\n if isinstance(a, six.integer_types):\n- rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n- return shuffle(a)\n+ return a[rs.permutation(len(a))]\n", "issue": "cupy.random.permutation() overwrites its argument.\n`cupy.random.permutation()` overwrites its argument.\r\nThis is incompatible with `numpy.random.permutation`.\r\n\n", "before_files": [{"content": "from cupy.random import generator\nimport six\n\n\ndef shuffle(a):\n \"\"\"Shuffles an array.\n\n Args:\n a (cupy.ndarray): The array to be shuffled.\n\n .. seealso:: :func:`numpy.random.shuffle`\n\n \"\"\"\n rs = generator.get_random_state()\n return rs.shuffle(a)\n\n\ndef permutation(a):\n \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n if isinstance(a, six.integer_types):\n rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n return shuffle(a)\n", "path": "cupy/random/permutations.py"}], "after_files": [{"content": "from cupy.random import generator\nimport six\n\n\ndef shuffle(a):\n \"\"\"Shuffles an array.\n\n Args:\n a (cupy.ndarray): The array to be shuffled.\n\n .. seealso:: :func:`numpy.random.shuffle`\n\n \"\"\"\n rs = generator.get_random_state()\n return rs.shuffle(a)\n\n\ndef permutation(a):\n \"\"\"Returns a permuted range or a permutation of an array.\n\n Args:\n a (int or cupy.ndarray): The range or the array to be shuffled.\n\n Returns:\n cupy.ndarray: If `a` is an integer, it is permutation range between 0\n and `a` - 1.\n Otherwise, it is a permutation of `a`.\n\n .. seealso:: :func:`numpy.random.permutation`\n \"\"\"\n rs = generator.get_random_state()\n if isinstance(a, six.integer_types):\n return rs.permutation(a)\n else:\n return a[rs.permutation(len(a))]\n", "path": "cupy/random/permutations.py"}]} | 457 | 228 |
gh_patches_debug_31686 | rasdani/github-patches | git_diff | translate__translate-4045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RemovedInTTK2Warning seems strange
There is ``RemovedInTTK2Warning``, which apparently was meant to flag features that will be removed in translate-toolkit 2. However, translate-toolkit 2 is already out and that did not happen :-).
Either RemovedInTTK2Warning should be renamed as translate-toolkit 2 has already been released, or the deprecation should be applied.
However, it seems that quite a lot of the code relies on that behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/misc/multistring.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2006 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 """Supports a hybrid Unicode string that can also have a list of alternate
21 strings in the strings attribute
22 """
23
24 import warnings
25
26
27 from .deprecation import RemovedInTTK2Warning
28
29
30 def _create_text_type(newtype, string, encoding):
31 """Helper to construct a text type out of characters or bytes. Required to
32 temporarily preserve backwards compatibility. Must be removed in TTK2.
33 """
34 if string is None:
35 string = ''
36 if isinstance(string, str):
37 return str.__new__(newtype, string)
38
39 warnings.warn(
40 'Passing non-ASCII bytes as well as the `encoding` argument to '
41 '`multistring` is deprecated. Always pass unicode characters instead.',
42 RemovedInTTK2Warning, stacklevel=2,
43 )
44 return str.__new__(newtype, string, encoding)
45
46
47 class multistring(str):
48
49 def __new__(newtype, string=u"", *args, **kwargs):
50 encoding = kwargs.pop('encoding', 'utf-8')
51 if isinstance(string, list):
52 if not string:
53 raise ValueError("multistring must contain at least one string")
54 newstring = _create_text_type(newtype, string[0], encoding)
55 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]
56 else:
57 newstring = _create_text_type(newtype, string, encoding)
58 newstring.strings = [newstring]
59 return newstring
60
61 def __init__(self, *args, **kwargs):
62 super().__init__()
63 if not hasattr(self, "strings"):
64 self.strings = []
65
66 def __cmp__(self, otherstring):
67 def cmp_compat(s1, s2):
68 # Python 3 compatible cmp() equivalent
69 return (s1 > s2) - (s1 < s2)
70 if isinstance(otherstring, multistring):
71 parentcompare = cmp_compat(str(self), otherstring)
72 if parentcompare:
73 return parentcompare
74 else:
75 return cmp_compat(self.strings[1:], otherstring.strings[1:])
76 elif isinstance(otherstring, str):
77 return cmp_compat(str(self), otherstring)
78 elif isinstance(otherstring, bytes):
79 return cmp_compat(self.encode('utf-8'), otherstring)
80 elif isinstance(otherstring, list) and otherstring:
81 return cmp_compat(self, multistring(otherstring))
82 else:
83 return cmp_compat(str(type(self)), str(type(otherstring)))
84
85 def __hash__(self):
86 return hash(str(self))
87
88 def __ne__(self, otherstring):
89 return self.__cmp__(otherstring) != 0
90
91 def __eq__(self, otherstring):
92 return self.__cmp__(otherstring) == 0
93
94 def __repr__(self):
95 return u"multistring(%r)" % (
96 [str(item) for item in self.strings]
97 )
98
99 def replace(self, old, new, count=None):
100 if count is None:
101 newstr = multistring(super().replace(old, new))
102 else:
103 newstr = multistring(super().replace(old, new, count))
104 for s in self.strings[1:]:
105 if count is None:
106 newstr.strings.append(s.replace(old, new))
107 else:
108 newstr.strings.append(s.replace(old, new, count))
109 return newstr
110
```
Path: `translate/misc/deprecation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2014 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify it under
8 # the terms of the GNU General Public License as published by the Free Software
9 # Foundation; either version 2 of the License, or (at your option) any later
10 # version.
11 #
12 # translate is distributed in the hope that it will be useful, but WITHOUT ANY
13 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
14 # A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License along with
17 # this program; if not, see <http://www.gnu.org/licenses/>.
18
19 import warnings
20 from functools import wraps
21
22
23 class RemovedInTTK2Warning(DeprecationWarning):
24 pass
25
26
27 def deprecated(message=""):
28 """Decorator that marks functions and methods as deprecated.
29
30 A warning will be emitted when the function or method is used. If a custom
31 message is provided, it will be shown after the default warning message.
32 """
33 def inner_render(func):
34 @wraps(func)
35 def new_func(*args, **kwargs):
36 msg = message # Hack to avoid UnboundLocalError.
37 if msg:
38 msg = "\n" + msg
39 func_code = func.__code__
40 warnings.warn_explicit(
41 "Call to deprecated function {0}.{1}".format(func.__name__,
42 msg),
43 category=DeprecationWarning,
44 filename=func_code.co_filename,
45 lineno=func_code.co_firstlineno + 1
46 )
47 return func(*args, **kwargs)
48 return new_func
49 return inner_render
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/translate/misc/deprecation.py b/translate/misc/deprecation.py
--- a/translate/misc/deprecation.py
+++ b/translate/misc/deprecation.py
@@ -20,10 +20,6 @@
from functools import wraps
-class RemovedInTTK2Warning(DeprecationWarning):
- pass
-
-
def deprecated(message=""):
"""Decorator that marks functions and methods as deprecated.
diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py
--- a/translate/misc/multistring.py
+++ b/translate/misc/multistring.py
@@ -21,40 +21,17 @@
strings in the strings attribute
"""
-import warnings
-
-
-from .deprecation import RemovedInTTK2Warning
-
-
-def _create_text_type(newtype, string, encoding):
- """Helper to construct a text type out of characters or bytes. Required to
- temporarily preserve backwards compatibility. Must be removed in TTK2.
- """
- if string is None:
- string = ''
- if isinstance(string, str):
- return str.__new__(newtype, string)
-
- warnings.warn(
- 'Passing non-ASCII bytes as well as the `encoding` argument to '
- '`multistring` is deprecated. Always pass unicode characters instead.',
- RemovedInTTK2Warning, stacklevel=2,
- )
- return str.__new__(newtype, string, encoding)
-
class multistring(str):
- def __new__(newtype, string=u"", *args, **kwargs):
- encoding = kwargs.pop('encoding', 'utf-8')
+ def __new__(newtype, string=""):
if isinstance(string, list):
if not string:
raise ValueError("multistring must contain at least one string")
- newstring = _create_text_type(newtype, string[0], encoding)
+ newstring = str.__new__(newtype, string[0])
newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]
else:
- newstring = _create_text_type(newtype, string, encoding)
+ newstring = str.__new__(newtype, string)
newstring.strings = [newstring]
return newstring
| {"golden_diff": "diff --git a/translate/misc/deprecation.py b/translate/misc/deprecation.py\n--- a/translate/misc/deprecation.py\n+++ b/translate/misc/deprecation.py\n@@ -20,10 +20,6 @@\n from functools import wraps\n \n \n-class RemovedInTTK2Warning(DeprecationWarning):\n- pass\n-\n-\n def deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n \ndiff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -21,40 +21,17 @@\n strings in the strings attribute\n \"\"\"\n \n-import warnings\n-\n-\n-from .deprecation import RemovedInTTK2Warning\n-\n-\n-def _create_text_type(newtype, string, encoding):\n- \"\"\"Helper to construct a text type out of characters or bytes. Required to\n- temporarily preserve backwards compatibility. Must be removed in TTK2.\n- \"\"\"\n- if string is None:\n- string = ''\n- if isinstance(string, str):\n- return str.__new__(newtype, string)\n-\n- warnings.warn(\n- 'Passing non-ASCII bytes as well as the `encoding` argument to '\n- '`multistring` is deprecated. Always pass unicode characters instead.',\n- RemovedInTTK2Warning, stacklevel=2,\n- )\n- return str.__new__(newtype, string, encoding)\n-\n \n class multistring(str):\n \n- def __new__(newtype, string=u\"\", *args, **kwargs):\n- encoding = kwargs.pop('encoding', 'utf-8')\n+ def __new__(newtype, string=\"\"):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n- newstring = _create_text_type(newtype, string[0], encoding)\n+ newstring = str.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n- newstring = _create_text_type(newtype, string, encoding)\n+ newstring = str.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n", "issue": "RemovedInTTK2Warning seems strange\nThere is ``RemovedInTTK2Warning`` which apparently was meant to flag feature which will be removed in translate-toolkit 2. However it is already out and that did not happen :-).\r\n\r\nEither RemovedInTTK2Warning should be renamed as translate-toolkit 2 has already been released, or the deprecation should be applied.\r\n\r\nHowever it seems that quite a lot of the code seems to rely on that behavior.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport warnings\n\n\nfrom .deprecation import RemovedInTTK2Warning\n\n\ndef _create_text_type(newtype, string, encoding):\n \"\"\"Helper to construct a text type out of characters or bytes. Required to\n temporarily preserve backwards compatibility. 
Must be removed in TTK2.\n \"\"\"\n if string is None:\n string = ''\n if isinstance(string, str):\n return str.__new__(newtype, string)\n\n warnings.warn(\n 'Passing non-ASCII bytes as well as the `encoding` argument to '\n '`multistring` is deprecated. Always pass unicode characters instead.',\n RemovedInTTK2Warning, stacklevel=2,\n )\n return str.__new__(newtype, string, encoding)\n\n\nclass multistring(str):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n encoding = kwargs.pop('encoding', 'utf-8')\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = _create_text_type(newtype, string[0], encoding)\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = _create_text_type(newtype, string, encoding)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(str(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, str):\n return cmp_compat(str(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(str(self))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n return u\"multistring(%r)\" % (\n [str(item) for item in self.strings]\n )\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super().replace(old, new))\n else:\n newstr = multistring(super().replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2014 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# translate is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport warnings\nfrom functools import wraps\n\n\nclass RemovedInTTK2Warning(DeprecationWarning):\n pass\n\n\ndef deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n\n A warning will be emitted when the function or method is used. 
If a custom\n message is provided, it will be shown after the default warning message.\n \"\"\"\n def inner_render(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n msg = message # Hack to avoid UnboundLocalError.\n if msg:\n msg = \"\\n\" + msg\n func_code = func.__code__\n warnings.warn_explicit(\n \"Call to deprecated function {0}.{1}\".format(func.__name__,\n msg),\n category=DeprecationWarning,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1\n )\n return func(*args, **kwargs)\n return new_func\n return inner_render\n", "path": "translate/misc/deprecation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\n\nclass multistring(str):\n\n def __new__(newtype, string=\"\"):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = str.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = str.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(str(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, str):\n return cmp_compat(str(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(str(self))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n return u\"multistring(%r)\" % (\n [str(item) for item in self.strings]\n )\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super().replace(old, new))\n else:\n newstr = multistring(super().replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2014 Zuza Software Foundation\n#\n# This file 
is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# translate is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport warnings\nfrom functools import wraps\n\n\ndef deprecated(message=\"\"):\n \"\"\"Decorator that marks functions and methods as deprecated.\n\n A warning will be emitted when the function or method is used. If a custom\n message is provided, it will be shown after the default warning message.\n \"\"\"\n def inner_render(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n msg = message # Hack to avoid UnboundLocalError.\n if msg:\n msg = \"\\n\" + msg\n func_code = func.__code__\n warnings.warn_explicit(\n \"Call to deprecated function {0}.{1}\".format(func.__name__,\n msg),\n category=DeprecationWarning,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1\n )\n return func(*args, **kwargs)\n return new_func\n return inner_render\n", "path": "translate/misc/deprecation.py"}]} | 1,956 | 512 |
gh_patches_debug_64393 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider longhorn_steakhouse is broken
During the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/longhorn_steakhouse.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class LongHornSteakhouseSpider(scrapy.Spider):
12 name = "longhorn_steakhouse"
13 item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': "Q3259007"}
14 allowed_domains = []
15 start_urls = [
16 'https://www.longhornsteakhouse.com/locations-sitemap.xml',
17 ]
18 custom_settings = {
19 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
20 }
21 download_delay = 5
22
23 def parse_hours(self, hours):
24 opening_hours = OpeningHours()
25
26 for hour in hours:
27 day, open_close = hour.split(' ')
28 open_time, close_time = open_close.split('-')
29 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 response.selector.remove_namespaces()
34 urls = response.xpath('//url/loc/text()').extract()
35 for url in urls:
36 yield scrapy.Request(url=url, callback=self.parse_store)
37
38 def parse_store(self, response):
39 store_data = response.xpath('//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first()
40 if store_data:
41 data = json.loads(store_data)
42 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
43
44 # Handle store pages that are missing the application/ld+json data
45 addr, city_state_zip, phone = response.xpath('//p[@id="info-link-webhead"]/text()').extract()
46 city, state, postcode = re.search(r'(.*?),\s([A-Z]{2})\s([\d-]+)$', city_state_zip).groups()
47
48 properties = {
49 'name': data.get("name") or response.xpath('//h1[@class="style_h1"]/text()').extract_first().strip(),
50 'ref': data["branchCode"] or ref,
51 'addr_full': data["address"]["streetAddress"].strip() or addr.strip(),
52 'city': data["address"]["addressLocality"] or city,
53 'state': data["address"]["addressRegion"] or state,
54 'postcode': data["address"]["postalCode"] or postcode,
55 'country': data["address"]["addressCountry"],
56 'phone': data.get("telephone") or phone.strip(),
57 'website': data.get("url") or response.url,
58 'lat': float(data["geo"]["latitude"]),
59 'lon': float(data["geo"]["longitude"]),
60 }
61
62 hours = data.get("openingHours")
63 if hours:
64 store_hours = self.parse_hours(hours)
65 properties["opening_hours"] = store_hours
66
67 yield GeojsonPointItem(**properties)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py
--- a/locations/spiders/longhorn_steakhouse.py
+++ b/locations/spiders/longhorn_steakhouse.py
@@ -18,7 +18,7 @@
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
- download_delay = 5
+ download_delay = 1
def parse_hours(self, hours):
opening_hours = OpeningHours()
| {"golden_diff": "diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py\n--- a/locations/spiders/longhorn_steakhouse.py\n+++ b/locations/spiders/longhorn_steakhouse.py\n@@ -18,7 +18,7 @@\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n- download_delay = 5\n+ download_delay = 1\n \n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n", "issue": "Spider longhorn_steakhouse is broken\nDuring the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 5\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n 
yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 1\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}]} | 1,280 | 167 |
gh_patches_debug_60375 | rasdani/github-patches | git_diff | UTNkar__moore-794 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Translations for footer_en missing in production
I noticed that in the settings the footer option is called footer_en. It seems a translation has gone missing.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/branding/models.py`
Content:
```
1 from django.db import models
2 from wagtail.contrib.settings.models import BaseSetting, register_setting
3
4 from django.utils.translation import gettext_lazy as _
5 from wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \
6 MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList
7 from wagtail.core import blocks
8 from wagtail.core.fields import StreamField
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from utils.translation import TranslatedField
11
12
13 @register_setting(icon='fa-window-minimize')
14 class FooterSettings(BaseSetting):
15 class Meta:
16 verbose_name = _('footer_en') # quickfix
17
18 footer_en = StreamField(
19 [('column', blocks.StructBlock([
20 ('size', blocks.IntegerBlock(min_value=1, max_value=12)),
21 ('content', blocks.RichTextBlock()),
22 ]))],
23 blank=True,
24 )
25
26 footer_sv = StreamField(
27 [('column', blocks.StructBlock([
28 ('size', blocks.IntegerBlock(min_value=1, max_value=12)),
29 ('content', blocks.RichTextBlock()),
30 ]))],
31 blank=True,
32 )
33
34 footer = TranslatedField('footer_en', 'footer_sv')
35
36 panels_sv = [
37 StreamFieldPanel('footer_sv')
38 ]
39
40 panels_en = [
41 StreamFieldPanel('footer_en')
42 ]
43
44 edit_handler = TabbedInterface([
45 ObjectList(panels_en, heading=_("English")),
46 ObjectList(panels_sv, heading=_("Swedish"))
47 ])
48
49
50 @register_setting(icon='openquote')
51 class SocialMediaSettings(BaseSetting):
52 class Meta:
53 verbose_name = _('social media accounts')
54
55 facebook = models.URLField(
56 help_text=_('Your Facebook page URL'),
57 blank=True,
58 )
59 instagram = models.CharField(
60 max_length=255,
61 help_text=_('Your Instagram username, without the @'),
62 blank=True,
63 )
64 twitter = models.CharField(
65 max_length=255,
66 help_text=_('Your Twitter username, without the @'),
67 blank=True,
68 )
69
70
71 class Logo(models.Model):
72 class Meta:
73 verbose_name = _('logo')
74 verbose_name_plural = _('logos')
75
76 def __str__(self):
77 logotext = str(_('logo'))
78 return logotext.capitalize()
79
80 CATEGORY_CHOICES = (
81 ('committee', _('Committee')),
82 ('section', _('Section')),
83 )
84
85 category = models.CharField(
86 max_length=20,
87 choices=CATEGORY_CHOICES,
88 verbose_name=_('category'),
89 blank=False,
90 null=False,
91 )
92
93 link = models.URLField(
94 verbose_name=_('links to'),
95 null=False,
96 blank=False,
97 )
98
99 logo = models.ForeignKey(
100 'wagtailimages.Image',
101 verbose_name=_('logo'),
102 null=True,
103 blank=True,
104 on_delete=models.SET_NULL,
105 related_name='+'
106 )
107
108 logo_white = models.ForeignKey(
109 'wagtailimages.Image',
110 verbose_name=_('white logo'),
111 null=True,
112 blank=True,
113 on_delete=models.SET_NULL,
114 related_name='+'
115 )
116
117 logo_black = models.ForeignKey(
118 'wagtailimages.Image',
119 verbose_name=_('black logo'),
120 null=True,
121 blank=True,
122 on_delete=models.SET_NULL,
123 related_name='+'
124 )
125
126 belongs_to = models.ForeignKey(
127 'wagtailcore.Site',
128 verbose_name=_('belongs to'),
129 null=True,
130 blank=True,
131 on_delete=models.SET_NULL,
132 )
133
134 # ------ Administrator settings ------
135 panels = [MultiFieldPanel([
136 FieldRowPanel([
137 FieldPanel('category'),
138 FieldPanel('link'),
139 ]),
140 ImageChooserPanel('logo'),
141 ImageChooserPanel('logo_white'),
142 ImageChooserPanel('logo_black'),
143 FieldPanel('belongs_to'),
144 ])]
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/branding/models.py b/src/branding/models.py
--- a/src/branding/models.py
+++ b/src/branding/models.py
@@ -13,7 +13,7 @@
@register_setting(icon='fa-window-minimize')
class FooterSettings(BaseSetting):
class Meta:
- verbose_name = _('footer_en') # quickfix
+ verbose_name = _('footer') # quickfix
footer_en = StreamField(
[('column', blocks.StructBlock([
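
A minimal sketch of the pattern the patch relies on, assuming a standard Wagtail settings model and that a `footer` msgid exists in the compiled translation files (the model shown is illustrative and trimmed to the relevant part):

```python
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting, register_setting


@register_setting
class FooterSettings(BaseSetting):
    class Meta:
        # A plain, translatable msgid; the old value 'footer_en' had no
        # translation, so the raw string leaked into the settings menu.
        verbose_name = _('footer')
```

Because `gettext_lazy` resolves the string at render time, the settings menu label follows the active language instead of showing the untranslated field name.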
| {"golden_diff": "diff --git a/src/branding/models.py b/src/branding/models.py\n--- a/src/branding/models.py\n+++ b/src/branding/models.py\n@@ -13,7 +13,7 @@\n @register_setting(icon='fa-window-minimize')\n class FooterSettings(BaseSetting):\n class Meta:\n- verbose_name = _('footer_en') # quickfix\n+ verbose_name = _('footer') # quickfix\n \n footer_en = StreamField(\n [('column', blocks.StructBlock([\n", "issue": "Translations for footer_en missing in production\nI noticed that in the settings the footer option is called footer_en. Seems like a translation has gone missing\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\n\nfrom django.utils.translation import gettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \\\n MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList\nfrom wagtail.core import blocks\nfrom wagtail.core.fields import StreamField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom utils.translation import TranslatedField\n\n\n@register_setting(icon='fa-window-minimize')\nclass FooterSettings(BaseSetting):\n class Meta:\n verbose_name = _('footer_en') # quickfix\n\n footer_en = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer_sv = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer = TranslatedField('footer_en', 'footer_sv')\n\n panels_sv = [\n StreamFieldPanel('footer_sv')\n ]\n\n panels_en = [\n StreamFieldPanel('footer_en')\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(panels_en, heading=_(\"English\")),\n ObjectList(panels_sv, heading=_(\"Swedish\"))\n ])\n\n\n@register_setting(icon='openquote')\nclass SocialMediaSettings(BaseSetting):\n class Meta:\n verbose_name = _('social media accounts')\n\n facebook = models.URLField(\n help_text=_('Your Facebook page URL'),\n blank=True,\n )\n instagram = models.CharField(\n max_length=255,\n help_text=_('Your Instagram username, without the @'),\n blank=True,\n )\n twitter = models.CharField(\n max_length=255,\n help_text=_('Your Twitter username, without the @'),\n blank=True,\n )\n\n\nclass Logo(models.Model):\n class Meta:\n verbose_name = _('logo')\n verbose_name_plural = _('logos')\n\n def __str__(self):\n logotext = str(_('logo'))\n return logotext.capitalize()\n\n CATEGORY_CHOICES = (\n ('committee', _('Committee')),\n ('section', _('Section')),\n )\n\n category = models.CharField(\n max_length=20,\n choices=CATEGORY_CHOICES,\n verbose_name=_('category'),\n blank=False,\n null=False,\n )\n\n link = models.URLField(\n verbose_name=_('links to'),\n null=False,\n blank=False,\n )\n\n logo = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_white = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('white logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_black = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('black logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n belongs_to = models.ForeignKey(\n 'wagtailcore.Site',\n verbose_name=_('belongs to'),\n null=True,\n blank=True,\n 
on_delete=models.SET_NULL,\n )\n\n # ------ Administrator settings ------\n panels = [MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('category'),\n FieldPanel('link'),\n ]),\n ImageChooserPanel('logo'),\n ImageChooserPanel('logo_white'),\n ImageChooserPanel('logo_black'),\n FieldPanel('belongs_to'),\n ])]\n", "path": "src/branding/models.py"}], "after_files": [{"content": "from django.db import models\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\n\nfrom django.utils.translation import gettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \\\n MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList\nfrom wagtail.core import blocks\nfrom wagtail.core.fields import StreamField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom utils.translation import TranslatedField\n\n\n@register_setting(icon='fa-window-minimize')\nclass FooterSettings(BaseSetting):\n class Meta:\n verbose_name = _('footer') # quickfix\n\n footer_en = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer_sv = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer = TranslatedField('footer_en', 'footer_sv')\n\n panels_sv = [\n StreamFieldPanel('footer_sv')\n ]\n\n panels_en = [\n StreamFieldPanel('footer_en')\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(panels_en, heading=_(\"English\")),\n ObjectList(panels_sv, heading=_(\"Swedish\"))\n ])\n\n\n@register_setting(icon='openquote')\nclass SocialMediaSettings(BaseSetting):\n class Meta:\n verbose_name = _('social media accounts')\n\n facebook = models.URLField(\n help_text=_('Your Facebook page URL'),\n blank=True,\n )\n instagram = models.CharField(\n max_length=255,\n help_text=_('Your Instagram username, without the @'),\n blank=True,\n )\n twitter = models.CharField(\n max_length=255,\n help_text=_('Your Twitter username, without the @'),\n blank=True,\n )\n\n\nclass Logo(models.Model):\n class Meta:\n verbose_name = _('logo')\n verbose_name_plural = _('logos')\n\n def __str__(self):\n logotext = str(_('logo'))\n return logotext.capitalize()\n\n CATEGORY_CHOICES = (\n ('committee', _('Committee')),\n ('section', _('Section')),\n )\n\n category = models.CharField(\n max_length=20,\n choices=CATEGORY_CHOICES,\n verbose_name=_('category'),\n blank=False,\n null=False,\n )\n\n link = models.URLField(\n verbose_name=_('links to'),\n null=False,\n blank=False,\n )\n\n logo = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_white = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('white logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_black = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('black logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n belongs_to = models.ForeignKey(\n 'wagtailcore.Site',\n verbose_name=_('belongs to'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n # ------ Administrator settings ------\n panels = [MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('category'),\n FieldPanel('link'),\n ]),\n ImageChooserPanel('logo'),\n ImageChooserPanel('logo_white'),\n 
ImageChooserPanel('logo_black'),\n FieldPanel('belongs_to'),\n ])]\n", "path": "src/branding/models.py"}]} | 1,497 | 110 |
gh_patches_debug_86 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2754 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transitive import of mitmproxy.version causes warning
Since #1837, we import `.script`, which imports `.flow`, which imports `.version`.
This causes the following warning in pytest:
```
test/mitmproxy/test_version.py::test_version
/Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125:
RuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package
'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in
unpredictable behaviour
warn(RuntimeWarning(msg))
-- Docs: http://doc.pytest.org/en/latest/warnings.html
```
[Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap)
> This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: “Never add a package directory, or any directory inside a package, directly to the Python path”.
> The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path).
Maybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/version.py`
Content:
```
1 import os
2 import subprocess
3
4 # The actual version string. For precompiled binaries, this will be changed to include the build
5 # tag, e.g. "3.0.0.dev0042-0xcafeabc"
6 VERSION = "3.0.0"
7 PATHOD = "pathod " + VERSION
8 MITMPROXY = "mitmproxy " + VERSION
9
10 # Serialization format version. This is displayed nowhere, it just needs to be incremented by one
11 # for each change in the file format.
12 FLOW_FORMAT_VERSION = 5
13
14
15 def get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:
16 """
17 Return a detailed version string, sourced either from a hardcoded VERSION constant
18 or obtained dynamically using git.
19
20 Args:
21 dev: If True, non-tagged releases will include a ".devXXXX" suffix, where XXXX is the number
22 of commits since the last tagged release.
23 build: If True, non-tagged releases will include a "-0xXXXXXXX" suffix, where XXXXXXX are
24 the first seven digits of the commit hash.
25 refresh: If True, always try to use git instead of a potentially hardcoded constant.
26 """
27
28 mitmproxy_version = VERSION
29
30 if "dev" in VERSION and not refresh:
31 pass # There is a hardcoded build tag, so we just use what's there.
32 elif dev or build:
33 here = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
34 try:
35 git_describe = subprocess.check_output(
36 ['git', 'describe', '--tags', '--long'],
37 stderr=subprocess.STDOUT,
38 cwd=here,
39 )
40 last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2)
41 commit = commit.lstrip("g")[:7]
42 tag_dist = int(tag_dist)
43 except Exception:
44 pass
45 else:
46 # Remove current suffix
47 mitmproxy_version = mitmproxy_version.split(".dev")[0]
48
49 # Add suffix for non-tagged releases
50 if tag_dist > 0:
51 mitmproxy_version += ".dev{tag_dist}".format(tag_dist=tag_dist)
52 # The wheel build tag (we use the commit) must start with a digit, so we include "0x"
53 mitmproxy_version += "-0x{commit}".format(commit=commit)
54
55 if not dev:
56 mitmproxy_version = mitmproxy_version.split(".dev")[0]
57 elif not build:
58 mitmproxy_version = mitmproxy_version.split("-0x")[0]
59
60 return mitmproxy_version
61
62
63 if __name__ == "__main__":
64 print(VERSION)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/version.py b/mitmproxy/version.py
--- a/mitmproxy/version.py
+++ b/mitmproxy/version.py
@@ -60,5 +60,5 @@
return mitmproxy_version
-if __name__ == "__main__":
+if __name__ == "__main__": # pragma: no cover
print(VERSION)
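
The pragma only excludes the `__main__` guard from coverage; per the issue, the RuntimeWarning itself appears when `mitmproxy.version` is executed as a script after the `mitmproxy` package has already imported it transitively. A small illustration, assuming mitmproxy is installed (the commands in the comments are illustrative):

```python
# Executing the submodule as a script re-runs it under the name "__main__"
# while "mitmproxy.version" is already in sys.modules, which is what triggers
# the runpy double-import warning quoted in the issue:
#
#     python -m mitmproxy.version
#
# Importing it as a normal module avoids the double-import trap entirely:
from mitmproxy import version

print(version.VERSION)                  # e.g. "3.0.0"
print(version.get_version(dev=True))    # ".devXXXX" suffix when git metadata is available
```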
| {"golden_diff": "diff --git a/mitmproxy/version.py b/mitmproxy/version.py\n--- a/mitmproxy/version.py\n+++ b/mitmproxy/version.py\n@@ -60,5 +60,5 @@\n return mitmproxy_version\n \n \n-if __name__ == \"__main__\":\n+if __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "issue": "Transitive import of mitmproxy.version causes warning\nSince #1837, we import `.script`, will imports `.flow`, which imports `.version`.\r\nThis causes the following warning in pytest:\r\n\r\n```\r\ntest/mitmproxy/test_version.py::test_version\r\n /Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125: \r\nRuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package \r\n'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in \r\nunpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\n\r\n-- Docs: http://doc.pytest.org/en/latest/warnings.html\r\n```\r\n\r\n[Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap)\r\n> This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: \u201cNever add a package directory, or any directory inside a package, directly to the Python path\u201d.\r\n\r\n> The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path).\r\n\r\nMaybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?\n", "before_files": [{"content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. 
This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\":\n print(VERSION)\n", "path": "mitmproxy/version.py"}], "after_files": [{"content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. 
This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "path": "mitmproxy/version.py"}]} | 1,301 | 82 |
gh_patches_debug_31447 | rasdani/github-patches | git_diff | sunpy__sunpy-1551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the need to have astropy installed before installing SunPy
Currently you cannot have a clean Python environment and just `pip install sunpy`; you have to have astropy + numpy installed first.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/io/setup_package.py`
Content:
```
1 from __future__ import absolute_import
2
3 import os
4 import platform
5
6 from distutils.core import Extension
7 from glob import glob
8
9 from astropy_helpers import setup_helpers
10 from astropy.extern import six
11
12
13 def get_extensions():
14
15 if platform.system() == 'Windows' or six.PY3:
16 return list()
17 else:
18 # 'numpy' will be replaced with the proper path to the numpy includes
19 cfg = setup_helpers.DistutilsExtensionArgs()
20 cfg['include_dirs'].append('numpy')
21 cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c')))
22 cfg['extra_compile_args'].extend(['-std=c99', '-O3'])
23 # Squash some warnings
24 cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable',
25 '-Wno-unused-variable',
26 '-Wno-unused-result'])
27
28 e = Extension('sunpy.io._pyana', **cfg)
29 return [e]
30
31 def requires_2to3():
32 return False
33
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # This file is based havily on the astropy version here:
3 # https://github.com/astropy/package-template/blob/master/setup.py
4 # Which is licensed under the astropy license.
5
6 import glob
7 import os
8 import sys
9
10 import ah_bootstrap
11 from setuptools import setup
12
13 # A dirty hack to get around some early import/configurations ambiguities
14 if sys.version_info[0] >= 3:
15 import builtins
16 else:
17 import __builtin__ as builtins
18 builtins._ASTROPY_SETUP_ = True
19
20 # -- Read the Docs Setup -----------------------------------------------------
21
22 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
23
24 if on_rtd:
25 os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'
26 os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'
27
28 from astropy_helpers.setup_helpers import (
29 register_commands, adjust_compiler, get_debug_option, get_package_info)
30 from astropy_helpers.git_helpers import get_git_devstr
31 from astropy_helpers.version_helpers import generate_version_py
32 from sunpy.tests.setup_command import SunPyTest
33
34 # Get some values from the setup.cfg
35 from distutils import config
36 conf = config.ConfigParser()
37 conf.read(['setup.cfg'])
38 metadata = dict(conf.items('metadata'))
39
40 PACKAGENAME = metadata.get('package_name', 'packagename')
41 DESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')
42 AUTHOR = metadata.get('author', 'The SunPy Community')
43 AUTHOR_EMAIL = metadata.get('author_email', '[email protected]')
44 LICENSE = metadata.get('license', 'BSD 2-Clause')
45 URL = metadata.get('url', 'http://sunpy.org')
46
47 LONG_DESCRIPTION = "SunPy is a Python library for solar physics data analysis."
48
49 # Store the package name in a built-in variable so it's easy
50 # to get from other parts of the setup infrastructure
51 builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
52
53 # VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
54 VERSION = '0.7.dev'
55
56 # Indicates if this version is a release version
57 RELEASE = 'dev' not in VERSION
58
59 if not RELEASE:
60 VERSION += get_git_devstr(False)
61
62 # Populate the dict of setup command overrides; this should be done before
63 # invoking any other functionality from distutils since it can potentially
64 # modify distutils' behavior.
65 cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
66
67 # Overwrite the Astropy Testing framework
68 cmdclassd['test'] = type('SunPyTest', (SunPyTest,),
69 {'package_name': 'sunpy'})
70
71 # Adjust the compiler in case the default on this platform is to use a
72 # broken one.
73 adjust_compiler(PACKAGENAME)
74
75 # Freeze build information in version.py
76 generate_version_py(PACKAGENAME, VERSION, RELEASE,
77 get_debug_option(PACKAGENAME))
78
79 # Treat everything in scripts except README.rst as a script to be installed
80 scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
81 if os.path.basename(fname) != 'README.rst']
82
83
84 # Get configuration information from all of the various subpackages.
85 # See the docstring for setup_helpers.update_package_files for more
86 # details.
87 package_info = get_package_info()
88
89 # Add the project-global data
90 package_info['package_data'].setdefault(PACKAGENAME, [])
91
92 # Include all .c files, recursively, including those generated by
93 # Cython, since we can not do this in MANIFEST.in with a "dynamic"
94 # directory name.
95 c_files = []
96 for root, dirs, files in os.walk(PACKAGENAME):
97 for filename in files:
98 if filename.endswith('.c'):
99 c_files.append(
100 os.path.join(
101 os.path.relpath(root, PACKAGENAME), filename))
102 package_info['package_data'][PACKAGENAME].extend(c_files)
103
104 extras_require = {'database': ["sqlalchemy"],
105 'image': ["scikit-image"],
106 'jpeg2000': ["glymur"],
107 'net': ["suds", "beautifulsoup4", "requests"]}
108 extras_require['all'] = extras_require['database'] + extras_require['image'] + \
109 extras_require['net'] + ["wcsaxes>=0.6"]
110
111 setup(name=PACKAGENAME,
112 version=VERSION,
113 description=DESCRIPTION,
114 scripts=scripts,
115 setup_requires=['numpy>1.7.1'],
116 install_requires=['numpy>1.7.1',
117 'astropy>=1.0.0',
118 'scipy',
119 'pandas>=0.12.0',
120 'matplotlib>=1.1'],
121 extras_require=extras_require,
122 provides=[PACKAGENAME],
123 author=AUTHOR,
124 author_email=AUTHOR_EMAIL,
125 license=LICENSE,
126 url=URL,
127 long_description=LONG_DESCRIPTION,
128 cmdclass=cmdclassd,
129 zip_safe=False,
130 use_2to3=False,
131 include_package_data=True,
132 **package_info
133 )
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,6 @@
register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
-from sunpy.tests.setup_command import SunPyTest
# Get some values from the setup.cfg
from distutils import config
@@ -64,9 +63,14 @@
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
-# Overwrite the Astropy Testing framework
-cmdclassd['test'] = type('SunPyTest', (SunPyTest,),
- {'package_name': 'sunpy'})
+try:
+ from sunpy.tests.setup_command import SunPyTest
+ # Overwrite the Astropy Testing framework
+ cmdclassd['test'] = type('SunPyTest', (SunPyTest,),
+ {'package_name': 'sunpy'})
+except Exception:
+ # Catch everything, if it doesn't work, we still want SunPy to install.
+ pass
# Adjust the compiler in case the default on this platform is to use a
# broken one.
diff --git a/sunpy/io/setup_package.py b/sunpy/io/setup_package.py
--- a/sunpy/io/setup_package.py
+++ b/sunpy/io/setup_package.py
@@ -1,18 +1,18 @@
from __future__ import absolute_import
import os
+import sys
import platform
from distutils.core import Extension
from glob import glob
from astropy_helpers import setup_helpers
-from astropy.extern import six
def get_extensions():
- if platform.system() == 'Windows' or six.PY3:
+ if platform.system() == 'Windows' or sys.version_info.major == 3:
return list()
else:
# 'numpy' will be replaced with the proper path to the numpy includes
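
Both hunks follow the same idea: nothing that runs at install time may assume astropy is already importable. A sketch of the guard with the reasoning spelled out in comments — the names follow the patched setup.py, the placeholder values and the fallback behaviour (astropy-helpers' stock test command) are assumptions:

```python
from astropy_helpers.setup_helpers import register_commands

PACKAGENAME, VERSION, RELEASE = 'sunpy', '0.7.dev', False  # placeholders
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)

try:
    # This import chain reaches sunpy -> astropy, neither of which is
    # guaranteed to exist yet when `pip install sunpy` runs setup.py in a
    # clean environment.
    from sunpy.tests.setup_command import SunPyTest
    cmdclassd['test'] = type('SunPyTest', (SunPyTest,), {'package_name': 'sunpy'})
except Exception:
    # Any failure here must not abort the install; the stock test command
    # registered above is used instead.
    pass
```

The setup_package.py hunk applies the same principle to `astropy.extern.six`: checking `sys.version_info.major` directly removes the last build-time import of astropy.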
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,6 @@\n register_commands, adjust_compiler, get_debug_option, get_package_info)\n from astropy_helpers.git_helpers import get_git_devstr\n from astropy_helpers.version_helpers import generate_version_py\n-from sunpy.tests.setup_command import SunPyTest\n \n # Get some values from the setup.cfg\n from distutils import config\n@@ -64,9 +63,14 @@\n # modify distutils' behavior.\n cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n \n-# Overwrite the Astropy Testing framework\n-cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n- {'package_name': 'sunpy'})\n+try:\n+ from sunpy.tests.setup_command import SunPyTest\n+ # Overwrite the Astropy Testing framework\n+ cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n+ {'package_name': 'sunpy'})\n+except Exception:\n+ # Catch everything, if it doesn't work, we still want SunPy to install.\n+ pass\n \n # Adjust the compiler in case the default on this platform is to use a\n # broken one.\ndiff --git a/sunpy/io/setup_package.py b/sunpy/io/setup_package.py\n--- a/sunpy/io/setup_package.py\n+++ b/sunpy/io/setup_package.py\n@@ -1,18 +1,18 @@\n from __future__ import absolute_import\n \n import os\n+import sys\n import platform\n \n from distutils.core import Extension\n from glob import glob\n \n from astropy_helpers import setup_helpers\n-from astropy.extern import six\n \n \n def get_extensions():\n \n- if platform.system() == 'Windows' or six.PY3:\n+ if platform.system() == 'Windows' or sys.version_info.major == 3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n", "issue": "Remove the need to have astropy installed before installing SunPy\nCurrently you can not have a clean python environment and do a `pip install sunpy` you have to have astropy + numpy installed first.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom distutils.core import Extension\nfrom glob import glob\n\nfrom astropy_helpers import setup_helpers\nfrom astropy.extern import six\n\n\ndef get_extensions():\n\n if platform.system() == 'Windows' or six.PY3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n cfg = setup_helpers.DistutilsExtensionArgs()\n cfg['include_dirs'].append('numpy')\n cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c')))\n cfg['extra_compile_args'].extend(['-std=c99', '-O3'])\n # Squash some warnings\n cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable',\n '-Wno-unused-variable',\n '-Wno-unused-result'])\n\n e = Extension('sunpy.io._pyana', **cfg)\n return [e]\n\ndef requires_2to3():\n return False\n", "path": "sunpy/io/setup_package.py"}, {"content": "#!/usr/bin/env python\n# This file is based havily on the astropy version here:\n# https://github.com/astropy/package-template/blob/master/setup.py\n# Which is licensed under the astropy license.\n\nimport glob\nimport os\nimport sys\n\nimport ah_bootstrap\nfrom setuptools import setup\n\n# A dirty hack to get around some early import/configurations ambiguities\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nbuiltins._ASTROPY_SETUP_ = True\n\n# -- Read the Docs Setup -----------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n os.environ['HOME'] = 
'/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n\nfrom astropy_helpers.setup_helpers import (\n register_commands, adjust_compiler, get_debug_option, get_package_info)\nfrom astropy_helpers.git_helpers import get_git_devstr\nfrom astropy_helpers.version_helpers import generate_version_py\nfrom sunpy.tests.setup_command import SunPyTest\n\n# Get some values from the setup.cfg\nfrom distutils import config\nconf = config.ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'packagename')\nDESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')\nAUTHOR = metadata.get('author', 'The SunPy Community')\nAUTHOR_EMAIL = metadata.get('author_email', '[email protected]')\nLICENSE = metadata.get('license', 'BSD 2-Clause')\nURL = metadata.get('url', 'http://sunpy.org')\n\nLONG_DESCRIPTION = \"SunPy is a Python library for solar physics data analysis.\"\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)\nVERSION = '0.7.dev'\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behavior.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\n# Overwrite the Astropy Testing framework\ncmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n {'package_name': 'sunpy'})\n\n# Adjust the compiler in case the default on this platform is to use a\n# broken one.\nadjust_compiler(PACKAGENAME)\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README.rst as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if os.path.basename(fname) != 'README.rst']\n\n\n# Get configuration information from all of the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\nextras_require = {'database': [\"sqlalchemy\"],\n 'image': [\"scikit-image\"],\n 'jpeg2000': [\"glymur\"],\n 'net': [\"suds\", \"beautifulsoup4\", \"requests\"]}\nextras_require['all'] = extras_require['database'] + extras_require['image'] + \\\n extras_require['net'] + [\"wcsaxes>=0.6\"]\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=['numpy>1.7.1'],\n install_requires=['numpy>1.7.1',\n 'astropy>=1.0.0',\n 'scipy',\n 'pandas>=0.12.0',\n 'matplotlib>=1.1'],\n extras_require=extras_require,\n 
provides=[PACKAGENAME],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n **package_info\n )\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport sys\nimport platform\n\nfrom distutils.core import Extension\nfrom glob import glob\n\nfrom astropy_helpers import setup_helpers\n\n\ndef get_extensions():\n\n if platform.system() == 'Windows' or sys.version_info.major == 3:\n return list()\n else:\n # 'numpy' will be replaced with the proper path to the numpy includes\n cfg = setup_helpers.DistutilsExtensionArgs()\n cfg['include_dirs'].append('numpy')\n cfg['sources'].extend(glob(os.path.join(os.path.dirname(__file__), 'src', 'ana', '*.c')))\n cfg['extra_compile_args'].extend(['-std=c99', '-O3'])\n # Squash some warnings\n cfg['extra_compile_args'].extend(['-Wno-unused-but-set-variable',\n '-Wno-unused-variable',\n '-Wno-unused-result'])\n\n e = Extension('sunpy.io._pyana', **cfg)\n return [e]\n\ndef requires_2to3():\n return False\n", "path": "sunpy/io/setup_package.py"}, {"content": "#!/usr/bin/env python\n# This file is based havily on the astropy version here:\n# https://github.com/astropy/package-template/blob/master/setup.py\n# Which is licensed under the astropy license.\n\nimport glob\nimport os\nimport sys\n\nimport ah_bootstrap\nfrom setuptools import setup\n\n# A dirty hack to get around some early import/configurations ambiguities\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nbuiltins._ASTROPY_SETUP_ = True\n\n# -- Read the Docs Setup -----------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n os.environ['HOME'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n os.environ['SUNPY_CONFIGDIR'] = '/home/docs/checkouts/readthedocs.org/user_builds/sunpy/'\n\nfrom astropy_helpers.setup_helpers import (\n register_commands, adjust_compiler, get_debug_option, get_package_info)\nfrom astropy_helpers.git_helpers import get_git_devstr\nfrom astropy_helpers.version_helpers import generate_version_py\n\n# Get some values from the setup.cfg\nfrom distutils import config\nconf = config.ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'packagename')\nDESCRIPTION = metadata.get('description', 'SunPy: Python for Solar Physics')\nAUTHOR = metadata.get('author', 'The SunPy Community')\nAUTHOR_EMAIL = metadata.get('author_email', '[email protected]')\nLICENSE = metadata.get('license', 'BSD 2-Clause')\nURL = metadata.get('url', 'http://sunpy.org')\n\nLONG_DESCRIPTION = \"SunPy is a Python library for solar physics data analysis.\"\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)\nVERSION = '0.7.dev'\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behavior.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\ntry:\n from 
sunpy.tests.setup_command import SunPyTest\n # Overwrite the Astropy Testing framework\n cmdclassd['test'] = type('SunPyTest', (SunPyTest,),\n {'package_name': 'sunpy'})\nexcept Exception:\n # Catch everything, if it doesn't work, we still want SunPy to install.\n pass\n\n# Adjust the compiler in case the default on this platform is to use a\n# broken one.\nadjust_compiler(PACKAGENAME)\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README.rst as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if os.path.basename(fname) != 'README.rst']\n\n\n# Get configuration information from all of the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\nextras_require = {'database': [\"sqlalchemy\"],\n 'image': [\"scikit-image\"],\n 'jpeg2000': [\"glymur\"],\n 'net': [\"suds\", \"beautifulsoup4\", \"requests\"]}\nextras_require['all'] = extras_require['database'] + extras_require['image'] + \\\n extras_require['net'] + [\"wcsaxes>=0.6\"]\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=['numpy>1.7.1'],\n install_requires=['numpy>1.7.1',\n 'astropy>=1.0.0',\n 'scipy',\n 'pandas>=0.12.0',\n 'matplotlib>=1.1'],\n extras_require=extras_require,\n provides=[PACKAGENAME],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n **package_info\n )\n", "path": "setup.py"}]} | 2,017 | 438 |
gh_patches_debug_28668 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1888 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot save user profile with an empty zip code
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
That you can save the form and receive guidance without having to input a ZIP code.
## What is the current behaviour?
If you don't have a ZIP code in the field, the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work.
## How do you reproduce this problem?
Go to my profile, empty the ZIP code field and save.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/profiles/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import re
3
4 from django import forms
5 from django.contrib import auth
6 from django.utils.translation import ugettext as _
7
8 from apps.authentication.models import OnlineUser, Position
9 from apps.profiles.models import Privacy
10
11
12 class ProfileForm(forms.ModelForm):
13 class Meta(object):
14 model = OnlineUser
15
16 fields = [
17 'nickname',
18 'website',
19 'phone_number',
20 'address',
21 'zip_code',
22 'allergies',
23 'compiled',
24 'bio',
25 'gender',
26 'github',
27 'linkedin'
28 ]
29 widgets = {
30 'allergies': forms.Textarea(attrs={'id': 'allergies'}),
31 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),
32 'bio': forms.Textarea(attrs={'id': 'bio'}),
33 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),
34 }
35
36 def clean(self):
37 super(ProfileForm, self).clean()
38
39 cleaned_data = self.cleaned_data
40
41 # ZIP code digits only
42 zip_code = cleaned_data['zip_code']
43 if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
44 self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
45
46 return cleaned_data
47
48
49 class PrivacyForm(forms.ModelForm):
50 class Meta(object):
51 model = Privacy
52 exclude = ['user', 'expose_nickname']
53
54
55 class MailSettingsForm(forms.ModelForm):
56 class Meta(object):
57 model = OnlineUser
58 fields = ['infomail', ]
59
60
61 class PositionForm(forms.ModelForm):
62 class Meta(object):
63 model = Position
64 exclude = ['user']
65 widgets = {
66 'committee': forms.Select(attrs={'class': 'form-control'}),
67 'position': forms.Select(attrs={'class': 'form-control'}),
68 }
69
70 def clean(self):
71 super(PositionForm, self).clean()
72
73 range_compiler = re.compile(r'\d{4}-\d{4}')
74 year_range = self.cleaned_data['period']
75
76 # If it doesn't match the format YYYY-YYYY
77 if not range_compiler.match(year_range):
78 self._errors['period'] = self.error_class(
79 [_('Feil format. Dobbelsjekk at input er på formatet YYYY-YYYY.')]
80 )
81 return self.cleaned_data
82
83 years = year_range.split('-')
84
85 # If somewhat they fucked up input, we don't want None-shit after the split.
86 if not years[0] or not years[1]:
87 self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])
88 return self.cleaned_data
89
90 # If first year is larger than latter, or the diff is more than one, fail.
91 if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:
92 self._errors['period'] = self.error_class([_('Ikke gyldig års-intervall. Bare ett år er tillat.')])
93
94 return self.cleaned_data
95
96
97 class MembershipSettingsForm(forms.ModelForm):
98 def __init__(self, *args, **kwargs):
99 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
100 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
101
102 class Meta(object):
103 model = OnlineUser
104 fields = ['field_of_study', 'started_date']
105
106 widgets = {
107 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),
108 }
109
110
111 class InternalServicesForm(forms.Form):
112 ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Online passord"))
113 services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Ønsket service passord"))
114 current_user = None
115
116 def clean(self):
117 super(InternalServicesForm, self).clean()
118 if self.is_valid():
119 cleaned_data = self.cleaned_data
120
121 # User object relation here
122 user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])
123
124 if user is None or user.id != self.current_user.id:
125 self._errors['ow4_password'] = self.error_class([_(u"Passordet er ikke korrekt.")])
126
127 return cleaned_data
128
```
Path: `apps/authentication/templatetags/gravatar_url_resolver.py`
Content:
```
1 import hashlib
2 import urllib
3
4 from django import template
5 from django.conf import settings
6
7 register = template.Library()
8
9
10 @register.assignment_tag(takes_context=True)
11 def gravatar_url(context, user, size):
12 prefix = "https://" if context['request'].is_secure() else "http://"
13 default = "%s%s%s_%s.png" % (
14 prefix,
15 context['request'].META['HTTP_HOST'],
16 settings.DEFAULT_PROFILE_PICTURE_PREFIX,
17 user.gender
18 )
19
20 grav_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.email.encode()).hexdigest() + "?"
21 grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})
22
23 return grav_url
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py
--- a/apps/authentication/templatetags/gravatar_url_resolver.py
+++ b/apps/authentication/templatetags/gravatar_url_resolver.py
@@ -12,7 +12,7 @@
prefix = "https://" if context['request'].is_secure() else "http://"
default = "%s%s%s_%s.png" % (
prefix,
- context['request'].META['HTTP_HOST'],
+ context['request'].META.get('HTTP_HOST', 'localhost'),
settings.DEFAULT_PROFILE_PICTURE_PREFIX,
user.gender
)
diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -8,6 +8,8 @@
from apps.authentication.models import OnlineUser, Position
from apps.profiles.models import Privacy
+ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer."
+
class ProfileForm(forms.ModelForm):
class Meta(object):
@@ -33,17 +35,10 @@
'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),
}
- def clean(self):
- super(ProfileForm, self).clean()
-
- cleaned_data = self.cleaned_data
-
- # ZIP code digits only
- zip_code = cleaned_data['zip_code']
- if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
- self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
-
- return cleaned_data
+ def clean_zip_code(self):
+ zip_code = self.cleaned_data['zip_code']
+ if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
+ self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
class PrivacyForm(forms.ModelForm):
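
The key change is moving the check into Django's per-field `clean_zip_code()` hook and skipping it when the value is empty, so a blank ZIP code is accepted instead of tripping the old length check at form level. A self-contained sketch of the same pattern on a plain form (the form class is illustrative, not the OnlineUser model form):

```python
import re

from django import forms

ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer."


class AddressForm(forms.Form):
    zip_code = forms.CharField(required=False)

    def clean_zip_code(self):
        # cleaned_data['zip_code'] is '' when the field is left empty, so the
        # empty case passes through without an error.
        zip_code = self.cleaned_data['zip_code']
        if zip_code and not re.fullmatch(r'\d{4}', zip_code):
            self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
        return zip_code


# AddressForm(data={'zip_code': ''}).is_valid()     -> True (empty is allowed)
# AddressForm(data={'zip_code': '12ab'}).is_valid() -> False, with a message shown to the user
```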
| {"golden_diff": "diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py\n--- a/apps/authentication/templatetags/gravatar_url_resolver.py\n+++ b/apps/authentication/templatetags/gravatar_url_resolver.py\n@@ -12,7 +12,7 @@\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n- context['request'].META['HTTP_HOST'],\n+ context['request'].META.get('HTTP_HOST', 'localhost'),\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\ndiff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -8,6 +8,8 @@\n from apps.authentication.models import OnlineUser, Position\n from apps.profiles.models import Privacy\n \n+ZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n+\n \n class ProfileForm(forms.ModelForm):\n class Meta(object):\n@@ -33,17 +35,10 @@\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n \n- def clean(self):\n- super(ProfileForm, self).clean()\n-\n- cleaned_data = self.cleaned_data\n-\n- # ZIP code digits only\n- zip_code = cleaned_data['zip_code']\n- if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n- self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n-\n- return cleaned_data\n+ def clean_zip_code(self):\n+ zip_code = self.cleaned_data['zip_code']\n+ if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n+ self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n \n \n class PrivacyForm(forms.ModelForm):\n", "issue": "Cannot save user profile with an empty zip code\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n## What is the expected behaviour?\r\n\r\nThat you can save the form and receive guidance without having to input a ZIP code.\r\n\r\n## What is the current behaviour?\r\n\r\nIf you don't have a ZIP code in the field the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work.\r\n\r\n## How do you reproduce this problem? 
\r\n\r\nGo to my profile, empty the ZIP code field and save.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. 
Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}, {"content": "import hashlib\nimport urllib\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\n\n grav_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.email.encode()).hexdigest() + \"?\"\n grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n\n return grav_url\n", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler 
= re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}, {"content": "import hashlib\nimport urllib\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n context['request'].META.get('HTTP_HOST', 'localhost'),\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\n\n grav_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.email.encode()).hexdigest() + \"?\"\n grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n\n return grav_url\n", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}]} | 1,829 | 439 |
gh_patches_debug_25459 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-948 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
anytime_fitness.py null values
The scraper currently includes address2 whether it is null or not, resulting in ", None" being appended to many (most?) of the addr:full fields.
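For a quick illustration (with made-up address values standing in for the gyms.json feed), the current formatting renders None literally, while filtering before joining keeps the field clean:

```python
# Made-up values standing in for one gym record from gyms.json.
address = "123 Main St"
address2 = None

# Current pattern: str.format() turns None into the literal text "None".
print('{}, {}'.format(address, address2))            # -> "123 Main St, None"

# One way to avoid it: drop empty/None parts before joining.
print(", ".join(filter(None, [address, address2])))  # -> "123 Main St"
```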
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/anytime_fitness.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5
6 class AnytimeFitnessSpider(scrapy.Spider):
7 name = 'anytime_fitness'
8 allowed_domains = ['www.anytimefitness.com']
9
10 def start_requests(self):
11 url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'
12 yield scrapy.Request(url, callback=self.parse)
13
14 def parse(self, response):
15 gyms = json.loads(response.body_as_unicode())
16
17 for gym in gyms:
18 yield GeojsonPointItem(
19 lat = gym['latitude'],
20 lon = gym['longitude'],
21 addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
22 city = gym['content']['city'],
23 phone = gym['content']['phone'],
24 state = gym['content']['state_abbr'],
25 postcode = gym['content']['zip'],
26 ref = gym['content']['url'],
27 country = gym['content']['country']
28 )
29
30
31
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py
--- a/locations/spiders/anytime_fitness.py
+++ b/locations/spiders/anytime_fitness.py
@@ -2,6 +2,8 @@
import scrapy
from locations.items import GeojsonPointItem
import json
+import html
+
class AnytimeFitnessSpider(scrapy.Spider):
name = 'anytime_fitness'
@@ -18,14 +20,13 @@
yield GeojsonPointItem(
lat = gym['latitude'],
lon = gym['longitude'],
- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
+ addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
city = gym['content']['city'],
phone = gym['content']['phone'],
state = gym['content']['state_abbr'],
postcode = gym['content']['zip'],
ref = gym['content']['url'],
- country = gym['content']['country']
+ country = gym['content']['country'],
+ name = html.unescape(gym['content']['title']),
+ extras = {"number": gym['content']['number']}
)
-
-
-
| {"golden_diff": "diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py\n--- a/locations/spiders/anytime_fitness.py\n+++ b/locations/spiders/anytime_fitness.py\n@@ -2,6 +2,8 @@\n import scrapy\n from locations.items import GeojsonPointItem\n import json\n+import html\n+\n \n class AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n@@ -18,14 +20,13 @@\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n+ addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n- country = gym['content']['country']\n+ country = gym['content']['country'],\n+ name = html.unescape(gym['content']['title']),\n+ extras = {\"number\": gym['content']['number']}\n )\n- \n- \n-\n", "issue": "anytime_fitness.py null values\nThe scraper currently includes address2 whether it is null or not, resulting in \", None\" being appended to many (most?) of the addr:full fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country']\n )\n \n \n \n", "path": "locations/spiders/anytime_fitness.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\nimport html\n\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country'],\n name = html.unescape(gym['content']['title']),\n extras = {\"number\": gym['content']['number']}\n )\n", "path": "locations/spiders/anytime_fitness.py"}]} | 576 | 283 |
gh_patches_debug_9873 | rasdani/github-patches | git_diff | wright-group__WrightTools-992 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
collection.convert
Would like to support syntax `collection.convert(unit)`.
Would convert all contained data objects recursively.
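A rough sketch of that behaviour with toy classes (stand-ins, not the real WrightTools objects): each child exposes its own `convert`, so nested collections recurse naturally.

```python
# Toy stand-ins for Data and Collection; the real objects live in WrightTools.
class FakeData:
    def __init__(self, name):
        self.name = name

    def convert(self, units):
        print("{}: axes converted to {}".format(self.name, units))


class FakeCollection:
    def __init__(self, items):
        self.items = items  # may hold both FakeData and nested FakeCollection

    def convert(self, units):
        # Delegate to every child; a nested collection recurses by the same call.
        for item in self.items:
            item.convert(units)


tree = FakeCollection([FakeData("spectrum"), FakeCollection([FakeData("kinetics")])])
tree.convert("eV")  # prints a line for every data object in the tree
```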
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/collection/_collection.py`
Content:
```
1 """Collection."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import data as wt_data
10 from .. import exceptions as wt_exceptions
11 from .._group import Group
12
13
14 # --- define --------------------------------------------------------------------------------------
15
16
17 __all__ = ["Collection"]
18
19
20 # --- classes -------------------------------------------------------------------------------------
21
22
23 class Collection(Group):
24 """Nestable Collection of Data objects."""
25
26 class_name = "Collection"
27
28 def __iter__(self):
29 self.__n = 0
30 return self
31
32 def __len__(self):
33 return len(self.item_names)
34
35 def __next__(self):
36 if self.__n < len(self):
37 out = self.item_names[self.__n]
38 self.__n += 1
39 else:
40 raise StopIteration
41 return out
42
43 def __repr__(self):
44 return "<WrightTools.Collection '{0}' {1} at {2}>".format(
45 self.natural_name, self.item_names, "::".join([self.filepath, self.name])
46 )
47
48 def __getitem__(self, key):
49 if isinstance(key, int):
50 key = self.item_names[key]
51 if key == "":
52 return None
53 return super().__getitem__(key)
54
55 def __setitem__(self, key, value):
56 raise NotImplementedError
57
58 @property
59 def _leaf(self):
60 return self.natural_name
61
62 def _print_branch(self, prefix, depth, verbose):
63 for i, name in enumerate(self.item_names):
64 item = self[name]
65 if i + 1 == len(self.item_names):
66 s = prefix + "└── {0}: {1}".format(i, item._leaf)
67 p = prefix + " "
68 else:
69 s = prefix + "├── {0}: {1}".format(i, item._leaf)
70 p = prefix + "│ "
71 print(s)
72 if depth > 1 and hasattr(item, "_print_branch"):
73 item._print_branch(p, depth=depth - 1, verbose=verbose)
74
75 def create_collection(self, name="collection", position=None, **kwargs):
76 """Create a new child colleciton.
77
78 Parameters
79 ----------
80 name : string
81 Unique identifier.
82 position : integer (optional)
83 Location to insert. Default is None (append).
84 kwargs
85 Additional arguments to child collection instantiation.
86
87 Returns
88 -------
89 WrightTools Collection
90 New child.
91 """
92 if name in self.item_names:
93 wt_exceptions.ObjectExistsWarning.warn(name)
94 return self[name]
95 collection = Collection(
96 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
97 )
98 if position is not None:
99 self.attrs["item_names"] = np.insert(
100 self.attrs["item_names"][:-1], position, collection.natural_name.encode()
101 )
102 setattr(self, name, collection)
103 return collection
104
105 def create_data(self, name="data", position=None, **kwargs):
106 """Create a new child data.
107
108 Parameters
109 ----------
110 name : string
111 Unique identifier.
112 position : integer (optional)
113 Location to insert. Default is None (append).
114 kwargs
115 Additional arguments to child data instantiation.
116
117 Returns
118 -------
119 WrightTools Data
120 New child.
121 """
122 if name in self.item_names:
123 wt_exceptions.ObjectExistsWarning.warn(name)
124 return self[name]
125
126 if name == "":
127 data = None
128 natural_name = "".encode()
129 else:
130 data = wt_data.Data(
131 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
132 )
133 natural_name = data.natural_name.encode()
134 if position is not None:
135 self.attrs["item_names"] = np.insert(
136 self.attrs["item_names"][:-1], position, natural_name
137 )
138 setattr(self, name, data)
139 return data
140
141 def index(self):
142 """Index."""
143 raise NotImplementedError
144
145 def print_tree(self, depth=9, *, verbose=False):
146 """Print a ascii-formatted tree representation of the collection contents.
147
148 Parameters
149 ----------
150 depth : integer (optional)
151 Number of layers to include in the tree. Default is 9.
152 verbose : boolean (optional)
153 Toggle inclusion of extra information. Default is True.
154 """
155 print("{0} ({1})".format(self.natural_name, self.filepath))
156 self._print_branch("", depth=depth, verbose=verbose)
157
158 def flush(self):
159 """Ensure contents are written to file."""
160 for name in self.item_names:
161 item = self[name]
162 item.flush()
163 self.file.flush()
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py
--- a/WrightTools/collection/_collection.py
+++ b/WrightTools/collection/_collection.py
@@ -102,6 +102,18 @@
setattr(self, name, collection)
return collection
+ def convert(self, units, convert_variables=False, verbose=True):
+ """Convert units of a collection.
+ Parameters
+ ----------
+ units: string
+ Units to convert to.
+ """
+
+ for name in self.item_names:
+ item = self[name]
+ item.convert(units, convert_variables=convert_variables, verbose=verbose)
+
def create_data(self, name="data", position=None, **kwargs):
"""Create a new child data.
| {"golden_diff": "diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py\n--- a/WrightTools/collection/_collection.py\n+++ b/WrightTools/collection/_collection.py\n@@ -102,6 +102,18 @@\n setattr(self, name, collection)\n return collection\n \n+ def convert(self, units, convert_variables=False, verbose=True):\n+ \"\"\"Convert units of a collection.\n+ Parameters\n+ ----------\n+ units: string\n+ Units to convert to.\n+ \"\"\"\n+\n+ for name in self.item_names:\n+ item = self[name]\n+ item.convert(units, convert_variables=convert_variables, verbose=verbose)\n+\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n", "issue": "collection.convert\nWould like to support syntax `collection.convert(unit)`.\r\n\r\nWould convert all contained data objects recursively.\n", "before_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}], "after_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. 
import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def convert(self, units, convert_variables=False, verbose=True):\n \"\"\"Convert units of a collection.\n Parameters\n ----------\n units: string\n Units to convert to.\n \"\"\"\n\n for name in self.item_names:\n item = self[name]\n item.convert(units, convert_variables=convert_variables, verbose=verbose)\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}]} | 1,671 | 176 |
gh_patches_debug_26291 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The requirement of superuser postgresql access is problematic
## Problem
Mathesar needs a Postgres superuser to function correctly, according to the docs at https://docs.mathesar.org/installation/build-from-source/
## Proposed solution
The mathesar user should not require superuser access.
## Additional context
The superuser is a global permission, meaning that a user with superuser rights can access (and modify) not just the mathesar database but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many applications, this is a major problem, since the mathesar user would have no access boundaries on that RDBMS. The mathesar user's access can be unlimited inside the mathesar database, but it *must* be bounded outside of it.
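As a sketch of the direction this points in (illustrative only, assuming SQLAlchemy and a reachable Postgres server, not Mathesar's actual installer), an install step can attempt the privileged operation and report failure instead of demanding a superuser:

```python
# Illustrative sketch; assumes SQLAlchemy and a reachable Postgres server.
from sqlalchemy import create_engine, text
from sqlalchemy.exc import ProgrammingError


def try_create_database(url, database_name):
    """Attempt CREATE DATABASE and report failure instead of requiring superuser."""
    engine = create_engine(url)
    try:
        with engine.connect() as conn:
            conn.execution_options(isolation_level="AUTOCOMMIT")
            conn.execute(text('CREATE DATABASE "{}"'.format(database_name)))
        return True
    except ProgrammingError:
        # Typically the role lacks CREATEDB; an administrator can create the
        # database up front and the app then only needs rights inside it.
        return False
    finally:
        engine.dispose()
```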
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/install.py`
Content:
```
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.sql import install as sql_install
6 from db.types import install as types_install
7
8
9 def install_mathesar(
10 database_name, username, password, hostname, port, skip_confirm
11 ):
12 """Create database and install Mathesar on it."""
13 user_db_engine = engine.create_future_engine(
14 username, password, hostname, database_name, port,
15 connect_args={"connect_timeout": 10}
16 )
17 try:
18 user_db_engine.connect()
19 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
20 sql_install.install(user_db_engine)
21 types_install.install_mathesar_on_database(user_db_engine)
22 user_db_engine.dispose()
23 except OperationalError:
24 database_created = _create_database(
25 database_name=database_name,
26 hostname=hostname,
27 username=username,
28 password=password,
29 port=port,
30 skip_confirm=skip_confirm
31 )
32 if database_created:
33 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
34 sql_install.install(user_db_engine)
35 types_install.install_mathesar_on_database(user_db_engine)
36 user_db_engine.dispose()
37 else:
38 print(f"Skipping installing on DB with key {database_name}.")
39
40
41 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
42 if skip_confirm is True:
43 create_database = "y"
44 else:
45 create_database = input(
46 f"Create a new Database called {database_name}? (y/n) > "
47 )
48 if create_database.lower() in ["y", "yes"]:
49 # We need to connect to an existing database inorder to create a new Database.
50 # So we use the default Database `postgres` that comes with postgres.
51 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
52 root_database = "postgres"
53 root_db_engine = engine.create_future_engine(
54 username, password, hostname, root_database, port,
55 connect_args={"connect_timeout": 10}
56 )
57 with root_db_engine.connect() as conn:
58 conn.execution_options(isolation_level="AUTOCOMMIT")
59 conn.execute(text(f'CREATE DATABASE "{database_name}"'))
60 root_db_engine.dispose()
61 print(f"Created DB is {database_name}.")
62 return True
63 else:
64 print(f"Database {database_name} not created!")
65 return False
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -1,5 +1,6 @@
+from psycopg.errors import InsufficientPrivilege
from sqlalchemy import text
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.exc import OperationalError, ProgrammingError
from db import engine
from db.sql import install as sql_install
@@ -54,12 +55,20 @@
username, password, hostname, root_database, port,
connect_args={"connect_timeout": 10}
)
- with root_db_engine.connect() as conn:
- conn.execution_options(isolation_level="AUTOCOMMIT")
- conn.execute(text(f'CREATE DATABASE "{database_name}"'))
- root_db_engine.dispose()
- print(f"Created DB is {database_name}.")
- return True
+ try:
+ with root_db_engine.connect() as conn:
+ conn.execution_options(isolation_level="AUTOCOMMIT")
+ conn.execute(text(f'CREATE DATABASE "{database_name}"'))
+ root_db_engine.dispose()
+ print(f"Created DB is {database_name}.")
+ return True
+ except ProgrammingError as e:
+ if isinstance(e.orig, InsufficientPrivilege):
+ print(f"Database {database_name} could not be created due to Insufficient Privilege")
+ return False
+ except Exception:
+ print(f"Database {database_name} could not be created!")
+ return False
else:
print(f"Database {database_name} not created!")
return False
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -1,5 +1,6 @@\n+from psycopg.errors import InsufficientPrivilege\n from sqlalchemy import text\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n \n from db import engine\n from db.sql import install as sql_install\n@@ -54,12 +55,20 @@\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n- with root_db_engine.connect() as conn:\n- conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n- root_db_engine.dispose()\n- print(f\"Created DB is {database_name}.\")\n- return True\n+ try:\n+ with root_db_engine.connect() as conn:\n+ conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n+ root_db_engine.dispose()\n+ print(f\"Created DB is {database_name}.\")\n+ return True\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, InsufficientPrivilege):\n+ print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n+ return False\n+ except Exception:\n+ print(f\"Database {database_name} could not be created!\")\n+ return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "issue": "The requirement of superuser postgresql access is problematic\n## Problem\r\nMathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/ \r\n\r\n## Proposed solution\r\nThe mathesar user should not require superuser access. \r\n\r\n## Additional context\r\nThe superuser is a global permission meaning that a user that has superuser permission will be able to access (and do stuff) not on the mathesar but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many application this is a major problem since the mathsar user won't have any access boundaries on the same RDBMS. 
The mathesar user access can be unlimited but *must* be bounded without the mathesar database.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}], "after_files": [{"content": "from psycopg.errors import InsufficientPrivilege\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n 
skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n except ProgrammingError as e:\n if isinstance(e.orig, InsufficientPrivilege):\n print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n return False\n except Exception:\n print(f\"Database {database_name} could not be created!\")\n return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,072 | 342 |
gh_patches_debug_30331 | rasdani/github-patches | git_diff | e-valuation__EvaP-424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UserProfile missing when User is not created during import
Users who are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.
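A minimal sketch of the usual Django-style remedy (generic names, not the actual EvaP code): fetch-or-create the profile at the point of use instead of assuming the enrollment import created it.

```python
# Sketch only; `profile_model` is any Django model with a one-to-one `user`
# field, e.g. a UserProfile, and `user` is the authenticated request.user.
def ensure_profile(user, profile_model):
    """Return the user's profile, creating an empty one if no import ever did."""
    profile, _created = profile_model.objects.get_or_create(user=user)
    return profile
```

Calling something like this once per request (or from a post-login signal) lets later code rely on the profile always existing.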
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/views.py`
Content:
```
1 from django.contrib import messages
2 from django.contrib.auth import login as auth_login
3 from django.shortcuts import redirect, render_to_response
4 from django.template import RequestContext
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
8 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate
9
10
11 def index(request):
12 """Main entry page into EvaP providing all the login options available. THe username/password
13 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
14 The login key mechanism is meant to be used to include external participants, e.g. visiting
15 students or visiting contributors.
16 """
17
18 # parse the form data into the respective form
19 submit_type = request.POST.get("submit_type", "no_submit")
20 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
21 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None)
22 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
23
24 # process form data
25 if request.method == 'POST':
26 if new_key_form.is_valid():
27 # user wants a new login key
28 profile = new_key_form.get_profile()
29 profile.generate_login_key()
30 profile.save()
31
32 EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())
33
34 messages.success(request, _(u"Successfully sent email with new login key."))
35 elif login_key_form.is_valid():
36 # user would like to login with a login key and passed key test
37 auth_login(request, login_key_form.get_user())
38 elif login_username_form.is_valid():
39 # user would like to login with username and password and passed password test
40 auth_login(request, login_username_form.get_user())
41
42 # clean up our test cookie
43 if request.session.test_cookie_worked():
44 request.session.delete_test_cookie()
45
46 # if not logged in by now, render form
47 if not request.user.is_active:
48 # set test cookie to verify whether they work in the next step
49 request.session.set_test_cookie()
50
51 return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
52 else:
53 # check for redirect variable
54 redirect_to = request.GET.get("next", None)
55 if redirect_to is not None:
56 if redirect_to.startswith("/fsr/"):
57 if request.user.is_staff:
58 return redirect(redirect_to)
59 elif redirect_to.startswith("/contributor/"):
60 if UserProfile.get_for_user(request.user).is_contributor:
61 return redirect(redirect_to)
62 else:
63 return redirect(redirect_to)
64
65 # redirect user to appropriate start page
66 if request.user.is_staff:
67 return redirect('evap.fsr.views.index')
68 elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
69 return redirect('evap.contributor.views.index')
70 else:
71 return redirect('evap.student.views.index')
72
73
74 def faq(request):
75 return render_to_response("faq.html", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -50,6 +50,8 @@
return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
else:
+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)
+
# check for redirect variable
redirect_to = request.GET.get("next", None)
if redirect_to is not None:
@@ -57,7 +59,7 @@
if request.user.is_staff:
return redirect(redirect_to)
elif redirect_to.startswith("/contributor/"):
- if UserProfile.get_for_user(request.user).is_contributor:
+ if userprofile.is_contributor:
return redirect(redirect_to)
else:
return redirect(redirect_to)
@@ -65,7 +67,7 @@
# redirect user to appropriate start page
if request.user.is_staff:
return redirect('evap.fsr.views.index')
- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
+ elif userprofile.is_editor_or_delegate:
return redirect('evap.contributor.views.index')
else:
return redirect('evap.student.views.index')
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -50,6 +50,8 @@\n \n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n+\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n@@ -57,7 +59,7 @@\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n- if UserProfile.get_for_user(request.user).is_contributor:\n+ if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n@@ -65,7 +67,7 @@\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n+ elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n", "issue": "UserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\nUserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if UserProfile.get_for_user(request.user).is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}]} | 1,221 | 304 |
gh_patches_debug_19870 | rasdani/github-patches | git_diff | Parsl__parsl-389 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
parsl installed from pip tries to determine its version using git
Every time I run parsl I get:
```
kacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py
fatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'
```
and in logs:
```
2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state
Traceback (most recent call last):
File "/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.
```
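A small sketch of the kind of guard that avoids shelling out to git for a pip-installed copy (paths and fallback behaviour are illustrative, not Parsl's final implementation):

```python
# Illustrative guard: only ask git for metadata when a .git directory exists.
import os
import subprocess

PACKAGE_VERSION = "0.5.2"  # stand-in for parsl.version.VERSION


def describe_version(package_dir):
    """Append a git hash only when the package lives in an actual checkout."""
    work_tree = os.path.dirname(package_dir)
    git_dir = os.path.join(work_tree, ".git")
    if not os.path.isdir(git_dir):
        # pip-installed copy: no repository present, so skip git entirely.
        return PACKAGE_VERSION
    try:
        head = subprocess.check_output(
            ["git", "--git-dir", git_dir, "--work-tree", work_tree,
             "rev-parse", "--short", "HEAD"],
            stderr=subprocess.DEVNULL,
        ).strip().decode("utf-8")
        return "{}-{}".format(PACKAGE_VERSION, head)
    except (OSError, subprocess.CalledProcessError):
        return PACKAGE_VERSION


if __name__ == "__main__":
    print(describe_version(os.path.dirname(os.path.abspath(__file__))))
```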
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/utils.py`
Content:
```
1 import logging
2 import os
3 import shlex
4 import subprocess
5 import threading
6 import time
7 from contextlib import contextmanager
8 from functools import wraps
9
10 import parsl
11 from parsl.version import VERSION
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_version():
17 version = parsl.__version__
18 work_tree = os.path.dirname(os.path.dirname(__file__))
19 git_dir = os.path.join(work_tree, '.git')
20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
21 try:
22 cmd = shlex.split('git rev-parse --short HEAD')
23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
25 status = 'dirty' if diff else 'clean'
26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
27 except Exception as e:
28 pass
29
30 return version
31
32
33 def get_all_checkpoints(rundir="runinfo"):
34 """Finds the checkpoints from all last runs.
35
36 Note that checkpoints are incremental, and this helper will not find
37 previous checkpoints from earlier than the most recent run. It probably
38 should be made to do so.
39
40 Kwargs:
41 - rundir(str) : Path to the runinfo directory
42
43 Returns:
44 - a list suitable for the checkpointFiles parameter of DataFlowKernel
45 constructor
46
47 """
48
49 if(not(os.path.isdir(rundir))):
50 return []
51
52 dirs = sorted(os.listdir(rundir))
53
54 checkpoints = []
55
56 for runid in dirs:
57
58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
59
60 if(os.path.isdir(checkpoint)):
61 checkpoints.append(checkpoint)
62
63 return checkpoints
64
65
66 def get_last_checkpoint(rundir="runinfo"):
67 """Finds the checkpoint from the last run, if one exists.
68
69 Note that checkpoints are incremental, and this helper will not find
70 previous checkpoints from earlier than the most recent run. It probably
71 should be made to do so.
72
73 Kwargs:
74 - rundir(str) : Path to the runinfo directory
75
76 Returns:
77 - a list suitable for checkpointFiles parameter of DataFlowKernel
78 constructor, with 0 or 1 elements
79
80 """
81
82 if(not(os.path.isdir(rundir))):
83 return []
84
85 dirs = sorted(os.listdir(rundir))
86
87 if(len(dirs) == 0):
88 return []
89
90 last_runid = dirs[-1]
91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
92
93 if(not(os.path.isdir(last_checkpoint))):
94 return []
95
96 return [last_checkpoint]
97
98
99 def timeout(seconds=None):
100 def decorator(func, *args, **kwargs):
101 @wraps(func)
102 def wrapper(*args, **kwargs):
103 t = threading.Thread(target=func, args=args, kwargs=kwargs)
104 t.start()
105 result = t.join(seconds)
106 if t.is_alive():
107 raise RuntimeError('timed out in {}'.format(func))
108 return result
109 return wrapper
110 return decorator
111
112
113 @contextmanager
114 def wait_for_file(path, seconds=10):
115 for i in range(0, int(seconds * 100)):
116 time.sleep(seconds / 100.)
117 if os.path.exists(path):
118 break
119 yield
120
121
122 @contextmanager
123 def time_limited_open(path, mode, seconds=1):
124 wait_for_file(path, seconds)
125
126 f = open(path, mode)
127 yield f
128 f.close()
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/utils.py b/parsl/utils.py
--- a/parsl/utils.py
+++ b/parsl/utils.py
@@ -17,15 +17,16 @@
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
- try:
- cmd = shlex.split('git rev-parse --short HEAD')
- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
- status = 'dirty' if diff else 'clean'
- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
- except Exception as e:
- pass
+ if os.path.exists(git_dir):
+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
+ try:
+ cmd = shlex.split('git rev-parse --short HEAD')
+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
+ status = 'dirty' if diff else 'clean'
+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
+ except Exception as e:
+ pass
return version
| {"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -17,15 +17,16 @@\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n- try:\n- cmd = shlex.split('git rev-parse --short HEAD')\n- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n- status = 'dirty' if diff else 'clean'\n- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n- except Exception as e:\n- pass\n+ if os.path.exists(git_dir):\n+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n+ try:\n+ cmd = shlex.split('git rev-parse --short HEAD')\n+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n+ status = 'dirty' if diff else 'clean'\n+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n+ except Exception as e:\n+ pass\n \n return version\n", "issue": "parsl installed from pip tries to determine its version using git\nEvery time I run parsl I get:\r\n\r\n```\r\nkacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py\r\nfatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'\r\n```\r\n\r\nand in logs:\r\n\r\n```\r\n2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n```\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}], "after_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n if os.path.exists(git_dir):\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]} | 1,615 | 342 |
gh_patches_debug_8615 | rasdani/github-patches | git_diff | secdev__scapy-373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unknown.version in egg.info and in the banner
Hi,
I'm porting 2.3.3 to OpenBSD. I had two issues:
1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'
I patched setup.py to hardcode the version then it worked
```
--- setup.py.orig Tue Oct 18 10:44:43 2016
+++ setup.py Mon Oct 31 17:19:45 2016
@@ -47,7 +47,7 @@ if os.name == "nt":
setup(
name='scapy',
- version=__import__('scapy').VERSION,
+ version='2.3.3',
packages=[
'scapy',
'scapy/arch',
```
I now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info
2. running scapy it says "Welcome to Scapy (unknown.version)" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/__init__.py`
Content:
```
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 """
7 Scapy: create, send, sniff, dissect and manipulate network packets.
8
9 Usable either from an interactive console or as a Python library.
10 http://www.secdev.org/projects/scapy
11 """
12
13 import os
14 import re
15 import subprocess
16
17
18 _SCAPY_PKG_DIR = os.path.dirname(__file__)
19
20 def _version_from_git_describe():
21 """
22 Read the version from ``git describe``. It returns the latest tag with an
23 optional suffix if the current directory is not exactly on the tag.
24
25 Example::
26
27 $ git describe --always
28 v2.3.2-346-g164a52c075c8
29
30 The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are
31 removed if present.
32
33 If the current directory is not exactly on the tag, a ``.devN`` suffix is
34 appended where N is the number of commits made after the last tag.
35
36 Example::
37
38 >>> _version_from_git_describe()
39 '2.3.2.dev346'
40 """
41 p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
42 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
43
44 out, err = p.communicate()
45
46 if p.returncode == 0:
47 tag = out.strip()
48 match = re.match(r'^v?(.+?)-(\d+)-g[a-f0-9]+$', tag)
49 if match:
50 # remove the 'v' prefix and add a '.devN' suffix
51 return '%s.dev%s' % (match.group(1), match.group(2))
52 else:
53 # just remove the 'v' prefix
54 return re.sub(r'^v', '', tag)
55 else:
56 raise subprocess.CalledProcessError(p.returncode, err)
57
58 def _version():
59 version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')
60 try:
61 tag = _version_from_git_describe()
62 # successfully read the tag from git, write it in VERSION for
63 # installation and/or archive generation.
64 with open(version_file, 'w') as f:
65 f.write(tag)
66 return tag
67 except:
68 # failed to read the tag from git, try to read it from a VERSION file
69 try:
70 with open(version_file, 'r') as f:
71 tag = f.read()
72 return tag
73 except:
74 return 'unknown.version'
75
76 VERSION = _version()
77
78 if __name__ == "__main__":
79 from scapy.main import interact
80 interact()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scapy/__init__.py b/scapy/__init__.py
--- a/scapy/__init__.py
+++ b/scapy/__init__.py
@@ -71,7 +71,17 @@
tag = f.read()
return tag
except:
- return 'unknown.version'
+ # Rely on git archive "export-subst" git attribute.
+ # See 'man gitattributes' for more details.
+ git_archive_id = '$Format:%h %d$'
+ sha1 = git_archive_id.strip().split()[0]
+ match = re.search(r'tag:(\S+)', git_archive_id)
+ if match:
+ return match.group(1)
+ elif sha1:
+ return sha1
+ else:
+ return 'unknown.version'
VERSION = _version()
| {"golden_diff": "diff --git a/scapy/__init__.py b/scapy/__init__.py\n--- a/scapy/__init__.py\n+++ b/scapy/__init__.py\n@@ -71,7 +71,17 @@\n tag = f.read()\n return tag\n except:\n- return 'unknown.version'\n+ # Rely on git archive \"export-subst\" git attribute.\n+ # See 'man gitattributes' for more details.\n+ git_archive_id = '$Format:%h %d$'\n+ sha1 = git_archive_id.strip().split()[0]\n+ match = re.search(r'tag:(\\S+)', git_archive_id)\n+ if match:\n+ return match.group(1)\n+ elif sha1:\n+ return sha1\n+ else:\n+ return 'unknown.version'\n \n VERSION = _version()\n", "issue": "unknown.version in egg.info and in the banner\nHi,\r\n\r\nI'm porting 2.3.3 to OpenBSD. I had two issues:\r\n1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'\r\n\r\nI patched setup.py to hardcode the version then it worked\r\n```\r\n--- setup.py.orig Tue Oct 18 10:44:43 2016\r\n+++ setup.py Mon Oct 31 17:19:45 2016\r\n@@ -47,7 +47,7 @@ if os.name == \"nt\":\r\n \r\n setup(\r\n name='scapy',\r\n- version=__import__('scapy').VERSION,\r\n+ version='2.3.3',\r\n packages=[\r\n 'scapy',\r\n 'scapy/arch',\r\n\r\n```\r\nI now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info\r\n\r\n2. running scapy it says \"Welcome to Scapy (unknown.version)\" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}], "after_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n # Rely on git archive \"export-subst\" git attribute.\n # See 'man gitattributes' for more details.\n git_archive_id = '$Format:%h %d$'\n sha1 = git_archive_id.strip().split()[0]\n match = re.search(r'tag:(\\S+)', git_archive_id)\n if match:\n return match.group(1)\n elif sha1:\n return sha1\n else:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}]} | 1,313 | 186 |
gh_patches_debug_24772 | rasdani/github-patches | git_diff | Flexget__Flexget-548 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[add] jinja split: Adds split into jinja filters
Sorry about this, I found the solution
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/input/trakt_emit.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 import hashlib
3 import logging
4 from urlparse import urljoin
5
6 from requests import RequestException
7
8 from flexget import plugin
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.utils import json
12 from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
13
14 log = logging.getLogger('trakt_emit')
15
16
17 class TraktEmit(object):
18 """
19 Creates an entry for the latest or the next item in your watched or collected
20 episodes in your trakt account.
21
22 Syntax:
23
24 trakt_emit:
25 username: <value>
26 position: <last|next>
27 context: <collect|collected|watch|watched>
28 list: <value>
29
30 Options username, password and api_key are required.
31
32 """
33
34 schema = {
35 'type': 'object',
36 'properties': {
37 'username': {'type': 'string'},
38 'password': {'type': 'string'},
39 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},
40 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},
41 'list': {'type': 'string'}
42 },
43 'required': ['username'],
44 'additionalProperties': False
45 }
46
47 def on_task_input(self, task, config):
48 session = get_session(config['username'], config.get('password'))
49 listed_series = {}
50 if config.get('list'):
51 url = urljoin(API_URL, 'users/%s/' % config['username'])
52 if config['list'] in ['collection', 'watchlist', 'watched']:
53 url = urljoin(url, '%s/shows' % config['list'])
54 else:
55 url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))
56 try:
57 data = session.get(url).json()
58 except RequestException as e:
59 raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))
60 if not data:
61 log.warning('The list "%s" is empty.' % config['list'])
62 return
63 for item in data:
64 if item['show'] is not None:
65 if not item['show']['title']:
66 # Seems we can get entries with a blank show title sometimes
67 log.warning('Found trakt list show with no series name.')
68 continue
69 trakt_id = item['show']['ids']['trakt']
70 listed_series[trakt_id] = {
71 'series_name': item['show']['title'],
72 'trakt_id': trakt_id,
73 'tvdb_id': item['show']['ids']['tvdb']}
74 context = config['context']
75 if context == 'collected':
76 context = 'collection'
77 entries = []
78 for trakt_id, fields in listed_series.iteritems():
79 url = get_api_url('shows', trakt_id, 'progress', context)
80 try:
81 data = session.get(url).json()
82 except RequestException as e:
83 raise plugin.PluginError('TODO: error message')
84 if config['position'] == 'next' and data.get('next_episode'):
85 # If the next episode is already in the trakt database, we'll get it here
86 eps = data['next_episode']['season']
87 epn = data['next_episode']['number']
88 else:
89 # If we need last ep, or next_episode was not provided, search for last ep
90 for seas in reversed(data['seasons']):
91 # Find the first season with collected/watched episodes
92 if seas['completed'] > 0:
93 eps = seas['number']
94 # Pick the highest collected/watched episode
95 epn = max(item['number'] for item in seas['episodes'] if item['completed'])
96 # If we are in next episode mode, we have to increment this number
97 if config['position'] == 'next':
98 if seas['completed'] >= seas['aired']:
99 # TODO: next_episode doesn't count unaired episodes right now, this will skip to next
100 # season too early when there are episodes left to air this season.
101 eps += 1
102 epn = 1
103 else:
104 epn += 1
105 break
106 if eps and epn:
107 entry = self.make_entry(fields, eps, epn)
108 entries.append(entry)
109 return entries
110
111 def make_entry(self, fields, season, episode):
112 entry = Entry()
113 entry.update(fields)
114 entry['series_season'] = season
115 entry['series_episode'] = episode
116 entry['series_id_type'] = 'ep'
117 entry['series_id'] = 'S%02dE%02d' % (season, episode)
118 entry['title'] = entry['series_name'] + ' ' + entry['series_id']
119 entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)
120 return entry
121
122
123 @event('plugin.register')
124 def register_plugin():
125 plugin.register(TraktEmit, 'trakt_emit', api_ver=2)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py
--- a/flexget/plugins/input/trakt_emit.py
+++ b/flexget/plugins/input/trakt_emit.py
@@ -1,5 +1,4 @@
from __future__ import unicode_literals, division, absolute_import
-import hashlib
import logging
from urlparse import urljoin
@@ -8,7 +7,6 @@
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
-from flexget.utils import json
from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
log = logging.getLogger('trakt_emit')
@@ -103,6 +101,12 @@
else:
epn += 1
break
+ else:
+ if config['position'] == 'next':
+ eps = epn = 1
+ else:
+ # There were no watched/collected episodes, nothing to emit in 'last' mode
+ continue
if eps and epn:
entry = self.make_entry(fields, eps, epn)
entries.append(entry)
| {"golden_diff": "diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py\n--- a/flexget/plugins/input/trakt_emit.py\n+++ b/flexget/plugins/input/trakt_emit.py\n@@ -1,5 +1,4 @@\n from __future__ import unicode_literals, division, absolute_import\n-import hashlib\n import logging\n from urlparse import urljoin\n \n@@ -8,7 +7,6 @@\n from flexget import plugin\n from flexget.entry import Entry\n from flexget.event import event\n-from flexget.utils import json\n from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n \n log = logging.getLogger('trakt_emit')\n@@ -103,6 +101,12 @@\n else:\n epn += 1\n break\n+ else:\n+ if config['position'] == 'next':\n+ eps = epn = 1\n+ else:\n+ # There were no watched/collected episodes, nothing to emit in 'last' mode\n+ continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n", "issue": "[add] jinja split: Adds split into jinja filters\nSorry about this, I found the solution\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport hashlib\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' 
% config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def 
on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' % config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n else:\n if config['position'] == 'next':\n eps = epn = 1\n else:\n # There were no watched/collected episodes, nothing to emit in 'last' mode\n continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}]} | 1,697 | 257 |
gh_patches_debug_918 | rasdani/github-patches | git_diff | vas3k__vas3k.club-260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The check_PR action broke on new pull requests
Everything went wrong here after a couple of changes to requirements and the Dockerfiles: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml
Because of this, all new pull requests show up red and can only be merged by a stern admin hand. This CI needs to be rethought somehow. Does anyone have ideas?
Essentially, what matters to me is the linters and that Docker comes up successfully with the new code. Nothing else for now.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/images.py`
Content:
```
1 import io
2 import logging
3 import os
4 from urllib.parse import urlparse
5
6 import requests
7 from PIL import Image
8 from django.conf import settings
9
10 log = logging.getLogger(__name__)
11
12
13 def upload_image_bytes(
14 filename, data, resize=(192, 192), convert_to=None, quality=None
15 ):
16 if not data:
17 return None
18
19 if resize:
20 try:
21 image = Image.open(data)
22 except Exception as ex:
23 log.warning(f"Bad image data: {ex}")
24 return None
25
26 image.thumbnail(resize)
27 saved_image = io.BytesIO()
28 saved_image.name = filename
29
30 try:
31 image.save(saved_image)
32 except OSError:
33 log.warning(f"Error saving image data: {ex}")
34 return None
35
36 data = saved_image.getvalue()
37
38 upload_params = {
39 "code": settings.MEDIA_UPLOAD_CODE
40 }
41
42 if convert_to:
43 upload_params["convert_to"] = convert_to
44
45 if quality:
46 upload_params["quality"] = quality
47
48 try:
49 uploaded = requests.post(
50 url=settings.MEDIA_UPLOAD_URL,
51 params=upload_params,
52 files={"media": (filename, data)},
53 )
54 except requests.exceptions.RequestException as ex:
55 log.error(f"Image upload error: {ex}")
56 return None
57
58 if 200 <= uploaded.status_code <= 299:
59 try:
60 response_data = uploaded.json()
61 except Exception as ex:
62 log.error(f"Image upload error: {ex} ({uploaded.content})")
63 return None
64
65 return response_data["uploaded"][0]
66
67 return None
68
69
70 def upload_image_from_url(url, resize=(192, 192), convert_to="jpg", quality=90):
71 if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:
72 return url
73
74 if not url:
75 return None
76
77 image_name = os.path.basename(urlparse(url).path)
78 if "." not in image_name:
79 image_name += ".jpg"
80
81 try:
82 image_data = io.BytesIO(requests.get(url).content)
83 except requests.exceptions.RequestException:
84 return None
85
86 return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/images.py b/utils/images.py
--- a/utils/images.py
+++ b/utils/images.py
@@ -29,7 +29,7 @@
try:
image.save(saved_image)
- except OSError:
+ except OSError as ex:
log.warning(f"Error saving image data: {ex}")
return None
| {"golden_diff": "diff --git a/utils/images.py b/utils/images.py\n--- a/utils/images.py\n+++ b/utils/images.py\n@@ -29,7 +29,7 @@\n \n try:\n image.save(saved_image)\n- except OSError:\n+ except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n", "issue": "\u0421\u043b\u043e\u043c\u0430\u043b\u0441\u044f check_PR \u044d\u043a\u0448\u043d \u043d\u0430 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b\n\u0412\u043e\u0442 \u0437\u0434\u0435\u0441\u044c \u0432\u0441\u0435 \u043f\u043e\u0448\u043b\u043e \u043d\u0435 \u0442\u0430\u043a \u043f\u043e\u0441\u043b\u0435 \u043f\u0430\u0440\u044b \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0439 \u0432 requirements \u0438 \u0434\u043e\u043a\u0435\u0440\u0444\u0430\u0439\u043b\u0430\u0445: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml\r\n\r\n\u0418\u0437-\u0437\u0430 \u044d\u0442\u043e\u0433\u043e \u0432\u0441\u0435 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b \u043a\u0440\u0430\u0441\u043d\u0435\u043d\u044c\u043a\u0438\u0435 \u0438 \u043c\u0435\u0440\u0436\u0438\u0442\u044c \u0438\u0445 \u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0442\u043e\u043b\u044c\u043a\u043e \u0441\u0443\u0440\u043e\u0432\u043e\u0439 \u0430\u0434\u043c\u0438\u043d\u0441\u043a\u043e\u0439 \u0440\u0443\u043a\u043e\u0439. \u041d\u0430\u0434\u043e \u0431\u044b \u043f\u0435\u0440\u0435\u043e\u0441\u043c\u044b\u0441\u043b\u0438\u0442\u044c \u044d\u0442\u043e\u0442 CI \u043a\u0430\u043a-\u043d\u0438\u0431\u0443\u0434\u044c. \u0423 \u043a\u043e\u0433\u043e \u0435\u0441\u0442\u044c \u0438\u0434\u0435\u0438?\r\n\r\n\u041f\u043e \u0441\u0443\u0442\u0438 \u043c\u043d\u0435 \u0432\u0430\u0436\u043d\u044b \u043b\u0438\u043d\u0442\u0435\u0440\u044b \u0438 \u0447\u0442\u043e\u0431\u044b \u0434\u043e\u043a\u0435\u0440 \u0441 \u043d\u043e\u0432\u044b\u043c \u043a\u043e\u0434\u043e\u043c \u0443\u0441\u043f\u0435\u0448\u043d\u043e \u043f\u043e\u0434\u043d\u0438\u043c\u0430\u043b\u0441\u044f. 
\u041e\u0441\u0442\u0430\u043b\u044c\u043d\u043e\u0433\u043e \u043f\u043e\u043a\u0430 \u043d\u0435\u0442.\n", "before_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}], "after_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), 
convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}]} | 1,075 | 75 |
gh_patches_debug_3052 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1486 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The type of `n_gram` is mislabeled as bool, which should be int type.
## 🐛 Bug
In Translation Task:
The type of `n_gram` is mislabeled as bool, which should be int type.
### To Reproduce
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
flash translation from_hf_datasets --help
```
The error raised:
```
translation: error: Configuration check failed :: Parser key "model.n_gram": Expected a <class 'bool'> but got "4"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/text/seq2seq/translation/model.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional, Union
15
16 from torchmetrics import BLEUScore
17
18 from flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0
19 from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
20 from flash.text.seq2seq.core.model import Seq2SeqTask
21
22
23 class TranslationTask(Seq2SeqTask):
24 """The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see
25 :ref:`translation`.
26
27 You can change the backbone to any translation model from `HuggingFace/transformers
28 <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.
29
30 Args:
31 backbone: backbone model to use for the task.
32 max_source_length: The maximum length to pad / truncate input sequences to.
33 max_target_length: The maximum length to pad / truncate target sequences to.
34 padding: The type of padding to apply. One of: "longest" or ``True``, "max_length", "do_not_pad" or
35 ``False``.
36 loss_fn: Loss function for training.
37 optimizer: Optimizer to use for training.
38 lr_scheduler: The LR scheduler to use during training.
39 metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.
40 Changing this argument currently has no effect.
41 learning_rate: Learning rate to use for training, defaults to `1e-5`
42 num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`
43 n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`
44 smooth: Apply smoothing in BLEU calculation. Defaults to `True`
45 enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training
46 """
47
48 def __init__(
49 self,
50 backbone: str = "t5-small",
51 tokenizer_kwargs: Optional[Dict[str, Any]] = None,
52 max_source_length: int = 128,
53 max_target_length: int = 128,
54 padding: Union[str, bool] = "max_length",
55 loss_fn: LOSS_FN_TYPE = None,
56 optimizer: OPTIMIZER_TYPE = "Adam",
57 lr_scheduler: LR_SCHEDULER_TYPE = None,
58 metrics: METRICS_TYPE = None,
59 learning_rate: Optional[float] = None,
60 num_beams: Optional[int] = 4,
61 n_gram: bool = 4,
62 smooth: bool = True,
63 enable_ort: bool = False,
64 ):
65 self.save_hyperparameters()
66 super().__init__(
67 backbone=backbone,
68 tokenizer_kwargs=tokenizer_kwargs,
69 max_source_length=max_source_length,
70 max_target_length=max_target_length,
71 padding=padding,
72 loss_fn=loss_fn,
73 optimizer=optimizer,
74 lr_scheduler=lr_scheduler,
75 metrics=metrics,
76 learning_rate=learning_rate,
77 num_beams=num_beams,
78 enable_ort=enable_ort,
79 )
80 self.bleu = BLEUScore(
81 n_gram=n_gram,
82 smooth=smooth,
83 )
84
85 @property
86 def task(self) -> str:
87 return "translation"
88
89 def compute_metrics(self, generated_tokens, batch, prefix):
90 reference_corpus = self.decode(batch["labels"])
91 # wrap targets in list as score expects a list of potential references
92 reference_corpus = [[reference] for reference in reference_corpus]
93
94 translate_corpus = self.decode(generated_tokens)
95 translate_corpus = [line for line in translate_corpus]
96
97 if _TM_GREATER_EQUAL_0_7_0:
98 result = self.bleu(translate_corpus, reference_corpus)
99 else:
100 result = self.bleu(reference_corpus, translate_corpus)
101 self.log(f"{prefix}_bleu_score", result, on_step=False, on_epoch=True, prog_bar=True)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py
--- a/flash/text/seq2seq/translation/model.py
+++ b/flash/text/seq2seq/translation/model.py
@@ -58,7 +58,7 @@
metrics: METRICS_TYPE = None,
learning_rate: Optional[float] = None,
num_beams: Optional[int] = 4,
- n_gram: bool = 4,
+ n_gram: int = 4,
smooth: bool = True,
enable_ort: bool = False,
):
| {"golden_diff": "diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py\n--- a/flash/text/seq2seq/translation/model.py\n+++ b/flash/text/seq2seq/translation/model.py\n@@ -58,7 +58,7 @@\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n- n_gram: bool = 4,\n+ n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n", "issue": "The type of `n_gram` is mislabeled as bool, which should be int type.\n## \ud83d\udc1b Bug\r\n\r\nIn Translation Task:\r\nThe type of `n_gram` is mislabeled as bool, which should be int type.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nflash translation from_hf_datasets --help\r\n```\r\nThe error raised:\r\n```\r\ntranslation: error: Configuration check failed :: Parser key \"model.n_gram\": Expected a <class 'bool'> but got \"4\"\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: bool = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. 
One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}]} | 1,581 | 139 |
gh_patches_debug_28402 | rasdani/github-patches | git_diff | dask__distributed-416 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Distributed.joblib code fails to affect sklearn
The comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.
It would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/joblib.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
4 from joblib.parallel import register_parallel_backend
5 from tornado import gen
6
7 from .executor import Executor, _wait
8
9
10 class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
11 MIN_IDEAL_BATCH_DURATION = 0.2
12 MAX_IDEAL_BATCH_DURATION = 1.0
13
14 def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):
15 self.executor = Executor(scheduler_host, loop=loop)
16 self.futures = set()
17
18 def configure(self, n_jobs=1, parallel=None, **backend_args):
19 return self.effective_n_jobs(n_jobs)
20
21 def effective_n_jobs(self, n_jobs=1):
22 return sum(self.executor.ncores().values())
23
24 def apply_async(self, func, *args, **kwargs):
25 callback = kwargs.pop('callback', None)
26 kwargs['pure'] = False
27 future = self.executor.submit(func, *args, **kwargs)
28 self.futures.add(future)
29
30 @gen.coroutine
31 def callback_wrapper():
32 result = yield _wait([future])
33 self.futures.remove(future)
34 callback(result) # gets called in separate thread
35
36 self.executor.loop.add_callback(callback_wrapper)
37
38 future.get = future.result # monkey patch to achieve AsyncResult API
39 return future
40
41 def abort_everything(self, ensure_ready=True):
42 # Tell the executor to cancel any task submitted via this instance
43 # as joblib.Parallel will never access those results.
44 self.executor.cancel(self.futures)
45 self.futures.clear()
46
47
48 register_parallel_backend('distributed', DistributedBackend)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/joblib.py b/distributed/joblib.py
--- a/distributed/joblib.py
+++ b/distributed/joblib.py
@@ -1,10 +1,36 @@
from __future__ import print_function, division, absolute_import
-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
-from joblib.parallel import register_parallel_backend
+from distutils.version import LooseVersion
+
from tornado import gen
from .executor import Executor, _wait
+from .utils import ignoring
+
+
+# A user could have installed joblib, sklearn, both, or neither. Further, only
+# joblib >= 0.10.0 supports backends, so we also need to check for that. This
+# bit of logic is to ensure that we create and register the backend for all
+# viable installations of joblib.
+joblib = sk_joblib = None
+with ignoring(ImportError):
+ import joblib
+ if LooseVersion(joblib.__version__) < '0.10.0':
+ joblib = None
+with ignoring(ImportError):
+ import sklearn.externals.joblib as sk_joblib
+ if LooseVersion(sk_joblib.__version__) < '0.10.0':
+ sk_joblib = None
+
+if joblib:
+ from joblib._parallel_backends import (ParallelBackendBase,
+ AutoBatchingMixin)
+elif sk_joblib:
+ from sklearn.externals.joblib._parallel_backends import (
+ ParallelBackendBase, AutoBatchingMixin)
+else:
+ raise RuntimeError("Joblib backend requires either `joblib` >= '0.10.0' "
+ " or `sklearn` > '0.17.1'. Please install or upgrade")
class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
@@ -45,4 +71,8 @@
self.futures.clear()
-register_parallel_backend('distributed', DistributedBackend)
+# Register the backend with any available versions of joblib
+if joblib:
+ joblib.register_parallel_backend('distributed', DistributedBackend)
+if sk_joblib:
+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)
| {"golden_diff": "diff --git a/distributed/joblib.py b/distributed/joblib.py\n--- a/distributed/joblib.py\n+++ b/distributed/joblib.py\n@@ -1,10 +1,36 @@\n from __future__ import print_function, division, absolute_import\n \n-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\n-from joblib.parallel import register_parallel_backend\n+from distutils.version import LooseVersion\n+\n from tornado import gen\n \n from .executor import Executor, _wait\n+from .utils import ignoring\n+\n+\n+# A user could have installed joblib, sklearn, both, or neither. Further, only\n+# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n+# bit of logic is to ensure that we create and register the backend for all\n+# viable installations of joblib.\n+joblib = sk_joblib = None\n+with ignoring(ImportError):\n+ import joblib\n+ if LooseVersion(joblib.__version__) < '0.10.0':\n+ joblib = None\n+with ignoring(ImportError):\n+ import sklearn.externals.joblib as sk_joblib\n+ if LooseVersion(sk_joblib.__version__) < '0.10.0':\n+ sk_joblib = None\n+\n+if joblib:\n+ from joblib._parallel_backends import (ParallelBackendBase,\n+ AutoBatchingMixin)\n+elif sk_joblib:\n+ from sklearn.externals.joblib._parallel_backends import (\n+ ParallelBackendBase, AutoBatchingMixin)\n+else:\n+ raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n+ \" or `sklearn` > '0.17.1'. Please install or upgrade\")\n \n \n class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n@@ -45,4 +71,8 @@\n self.futures.clear()\n \n \n-register_parallel_backend('distributed', DistributedBackend)\n+# Register the backend with any available versions of joblib\n+if joblib:\n+ joblib.register_parallel_backend('distributed', DistributedBackend)\n+if sk_joblib:\n+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "issue": "Distributed.joblib code fails to affect sklearn\nThe comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. 
It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.\n\nIt would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\nfrom joblib.parallel import register_parallel_backend\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\nregister_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom distutils.version import LooseVersion\n\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\nfrom .utils import ignoring\n\n\n# A user could have installed joblib, sklearn, both, or neither. Further, only\n# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n# bit of logic is to ensure that we create and register the backend for all\n# viable installations of joblib.\njoblib = sk_joblib = None\nwith ignoring(ImportError):\n import joblib\n if LooseVersion(joblib.__version__) < '0.10.0':\n joblib = None\nwith ignoring(ImportError):\n import sklearn.externals.joblib as sk_joblib\n if LooseVersion(sk_joblib.__version__) < '0.10.0':\n sk_joblib = None\n\nif joblib:\n from joblib._parallel_backends import (ParallelBackendBase,\n AutoBatchingMixin)\nelif sk_joblib:\n from sklearn.externals.joblib._parallel_backends import (\n ParallelBackendBase, AutoBatchingMixin)\nelse:\n raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n \" or `sklearn` > '0.17.1'. 
Please install or upgrade\")\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\n# Register the backend with any available versions of joblib\nif joblib:\n joblib.register_parallel_backend('distributed', DistributedBackend)\nif sk_joblib:\n sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}]} | 887 | 483 |
gh_patches_debug_60682 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2076 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Login page: change spacing on left panel
The spacing in the left panel is odd. Change to something like the below:

Note, this will stay in backlog for now as we may want to revise this page to align with the Frog design.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/version.py`
Content:
```
1 hdx_version = 'v0.5.13'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.13'
+hdx_version = 'v0.5.15'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.15'\n", "issue": "Login page: change spacing on left panel \nThe spacing in the left panel is odd. Change to something like the below: \n\n\n\nNote, this will stay in backlog for now as we may want to revise this page to align with the Frog design.\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}], "after_files": [{"content": "hdx_version = 'v0.5.15'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 396 | 108 |
gh_patches_debug_10301 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1157 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update /availability API to pull from new registry
### Issue Description
The [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.
### Acceptance Criteria
- [x] The checkDomain method is used to check that a domain is in the registry or not
and
- [x] The check is used at /availability
- [x] Tests are implemented and/or updated
- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well.
### Additional Context (optional)
Once done, the backend work of #476 and frontend work of #561 can be completed.
_Consider add-on to defend the endpoint from bot spam._
### Implementation Notes
Epp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.
### Issue Links
Blocking #476 and #561
Blocked by #1028
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/api/views.py`
Content:
```
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import JsonResponse
5
6 from django.contrib.auth.decorators import login_required
7
8 import requests
9
10 from cachetools.func import ttl_cache
11
12
13 DOMAIN_FILE_URL = (
14 "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
15 )
16
17
18 DOMAIN_API_MESSAGES = {
19 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
20 " For example, if you want www.city.gov, you would enter “city”"
21 " (without the quotes).",
22 "extra_dots": "Enter the .gov domain you want without any periods.",
23 "unavailable": "That domain isn’t available. Try entering another one."
24 " Contact us if you need help coming up with a domain.",
25 "invalid": "Enter a domain using only letters,"
26 " numbers, or hyphens (though we don't recommend using hyphens).",
27 "success": "That domain is available!",
28 }
29
30
31 # this file doesn't change that often, nor is it that big, so cache the result
32 # in memory for ten minutes
33 @ttl_cache(ttl=600)
34 def _domains():
35 """Return a list of the current .gov domains.
36
37 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
38 lowercase everything and return the list.
39 """
40 DraftDomain = apps.get_model("registrar.DraftDomain")
41 # 5 second timeout
42 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
43 domains = set()
44 # skip the first line
45 for line in file_contents.splitlines()[1:]:
46 # get the domain before the first comma
47 domain = line.split(",", 1)[0]
48 # sanity-check the string we got from the file here
49 if DraftDomain.string_could_be_domain(domain):
50 # lowercase everything when we put it in domains
51 domains.add(domain.lower())
52 return domains
53
54
55 def in_domains(domain):
56 """Return true if the given domain is in the domains list.
57
58 The given domain is lowercased to match against the domains list. If the
59 given domain doesn't end with .gov, ".gov" is added when looking for
60 a match.
61 """
62 domain = domain.lower()
63 if domain.endswith(".gov"):
64 return domain.lower() in _domains()
65 else:
66 # domain search string doesn't end with .gov, add it on here
67 return (domain + ".gov") in _domains()
68
69
70 @require_http_methods(["GET"])
71 @login_required
72 def available(request, domain=""):
73 """Is a given domain available or not.
74
75 Response is a JSON dictionary with the key "available" and value true or
76 false.
77 """
78 DraftDomain = apps.get_model("registrar.DraftDomain")
79 # validate that the given domain could be a domain name and fail early if
80 # not.
81 if not (
82 DraftDomain.string_could_be_domain(domain)
83 or DraftDomain.string_could_be_domain(domain + ".gov")
84 ):
85 return JsonResponse(
86 {"available": False, "message": DOMAIN_API_MESSAGES["invalid"]}
87 )
88 # a domain is available if it is NOT in the list of current domains
89 if in_domains(domain):
90 return JsonResponse(
91 {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
92 )
93 else:
94 return JsonResponse(
95 {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -59,12 +59,12 @@
given domain doesn't end with .gov, ".gov" is added when looking for
a match.
"""
- domain = domain.lower()
+ Domain = apps.get_model("registrar.Domain")
if domain.endswith(".gov"):
- return domain.lower() in _domains()
+ return Domain.available(domain)
else:
# domain search string doesn't end with .gov, add it on here
- return (domain + ".gov") in _domains()
+ return Domain.available(domain + ".gov")
@require_http_methods(["GET"])
| {"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -59,12 +59,12 @@\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n- domain = domain.lower()\n+ Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n- return domain.lower() in _domains()\n+ return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n- return (domain + \".gov\") in _domains()\n+ return Domain.available(domain + \".gov\")\n \n \n @require_http_methods([\"GET\"])\n", "issue": "Update /availability API to pull from new registry\n### Issue Description\r\n\r\nThe [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] The checkDomain method is used to check that a domain is in the registry or not\r\n\r\nand\r\n\r\n- [x] The check is used at /availability \r\n- [x] Tests are implemented and/or updated\r\n- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well. \r\n\r\n### Additional Context (optional)\r\n\r\nOnce done, the backend work of #476 and frontend work of #561 can be completed.\r\n\r\n_Consider add-on to defend the endpoint from bot spam._\r\n\r\n### Implementation Notes\r\n\r\nEpp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.\r\n\r\n### Issue Links\r\n\r\nBlocking #476 and #561\r\nBlocked by #1028 \n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n domain = domain.lower()\n if domain.endswith(\".gov\"):\n return domain.lower() in _domains()\n else:\n # domain search string doesn't end with .gov, add it on here\n return (domain + \".gov\") in _domains()\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}], "after_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}]} | 1,560 | 161 |
gh_patches_debug_7477 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)
[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)
**Describe the solution you'd like**
- [x] power control
- [x] brightness control
- [x] CCT control
- [x] remove color control

Maybee because the board is also color capable

**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**
[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)
**Additional context**
https://zigbee.blakadder.com/Tuya_TS0502A.html
Touchlink resetable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/lidl/cct.py`
Content:
```
1 """Quirk for LIDL CCT bulb."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 from zigpy.zcl.clusters.general import (
5 Basic,
6 GreenPowerProxy,
7 Groups,
8 Identify,
9 LevelControl,
10 OnOff,
11 Ota,
12 Scenes,
13 Time,
14 )
15 from zigpy.zcl.clusters.lighting import Color
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27
28 class LidlCCTColorCluster(CustomCluster, Color):
29 """Lidl CCT Lighting custom cluster."""
30
31 # Remove RGB color wheel for CCT Lighting: only expose color temperature
32 # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
33 _CONSTANT_ATTRIBUTES = {0x400A: 16}
34
35
36 class CCTLight(CustomDevice):
37 """Lidl CCT Lighting device."""
38
39 signature = {
40 MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
41 ENDPOINTS: {
42 1: {
43 # <SimpleDescriptor endpoint=1 profile=260 device_type=268
44 # device_version=1
45 # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
46 # output_clusters=[10, 25]
47 PROFILE_ID: zha.PROFILE_ID,
48 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
49 INPUT_CLUSTERS: [
50 Basic.cluster_id,
51 Identify.cluster_id,
52 Groups.cluster_id,
53 Scenes.cluster_id,
54 OnOff.cluster_id,
55 LevelControl.cluster_id,
56 Color.cluster_id,
57 LightLink.cluster_id,
58 ],
59 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
60 },
61 242: {
62 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
63 # device_version=0
64 # input_clusters=[]
65 # output_clusters=[33]
66 PROFILE_ID: 41440,
67 DEVICE_TYPE: 97,
68 INPUT_CLUSTERS: [],
69 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
70 },
71 },
72 }
73
74 replacement = {
75 ENDPOINTS: {
76 1: {
77 PROFILE_ID: zha.PROFILE_ID,
78 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
79 INPUT_CLUSTERS: [
80 Basic.cluster_id,
81 Identify.cluster_id,
82 Groups.cluster_id,
83 Scenes.cluster_id,
84 OnOff.cluster_id,
85 LevelControl.cluster_id,
86 LidlCCTColorCluster,
87 LightLink.cluster_id,
88 ],
89 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
90 },
91 242: {
92 PROFILE_ID: 41440,
93 DEVICE_TYPE: 97,
94 INPUT_CLUSTERS: [],
95 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
96 },
97 }
98 }
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py
--- a/zhaquirks/lidl/cct.py
+++ b/zhaquirks/lidl/cct.py
@@ -37,7 +37,12 @@
"""Lidl CCT Lighting device."""
signature = {
- MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
+ MODELS_INFO: [
+ ("_TZ3000_49qchf10", "TS0502A"),
+ ("_TZ3000_oborybow", "TS0502A"),
+ ("_TZ3000_9evm3otq", "TS0502A"),
+ ("_TZ3000_rylaozuc", "TS0502A"),
+ ],
ENDPOINTS: {
1: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=268
| {"golden_diff": "diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py\n--- a/zhaquirks/lidl/cct.py\n+++ b/zhaquirks/lidl/cct.py\n@@ -37,7 +37,12 @@\n \"\"\"Lidl CCT Lighting device.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n+ MODELS_INFO: [\n+ (\"_TZ3000_49qchf10\", \"TS0502A\"),\n+ (\"_TZ3000_oborybow\", \"TS0502A\"),\n+ (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n+ (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n+ ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n", "issue": "[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)\n[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)\r\n\r\n**Describe the solution you'd like**\r\n- [x] power control\r\n- [x] brightness control\r\n- [x] CCT control\r\n- [x] remove color control\r\n\r\n\r\n\r\nMaybee because the board is also color capable\r\n\r\n\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)\r\n\r\n\r\n**Additional context**\r\nhttps://zigbee.blakadder.com/Tuya_TS0502A.html\r\nTouchlink resetable\n", "before_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n 
LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}], "after_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}]} | 1,563 | 260 |
gh_patches_debug_57081 | rasdani/github-patches | git_diff | SeldonIO__MLServer-945 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MLServer is incompatible with latest release of FastAPI
MLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis.
```
2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop
2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.
Traceback (most recent call last):
File "/home/cc/miniconda3/envs/central-1/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 79, in main
root()
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 20, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 43, in start
server = MLServer(settings)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py", line 71, in __init__
self._rest_server = RESTServer(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py", line 26, in __init__
self._app = create_app(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py", line 43, in create_app
APIRoute(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py", line 400, in __init__
self.response_field = create_response_field(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py", line 90, in create_response_field
raise fastapi.exceptions.FastAPIError(
fastapi.exceptions.FastAPIError: Invalid args for response field! Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 packages=find_packages(exclude=["tests", "tests.*"]),
42 install_requires=[
43 "click",
44 "fastapi<=0.88.0",
45 "python-dotenv",
46 "grpcio",
47 "importlib-metadata;python_version<'3.8'",
48 "numpy",
49 "pandas",
50 "protobuf",
51 "uvicorn",
52 "starlette_exporter",
53 "py-grpc-prometheus",
54 "uvloop;" + env_marker_cpython,
55 "aiokafka",
56 "tritonclient[http]>=2.24",
57 "aiofiles",
58 "orjson",
59 ],
60 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
61 long_description=_load_description(),
62 long_description_content_type="text/markdown",
63 license="Apache 2.0",
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,8 @@
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
"click",
- "fastapi<=0.88.0",
+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
+ "fastapi<=0.89.1, !=0.89.0",
"python-dotenv",
"grpcio",
"importlib-metadata;python_version<'3.8'",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,8 @@\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n- \"fastapi<=0.88.0\",\n+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n+ \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n", "issue": "MLServer is incompatible with latest release of FastAPI\nMLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis.\r\n```\r\n2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop\r\n2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.\r\nTraceback (most recent call last):\r\n File \"/home/cc/miniconda3/envs/central-1/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 79, in main\r\n root()\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 20, in wrapper\r\n return asyncio.run(f(*args, **kwargs))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"uvloop/loop.pyx\", line 1517, in uvloop.loop.Loop.run_until_complete\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 43, in start\r\n server = MLServer(settings)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py\", line 71, in __init__\r\n self._rest_server = RESTServer(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py\", line 26, in __init__\r\n self._app = create_app(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py\", line 43, in create_app\r\n APIRoute(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py\", line 400, in __init__\r\n self.response_field = create_response_field(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py\", line 90, in create_response_field\r\n raise fastapi.exceptions.FastAPIError(\r\nfastapi.exceptions.FastAPIError: Invalid args for response field! 
Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n \"fastapi<=0.88.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 1,737 | 138 |
gh_patches_debug_40195 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use strategy 0 for user-provided gradients in minuit
# Description
Since we have an exact gradient, we can disable the checks MINUIT does.
cc @alexander-held
--- END ISSUE ---
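The issue above asks to relax MINUIT's internal checks when an exact, user-supplied gradient is available. A minimal sketch of the underlying iminuit behaviour, assuming the iminuit 2.x interface (the cost function and starting values are made up for illustration):

```python
# strategy 0 skips the extra derivative/Hessian checks, which is safe when the
# supplied gradient is exact; strategy 1 is iminuit's default.
import iminuit


def cost(x, y):
    return (x - 1.0) ** 2 + (y - 2.0) ** 2


def grad(x, y):
    # exact analytic gradient of `cost`
    return [2.0 * (x - 1.0), 2.0 * (y - 2.0)]


m = iminuit.Minuit(cost, grad=grad, x=0.0, y=0.0)
m.errordef = 1   # least-squares convention
m.strategy = 0   # trust the exact gradient and skip the extra checks
m.migrad()
m.hesse()        # still needed afterwards for reliable uncertainties
print(m.values, m.errors)
```

With an exact gradient, strategy 0 avoids redundant numerical derivative evaluations, which is the speed-up the issue is after; the explicit `hesse()` call keeps the error estimates trustworthy.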
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/optimize/opt_minuit.py`
Content:
```
1 """Minuit Optimizer Class."""
2 from .. import default_backend, exceptions
3 from .mixins import OptimizerMixin
4 import scipy
5 import iminuit
6
7
8 class minuit_optimizer(OptimizerMixin):
9 """
10 Optimizer that uses iminuit.Minuit.migrad.
11 """
12
13 __slots__ = ['name', 'errordef', 'steps']
14
15 def __init__(self, *args, **kwargs):
16 """
17 Create MINUIT Optimizer.
18
19 .. note::
20
21 ``errordef`` should be 1.0 for a least-squares cost function and 0.5
22 for negative log-likelihood function. See page 37 of
23 http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes
24 called ``UP`` in the ``MINUIT`` docs.
25
26
27 Args:
28 errordef (:obj:`float`): See minuit docs. Default is 1.0.
29 steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
30 """
31 self.name = 'minuit'
32 self.errordef = kwargs.pop('errordef', 1)
33 self.steps = kwargs.pop('steps', 1000)
34 super().__init__(*args, **kwargs)
35
36 def _get_minimizer(
37 self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False
38 ):
39
40 step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]
41 fixed_vals = fixed_vals or []
42 # Minuit wants True/False for each parameter
43 fixed_bools = [False] * len(init_pars)
44 for index, val in fixed_vals:
45 fixed_bools[index] = True
46 init_pars[index] = val
47 step_sizes[index] = 0.0
48
49 # Minuit requires jac=callable
50 if do_grad:
51 wrapped_objective = lambda pars: objective_and_grad(pars)[0]
52 jac = lambda pars: objective_and_grad(pars)[1]
53 else:
54 wrapped_objective = objective_and_grad
55 jac = None
56
57 kwargs = dict(
58 fcn=wrapped_objective,
59 grad=jac,
60 start=init_pars,
61 error=step_sizes,
62 limit=init_bounds,
63 fix=fixed_bools,
64 print_level=self.verbose,
65 errordef=self.errordef,
66 )
67 return iminuit.Minuit.from_array_func(**kwargs)
68
69 def _minimize(
70 self,
71 minimizer,
72 func,
73 x0,
74 do_grad=False,
75 bounds=None,
76 fixed_vals=None,
77 return_uncertainties=False,
78 options={},
79 ):
80
81 """
82 Same signature as :func:`scipy.optimize.minimize`.
83
84 Note: an additional `minuit` is injected into the fitresult to get the
85 underlying minimizer.
86
87 Minimizer Options:
88 maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
89 return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
90
91 Returns:
92 fitresult (scipy.optimize.OptimizeResult): the fit result
93 """
94 maxiter = options.pop('maxiter', self.maxiter)
95 return_uncertainties = options.pop('return_uncertainties', False)
96 if options:
97 raise exceptions.Unsupported(
98 f"Unsupported options were passed in: {list(options.keys())}."
99 )
100
101 minimizer.migrad(ncall=maxiter)
102 # Following lines below come from:
103 # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
104 message = "Optimization terminated successfully."
105 if not minimizer.valid:
106 message = "Optimization failed."
107 fmin = minimizer.fmin
108 if fmin.has_reached_call_limit:
109 message += " Call limit was reached."
110 if fmin.is_above_max_edm:
111 message += " Estimated distance to minimum too large."
112
113 n = len(x0)
114 hess_inv = default_backend.ones((n, n))
115 if minimizer.valid:
116 hess_inv = minimizer.np_covariance()
117
118 unc = None
119 if return_uncertainties:
120 unc = minimizer.np_errors()
121
122 return scipy.optimize.OptimizeResult(
123 x=minimizer.np_values(),
124 unc=unc,
125 success=minimizer.valid,
126 fun=minimizer.fval,
127 hess_inv=hess_inv,
128 message=message,
129 nfev=minimizer.ncalls,
130 njev=minimizer.ngrads,
131 minuit=minimizer,
132 )
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -10,7 +10,7 @@
Optimizer that uses iminuit.Minuit.migrad.
"""
- __slots__ = ['name', 'errordef', 'steps']
+ __slots__ = ['name', 'errordef', 'steps', 'strategy']
def __init__(self, *args, **kwargs):
"""
@@ -27,10 +27,12 @@
Args:
errordef (:obj:`float`): See minuit docs. Default is 1.0.
steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
"""
self.name = 'minuit'
self.errordef = kwargs.pop('errordef', 1)
self.steps = kwargs.pop('steps', 1000)
+ self.strategy = kwargs.pop('strategy', None)
super().__init__(*args, **kwargs)
def _get_minimizer(
@@ -87,17 +89,24 @@
Minimizer Options:
maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.
Returns:
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
return_uncertainties = options.pop('return_uncertainties', False)
+ # 0: Fast, user-provided gradient
+ # 1: Default, no user-provided gradient
+ strategy = options.pop(
+ 'strategy', self.strategy if self.strategy else not do_grad
+ )
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
)
+ minimizer.strategy = strategy
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
# https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
@@ -113,6 +122,8 @@
n = len(x0)
hess_inv = default_backend.ones((n, n))
if minimizer.valid:
+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
+ minimizer.hesse()
hess_inv = minimizer.np_covariance()
unc = None
| {"golden_diff": "diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -10,7 +10,7 @@\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n \n- __slots__ = ['name', 'errordef', 'steps']\n+ __slots__ = ['name', 'errordef', 'steps', 'strategy']\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -27,10 +27,12 @@\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n+ self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n \n def _get_minimizer(\n@@ -87,17 +89,24 @@\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n \n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n+ # 0: Fast, user-provided gradient\n+ # 1: Default, no user-provided gradient\n+ strategy = options.pop(\n+ 'strategy', self.strategy if self.strategy else not do_grad\n+ )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n \n+ minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n@@ -113,6 +122,8 @@\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.\n+ minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n \n unc = None\n", "issue": "use strategy 0 for user-provided gradients in minuit\n# Description\r\n\r\nsince we have exact gradient we can disable the checks minuit does\r\n\r\ncc @alexander-held \n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. 
Default is 1000.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}], "after_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps', 'strategy']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. 
This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n # 0: Fast, user-provided gradient\n # 1: Default, no user-provided gradient\n strategy = options.pop(\n 'strategy', self.strategy if self.strategy else not do_grad\n )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n # Extra call to hesse() after migrad() is always needed for good error estimates. 
If you pass a user-provided gradient to MINUIT, convergence is faster.\n minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}]} | 1,643 | 706 |
gh_patches_debug_26193 | rasdani/github-patches | git_diff | python-discord__site-1165 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support rescheduling of offensive messages
When a message trips the bot's filter it is removed after a period of time. During this period it is stored as a record in the database.
When this deletion date is reached, the bot attempts to remove the message from Discord and delete the record from the offensive message table. We currently handle the case where the message is not found (already deleted), but if another error occurs and the message is not deleted, we still delete the database record, leaving the message in place.
We should allow the bot to perform a PATCH request to the offensive message endpoint to update the delete time and reschedule the deletion if something has failed (for example, a hiccup on Discord's end).
However, we must also bear in mind that unbounded rescheduling could leave lingering records in our database if a message that cannot be deleted is rescheduled repeatedly.
--- END ISSUE ---
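In Django REST Framework terms, the request above amounts to exposing `partial_update` on the existing viewset so the bot can PATCH a new `delete_date`. A hypothetical sketch of the pattern (class and attribute names are illustrative, not pydis_site's actual code, and it assumes an existing Django/DRF project):

```python
# Adding UpdateModelMixin is what maps PATCH /<id> to partial_update, letting the
# bot push the deletion time forward and retry instead of dropping the record.
from rest_framework.mixins import (
    CreateModelMixin,
    DestroyModelMixin,
    ListModelMixin,
    UpdateModelMixin,
)
from rest_framework.viewsets import GenericViewSet


class ReschedulableMessageViewSet(
    CreateModelMixin,
    ListModelMixin,
    UpdateModelMixin,
    DestroyModelMixin,
    GenericViewSet,
):
    """PATCH {'delete_date': ...} reschedules deletion; DELETE removes the row."""

    serializer_class = ...  # placeholder: the offensive-message serializer
    queryset = ...          # placeholder: OffensiveMessage.objects.all()
```

On the bot side, capping the number of reschedules (or applying backoff) would address the concern about undeletable messages being rescheduled indefinitely.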
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/apps/api/viewsets/bot/offensive_message.py`
Content:
```
1 from rest_framework.mixins import (
2 CreateModelMixin,
3 DestroyModelMixin,
4 ListModelMixin
5 )
6 from rest_framework.viewsets import GenericViewSet
7
8 from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage
9 from pydis_site.apps.api.serializers import OffensiveMessageSerializer
10
11
12 class OffensiveMessageViewSet(
13 CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
14 ):
15 """
16 View providing CRUD access to offensive messages.
17
18 ## Routes
19 ### GET /bot/offensive-messages
20 Returns all offensive messages in the database.
21
22 #### Response format
23 >>> [
24 ... {
25 ... 'id': '631953598091100200',
26 ... 'channel_id': '291284109232308226',
27 ... 'delete_date': '2019-11-01T21:51:15.545000Z'
28 ... },
29 ... ...
30 ... ]
31
32 #### Status codes
33 - 200: returned on success
34
35 ### POST /bot/offensive-messages
36 Create a new offensive message object.
37
38 #### Request body
39 >>> {
40 ... 'id': int,
41 ... 'channel_id': int,
42 ... 'delete_date': datetime.datetime # ISO-8601-formatted date
43 ... }
44
45 #### Status codes
46 - 201: returned on success
47 - 400: if the body format is invalid
48
49 ### DELETE /bot/offensive-messages/<id:int>
50 Delete the offensive message object with the given `id`.
51
52 #### Status codes
53 - 204: returned on success
54 - 404: if a offensive message object with the given `id` does not exist
55
56 ## Authentication
57 Requires an API token.
58 """
59
60 serializer_class = OffensiveMessageSerializer
61 queryset = OffensiveMessage.objects.all()
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py
--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py
+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py
@@ -1,6 +1,7 @@
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
+ UpdateModelMixin,
ListModelMixin
)
from rest_framework.viewsets import GenericViewSet
@@ -10,7 +11,7 @@
class OffensiveMessageViewSet(
- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet
):
"""
View providing CRUD access to offensive messages.
@@ -46,6 +47,16 @@
- 201: returned on success
- 400: if the body format is invalid
+ ### PATCH /bot/offensive-messages/<id:int>
+ Perform a partial update of the offensive message with the given `id`.
+ Intended to allow rescheduling the deletion date in case the bot's attempt
+ to delete the message failed due to another error than the message already
+ being deleted.
+
+ #### Status codes
+ - 200: returned on success
+ - 404: if a offensive message object with the given `id` does not exist
+
### DELETE /bot/offensive-messages/<id:int>
Delete the offensive message object with the given `id`.
| {"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py\n+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n@@ -1,6 +1,7 @@\n from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n+ UpdateModelMixin,\n ListModelMixin\n )\n from rest_framework.viewsets import GenericViewSet\n@@ -10,7 +11,7 @@\n \n \n class OffensiveMessageViewSet(\n- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n ):\n \"\"\"\n View providing CRUD access to offensive messages.\n@@ -46,6 +47,16 @@\n - 201: returned on success\n - 400: if the body format is invalid\n \n+ ### PATCH /bot/offensive-messages/<id:int>\n+ Perform a partial update of the offensive message with the given `id`.\n+ Intended to allow rescheduling the deletion date in case the bot's attempt\n+ to delete the message failed due to another error than the message already\n+ being deleted.\n+\n+ #### Status codes\n+ - 200: returned on success\n+ - 404: if a offensive message object with the given `id` does not exist\n+\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n", "issue": "Support rescheduling of offensive messages\nWhen a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database.\r\n\r\nWhen this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around.\r\n\r\nWe should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).\r\n\r\nHowever, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.\n", "before_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... 
}\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}], "after_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n UpdateModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... }\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### PATCH /bot/offensive-messages/<id:int>\n Perform a partial update of the offensive message with the given `id`.\n Intended to allow rescheduling the deletion date in case the bot's attempt\n to delete the message failed due to another error than the message already\n being deleted.\n\n #### Status codes\n - 200: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}]} | 1,006 | 353 |
gh_patches_debug_16293 | rasdani/github-patches | git_diff | nf-core__tools-1261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linting does not recognize README Nextflow minimum version mention in Quick Start
When running `nf-core lint` with a readme file that has the following in it:
```
## Quick Start
1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)
```
A warning is triggered:
```
readme: README did not have a Nextflow minimum version mentioned in Quick Start section.
```
This warning should not be triggered, as the minimum Nextflow version is present in the README file.
Link to code location
https://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66
--- END ISSUE ---
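The mismatch is easy to reproduce with the two URLs involved. A small self-contained check (the README line is the one quoted above; the patterns are simplified copies of the linter's regex):

```python
import re

readme_line = (
    "1. Install [`Nextflow`]"
    "(https://www.nextflow.io/docs/latest/getstarted.html#installation) "
    "(`>=21.04.0`)"
)

# Pattern the linter currently uses: it only accepts the nf-co.re link.
old_pattern = (
    r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)
print(re.search(old_pattern, readme_line))  # None -> the spurious warning

# Accepting the nextflow.io installation URL resolves the false positive.
new_pattern = (
    r"1\.\s*Install\s*\[`Nextflow`\]"
    r"\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)
print(re.search(new_pattern, readme_line).group(1))  # '21.04.0'
```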
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/lint/readme.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import re
5
6
7 def readme(self):
8 """Repository ``README.md`` tests
9
10 The ``README.md`` files for a project are very important and must meet some requirements:
11
12 * Nextflow badge
13
14 * If no Nextflow badge is found, a warning is given
15 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails
16 * Example badge code:
17
18 .. code-block:: md
19
20 [](https://www.nextflow.io/)
21
22 * Bioconda badge
23
24 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required
25 * Required badge code:
26
27 .. code-block:: md
28
29 [](https://bioconda.github.io/)
30
31 .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.
32 """
33 passed = []
34 warned = []
35 failed = []
36
37 with open(os.path.join(self.wf_path, "README.md"), "r") as fh:
38 content = fh.read()
39
40 # Check that there is a readme badge showing the minimum required version of Nextflow
41 # [](https://www.nextflow.io/)
42 # and that it has the correct version
43 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)"
44 match = re.search(nf_badge_re, content)
45 if match:
46 nf_badge_version = match.group(1).strip("'\"")
47 try:
48 assert nf_badge_version == self.minNextflowVersion
49 except (AssertionError, KeyError):
50 failed.append(
51 "README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`".format(
52 nf_badge_version, self.minNextflowVersion
53 )
54 )
55 else:
56 passed.append(
57 "README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`".format(
58 nf_badge_version, self.minNextflowVersion
59 )
60 )
61 else:
62 warned.append("README did not have a Nextflow minimum version badge.")
63
64 # Check that the minimum version mentioned in the quick start section is consistent
65 # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
66 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
67 match = re.search(nf_version_re, content)
68 if match:
69 nf_quickstart_version = match.group(1)
70 try:
71 assert nf_quickstart_version == self.minNextflowVersion
72 except (AssertionError, KeyError):
73 failed.append(
74 f"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`"
75 )
76 else:
77 passed.append(
78 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`"
79 )
80 else:
81 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.")
82
83 return {"passed": passed, "warned": warned, "failed": failed}
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py
--- a/nf_core/lint/readme.py
+++ b/nf_core/lint/readme.py
@@ -62,8 +62,8 @@
warned.append("README did not have a Nextflow minimum version badge.")
# Check that the minimum version mentioned in the quick start section is consistent
- # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
- nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
+ # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)"
+ nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
match = re.search(nf_version_re, content)
if match:
nf_quickstart_version = match.group(1)
| {"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -62,8 +62,8 @@\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n \n # Check that the minimum version mentioned in the quick start section is consistent\n- # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n- nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n+ # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n+ nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n", "issue": "Linting does not recognize README Nextflow minimum version mention in Quick Start\nWhen running `nf-core lint` with a readme file that has the following in it:\r\n\r\n```\r\n## Quick Start\r\n\r\n1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r\n```\r\n\r\nA warning is triggered:\r\n\r\n```\r\nreadme: README did not have a Nextflow minimum version mentioned in Quick Start section.\r\n```\r\n\r\nThis warning should not be triggering as the minimum nextflow version is in the readme file.\r\n\r\nLink to code location\r\n\r\nhttps://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. 
Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. 
Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]} | 1,545 | 291 |
gh_patches_debug_50355 | rasdani/github-patches | git_diff | pypi__warehouse-6747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Brazilian Portuguese to localization footer
https://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portuguese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portuguese.
@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.
--- END ISSUE ---
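Since the translation is complete, the change itself is a one-line addition to the locale registry plus whatever renders the footer. A minimal sketch (the display name and the footer markup are assumptions, not warehouse's actual template):

```python
# The registry gains the new code; "pt_BR" matches the sep="_" parsing that
# Locale.parse() uses elsewhere in this module.
KNOWN_LOCALES = {
    "en": "English",
    "pt_BR": "Portuguese (Brazil)",  # assumed display name
}

# A footer can then enumerate the available translations via the _LOCALE_
# query parameter that the locale negotiator already reads:
for code, name in KNOWN_LOCALES.items():
    print(f'<a href="?_LOCALE_={code}">{name}</a>')
```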
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/i18n/__init__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from babel.core import Locale
14 from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
15 from pyramid.threadlocal import get_current_request
16
17 KNOWN_LOCALES = {"en": "English"}
18
19 LOCALE_ATTR = "_LOCALE_"
20
21 _translation_factory = TranslationStringFactory("messages")
22
23
24 class LazyString:
25 def __init__(self, fn, *args, **kwargs):
26 self.fn = fn
27 self.args = args
28 self.mapping = kwargs.get("mapping", {})
29 self.kwargs = kwargs
30
31 def __json__(self, request):
32 return str(self)
33
34 def __mod__(self, new_mapping):
35 mapping = self.mapping.copy()
36 mapping.update(new_mapping)
37 return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)
38
39 def __str__(self):
40 return self.fn(*self.args, **self.kwargs)
41
42
43 def _locale(request):
44 """
45 Computes a babel.core:Locale() object for this request.
46 """
47 return Locale.parse(request.locale_name, sep="_")
48
49
50 def _negotiate_locale(request):
51 locale_name = getattr(request, LOCALE_ATTR, None)
52 if locale_name is not None:
53 return locale_name
54
55 locale_name = request.params.get(LOCALE_ATTR)
56 if locale_name is not None:
57 return locale_name
58
59 locale_name = request.cookies.get(LOCALE_ATTR)
60 if locale_name is not None:
61 return locale_name
62
63 if not request.accept_language:
64 return default_locale_negotiator(request)
65
66 return request.accept_language.best_match(
67 tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)
68 )
69
70
71 def localize(message, **kwargs):
72 def _localize(message, **kwargs):
73 request = get_current_request()
74 return request.localizer.translate(_translation_factory(message, **kwargs))
75
76 return LazyString(_localize, message, **kwargs)
77
78
79 def includeme(config):
80 # Add the request attributes
81 config.add_request_method(_locale, name="locale", reify=True)
82
83 # Register our translation directory.
84 config.add_translation_dirs("warehouse:locale/")
85
86 config.set_locale_negotiator(_negotiate_locale)
87
88 # Register our i18n/l10n filters for Jinja2
89 filters = config.get_settings().setdefault("jinja2.filters", {})
90 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
91 filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime")
92 filters.setdefault(
93 "format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime"
94 )
95 filters.setdefault("format_number", "warehouse.i18n.filters:format_number")
96
97 jglobals = config.get_settings().setdefault("jinja2.globals", {})
98 jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES")
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -14,7 +14,7 @@
from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
from pyramid.threadlocal import get_current_request
-KNOWN_LOCALES = {"en": "English"}
+KNOWN_LOCALES = {"en": "English", "pt_BR": "Portuguese (Brazil)"}
LOCALE_ATTR = "_LOCALE_"
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -14,7 +14,7 @@\n from pyramid.i18n import TranslationStringFactory, default_locale_negotiator\n from pyramid.threadlocal import get_current_request\n \n-KNOWN_LOCALES = {\"en\": \"English\"}\n+KNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n \n LOCALE_ATTR = \"_LOCALE_\"\n", "issue": "Add Brazilian Portugese to localization footer\nhttps://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese.\r\n\r\n@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", 
\"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", 
\"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}]} | 1,299 | 137 |
gh_patches_debug_15882 | rasdani/github-patches | git_diff | beeware__toga-850 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Table rows not rendering correctly in `update_data` winforms
## Expected Behavior
View text in the rows
## Current Behavior
Rows are created but text is not displayed.

When I insert a new row (with the insert button) it works correctly:

I found that a change was made in `update_data` (https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and that `update_data` uses a different ListViewItem creation than the `insert` method, which works correctly.
The specific line is the change made from this one:
```
item._impl = WinForms.ListViewItem([
str(getattr(item, attr)) for attr in self.interface._accessors
])
```
to this:
```
item._impl = WinForms.ListViewItem(self.row_data(item))
```
## Steps to reproduce
1. Open example and that's it
## Your Environment
* Python Version (list the specific version number)
3.7.7
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [ ] Linux - distro: - version:
- [x] Windows - version: 10 Pro
- [ ] Other - name: - version:
* Toga Version (list the specific version number or git hash)
Master branch of toga.
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [x] winforms
- [ ] win32
- [ ] Other (please specify)
--- END ISSUE ---
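The blank rows are consistent with the shape of the list handed to `WinForms.ListViewItem`: it expects only the cell strings, but `row_data` prepends the row object itself. A plain-Python illustration (no WinForms required; the class and accessors are made up):

```python
class Row:
    def __init__(self, name, value):
        self.name = name
        self.value = value


accessors = ("name", "value")
item = Row("hello", 42)

# What row_data() currently builds: the row object sneaks in as the first cell.
buggy_cells = [item] + [str(getattr(item, attr)) for attr in accessors]

# What insert() builds, and what ListViewItem actually needs: strings only.
fixed_cells = [str(getattr(item, attr)) for attr in accessors]

print(buggy_cells)  # [<__main__.Row object at 0x...>, 'hello', '42']
print(fixed_cells)  # ['hello', '42']
```

Dropping the leading `item` from `row_data` (and reusing `row_data` from `insert`) keeps both code paths consistent.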
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/widgets/table.py`
Content:
```
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class Table(Widget):
9 def create(self):
10 self._container = self
11 self.native = WinForms.ListView()
12 self.native.View = WinForms.View.Details
13
14 dataColumn = []
15 for heading in self.interface.headings:
16 col = WinForms.ColumnHeader()
17 col.Text = heading
18 dataColumn.append(col)
19
20 self.native.FullRowSelect = True
21 self.native.Multiselect = self.interface.multiple_select
22 self.native.DoubleBuffered = True
23 self.native.Columns.AddRange(dataColumn)
24
25 def change_source(self, source):
26 self.update_data()
27
28 def row_data(self, item):
29 # TODO: Winforms can't support icons in tree cells; so, if the data source
30 # specifies an icon, strip it when converting to row data.
31 def strip_icon(item, attr):
32 val = getattr(item, attr)
33 if isinstance(val, tuple):
34 return str(val[1])
35 return str(val)
36
37 return [item] + [
38 strip_icon(item, attr)
39 for attr in self.interface._accessors
40 ]
41
42 def update_data(self):
43 self.native.BeginUpdate()
44 self.native.Items.Clear()
45 items = []
46 for item in self.interface.data:
47 item._impl = WinForms.ListViewItem(self.row_data(item))
48 items.append(item._impl)
49 self.native.Items.AddRange(items)
50 self.native.EndUpdate()
51
52 def insert(self, index, item):
53 self.native.BeginUpdate()
54 item._impl = WinForms.ListViewItem([
55 str(getattr(item, attr)) for attr in self.interface._accessors
56 ])
57 self.native.Items.Insert(index, item._impl)
58 self.native.EndUpdate()
59
60 def change(self, item):
61 self.interface.factory.not_implemented('Table.change()')
62
63 def remove(self, item):
64 self.update_data()
65
66 def clear(self):
67 self.native.Items.Clear()
68
69 def set_on_select(self, handler):
70 self.interface.factory.not_implemented('Table.set_on_select()')
71
72 def scroll_to_row(self, row):
73 self.native.EnsureVisible(row)
74 self.interface.factory.not_implemented('Table.scroll_to_row()')
75
76 def rehint(self):
77 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
78 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py
--- a/src/winforms/toga_winforms/widgets/table.py
+++ b/src/winforms/toga_winforms/widgets/table.py
@@ -34,7 +34,7 @@
return str(val[1])
return str(val)
- return [item] + [
+ return [
strip_icon(item, attr)
for attr in self.interface._accessors
]
@@ -51,9 +51,7 @@
def insert(self, index, item):
self.native.BeginUpdate()
- item._impl = WinForms.ListViewItem([
- str(getattr(item, attr)) for attr in self.interface._accessors
- ])
+ item._impl = WinForms.ListViewItem(self.row_data(item))
self.native.Items.Insert(index, item._impl)
self.native.EndUpdate()
| {"golden_diff": "diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py\n--- a/src/winforms/toga_winforms/widgets/table.py\n+++ b/src/winforms/toga_winforms/widgets/table.py\n@@ -34,7 +34,7 @@\n return str(val[1])\n return str(val)\n \n- return [item] + [\n+ return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n@@ -51,9 +51,7 @@\n \n def insert(self, index, item):\n self.native.BeginUpdate()\n- item._impl = WinForms.ListViewItem([\n- str(getattr(item, attr)) for attr in self.interface._accessors\n- ])\n+ item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n", "issue": "Table rows not rendering correctly in `update_data` winforms\n## Expected Behavior\r\nView text in the rows\r\n\r\n## Current Behavior\r\nRows are created but text is not displayed.\r\n\r\nWhen I insert a new row (with insert button) is working ok:\r\n\r\n\r\nIn found that a change was made in `update_data` ( https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` use a different ListViewItem creation than `insert` method which is working ok.\r\n\r\nThe specific line is the change made from this one:\r\n```\r\nitem._impl = WinForms.ListViewItem([\r\n str(getattr(item, attr)) for attr in self.interface._accessors\r\n ])\r\n```\r\nto this:\r\n```\r\nitem._impl = WinForms.ListViewItem(self.row_data(item))\r\n```\r\n\r\n## Steps to reproduce\r\n1. Open example and that's it\r\n\r\n## Your Environment\r\n\r\n* Python Version (list the specific version number)\r\n3.7.7\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version:\r\n - [ ] Linux - distro: - version:\r\n - [x] Windows - version: 10 Pro\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Version (list the specific version number or git hash)\r\nMaster branch of toga.\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [x] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [item] + [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n 
self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem([\n str(getattr(item, attr)) for attr in self.interface._accessors\n ])\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}], "after_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}]} | 1,461 | 200 |
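
As context for the toga patch above: the pre-fix `row_data()` prepended the row object itself to the list handed to `WinForms.ListViewItem`, so the first cell received an object rather than a string. The sketch below is a pure-Python illustration of that difference; the `Row` class, the `accessors` tuple, and the sample values are hypothetical stand-ins, and no WinForms import is involved.

```python
class Row:
    """Hypothetical stand-in for a toga data-source row."""
    def __init__(self, title, year):
        self.title = title
        self.year = year

accessors = ("title", "year")

def strip_icon(item, attr):
    val = getattr(item, attr)
    # (icon, text) tuples keep only the text, mirroring the patched helper
    return str(val[1]) if isinstance(val, tuple) else str(val)

item = Row("Casablanca", 1942)

buggy_cells = [item] + [strip_icon(item, a) for a in accessors]  # pre-fix update_data()
fixed_cells = [strip_icon(item, a) for a in accessors]           # post-fix row_data()

print(buggy_cells)  # [<__main__.Row object at 0x...>, 'Casablanca', '1942'] -> bad first column
print(fixed_cells)  # ['Casablanca', '1942'] -> plain strings for every column
```

With the patch applied, `update_data()` and `insert()` hand the same all-string list to `WinForms.ListViewItem`, which is why the two code paths now render identically.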
gh_patches_debug_9260 | rasdani/github-patches | git_diff | translate__pootle-6524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing source (with xliff) can create submissions with no submitter
# Steps to reproduce:
- change source in file of existing unit
- run update_stores
# Results
- submissions have no submitter
- stuff breaks
## Expected result:
- nothing breaks
this only happens in xliff afaict - as I don't think it's possible to change the source of units in this way with po
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/receivers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from hashlib import md5
10
11 from django.db.models.signals import post_save, pre_save
12 from django.dispatch import receiver
13 from django.utils.encoding import force_bytes
14
15 from pootle.core.delegate import lifecycle, uniqueid
16 from pootle.core.models import Revision
17 from pootle.core.signals import update_checks, update_data
18
19 from .constants import FUZZY, TRANSLATED, UNTRANSLATED
20 from .models import Suggestion, Unit, UnitChange, UnitSource
21
22
23 @receiver(post_save, sender=Suggestion)
24 def handle_suggestion_added(**kwargs):
25 created = kwargs.get("created")
26 if not created:
27 return
28 store = kwargs["instance"].unit.store
29 update_data.send(store.__class__, instance=store)
30
31
32 @receiver(post_save, sender=Suggestion)
33 def handle_suggestion_accepted(**kwargs):
34 created = kwargs.get("created")
35 suggestion = kwargs["instance"]
36 if created or not suggestion.is_accepted:
37 return
38 update_data.send(
39 suggestion.unit.store.__class__,
40 instance=suggestion.unit.store)
41
42
43 @receiver(pre_save, sender=UnitSource)
44 def handle_unit_source_pre_save(**kwargs):
45 unit_source = kwargs["instance"]
46 created = not unit_source.pk
47 unit = unit_source.unit
48 if created:
49 unit_source.creation_revision = unit.revision
50 if created or unit.source_updated:
51 unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()
52 unit_source.source_length = len(unit.source_f)
53 unit_source.source_wordcount = max(
54 1, (unit.counter.count_words(unit.source_f.strings) or 0))
55
56
57 @receiver(pre_save, sender=Unit)
58 def handle_unit_pre_save(**kwargs):
59 unit = kwargs["instance"]
60 auto_translated = False
61
62 if unit.source_updated:
63 # update source related fields
64 wc = unit.counter.count_words(unit.source_f.strings)
65 if not wc and not bool(filter(None, unit.target_f.strings)):
66 # auto-translate untranslated strings
67 unit.target = unit.source
68 unit.state = FUZZY
69 auto_translated = True
70 if unit.target_updated:
71 # update target related fields
72 unit.target_wordcount = unit.counter.count_words(
73 unit.target_f.strings)
74 unit.target_length = len(unit.target_f)
75 if filter(None, unit.target_f.strings):
76 if unit.state == UNTRANSLATED:
77 unit.state = TRANSLATED
78 else:
79 # if it was TRANSLATED then set to UNTRANSLATED
80 if unit.state > FUZZY:
81 unit.state = UNTRANSLATED
82
83 # Updating unit from the .po file set its revision property to
84 # a new value (the same for all units during its store updated)
85 # since that change doesn't require further sync but note that
86 # auto_translated units require further sync
87 update_revision = (
88 unit.revision is None
89 or (not unit.revision_updated
90 and (unit.updated and not auto_translated)))
91 if update_revision:
92 unit.revision = Revision.incr()
93
94 if unit.index is None:
95 unit.index = unit.store.max_index() + 1
96 unitid = uniqueid.get(unit.__class__)(unit)
97 if unitid.changed:
98 unit.setid(unitid.getid())
99
100
101 @receiver(pre_save, sender=UnitChange)
102 def handle_unit_pre_change(**kwargs):
103 unit_change = kwargs["instance"]
104 unit = unit_change.unit
105 if unit.state == UNTRANSLATED:
106 # clear reviewer and translator data if translation
107 # has been deleted
108 unit_change.submitted_by = None
109 unit_change.submitted_on = None
110
111
112 @receiver(post_save, sender=UnitChange)
113 def handle_unit_change(**kwargs):
114 unit_change = kwargs["instance"]
115 unit = unit_change.unit
116 created = not unit._frozen.pk
117
118 if not created:
119 lifecycle.get(Unit)(unit).change()
120 if not unit.source_updated and not unit.target_updated:
121 return
122 new_untranslated = (created and unit.state == UNTRANSLATED)
123 if not new_untranslated:
124 update_checks.send(unit.__class__, instance=unit)
125 if unit.istranslated():
126 unit.update_tmserver()
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py
--- a/pootle/apps/pootle_store/receivers.py
+++ b/pootle/apps/pootle_store/receivers.py
@@ -98,17 +98,6 @@
unit.setid(unitid.getid())
-@receiver(pre_save, sender=UnitChange)
-def handle_unit_pre_change(**kwargs):
- unit_change = kwargs["instance"]
- unit = unit_change.unit
- if unit.state == UNTRANSLATED:
- # clear reviewer and translator data if translation
- # has been deleted
- unit_change.submitted_by = None
- unit_change.submitted_on = None
-
-
@receiver(post_save, sender=UnitChange)
def handle_unit_change(**kwargs):
unit_change = kwargs["instance"]
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py\n--- a/pootle/apps/pootle_store/receivers.py\n+++ b/pootle/apps/pootle_store/receivers.py\n@@ -98,17 +98,6 @@\n unit.setid(unitid.getid())\n \n \n-@receiver(pre_save, sender=UnitChange)\n-def handle_unit_pre_change(**kwargs):\n- unit_change = kwargs[\"instance\"]\n- unit = unit_change.unit\n- if unit.state == UNTRANSLATED:\n- # clear reviewer and translator data if translation\n- # has been deleted\n- unit_change.submitted_by = None\n- unit_change.submitted_on = None\n-\n-\n @receiver(post_save, sender=UnitChange)\n def handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n", "issue": "Changing source (with xliff) can create submissions with no submitter\n# Steps to reproduce:\r\n\r\n- change source in file of existing unit\r\n- run update_stores\r\n\r\n# Results\r\n\r\n- submissions have no submitter\r\n- stuff breaks\r\n\r\n## Expected result:\r\n\r\n- nothing breaks\r\n\r\n\r\nthis only happens in xliff afaict - as i dont think its possible to change source of units in this way with po\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if 
filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(pre_save, sender=UnitChange)\ndef handle_unit_pre_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n if unit.state == UNTRANSLATED:\n # clear reviewer and translator data if translation\n # has been deleted\n unit_change.submitted_by = None\n unit_change.submitted_on = None\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": "pootle/apps/pootle_store/receivers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": 
"pootle/apps/pootle_store/receivers.py"}]} | 1,599 | 195 |
gh_patches_debug_6221 | rasdani/github-patches | git_diff | scikit-hep__pyhf-837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bump jsonschema to v3.2.0+ to support draft 6
Currently on alpha release 3.0.x but can bump to 3.2.0 which was released.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=1.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,7 +96,7 @@
'scipy', # requires numpy, which is required by pyhf and tensorflow
'click>=6.0', # for console scripts,
'tqdm', # for readxml
- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
+ 'jsonschema>=3.2.0', # for utils
'jsonpatch',
'pyyaml', # for parsing CLI equal-delimited options
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n+ 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n", "issue": "bump jsonschema to v3.2.0+ to support draft 6\nCurrently on alpha release 3.0.x but can bump to 3.2.0 which was released.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import 
Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,368 | 143 |
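
As a companion to the pyhf pin above, the snippet below shows one way to confirm that the installed jsonschema actually handles draft-6 schemas. `Draft6Validator` ships with the jsonschema 3.x line; treat the exact version string printed and the example schema as environment-dependent assumptions rather than part of the original patch.

```python
import jsonschema

print(jsonschema.__version__)  # expect 3.2.0 or newer once the pin takes effect

schema = {
    "$schema": "http://json-schema.org/draft-06/schema#",
    "type": "object",
    "properties": {"bins": {"type": "array", "items": {"type": "number"}}},
    "required": ["bins"],
}
jsonschema.Draft6Validator.check_schema(schema)                     # raises SchemaError if draft 6 is unsupported
jsonschema.Draft6Validator(schema).validate({"bins": [1.0, 2.0]})   # passes silently
```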
gh_patches_debug_925 | rasdani/github-patches | git_diff | dynamiqs__dynamiqs-196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
implement a ver() method
As a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynamiqs/__init__.py`
Content:
```
1 from .mesolve import mesolve
2 from .sesolve import sesolve
3 from .smesolve import smesolve
4 from .utils import *
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py
--- a/dynamiqs/__init__.py
+++ b/dynamiqs/__init__.py
@@ -1,4 +1,9 @@
+from importlib.metadata import version
+
from .mesolve import mesolve
from .sesolve import sesolve
from .smesolve import smesolve
from .utils import *
+
+# get version from pyproject.toml
+__version__ = version(__package__)
| {"golden_diff": "diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py\n--- a/dynamiqs/__init__.py\n+++ b/dynamiqs/__init__.py\n@@ -1,4 +1,9 @@\n+from importlib.metadata import version\n+\n from .mesolve import mesolve\n from .sesolve import sesolve\n from .smesolve import smesolve\n from .utils import *\n+\n+# get version from pyproject.toml\n+__version__ = version(__package__)\n", "issue": "implement a ver() method\nAs a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running\n", "before_files": [{"content": "from .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n", "path": "dynamiqs/__init__.py"}], "after_files": [{"content": "from importlib.metadata import version\n\nfrom .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n\n# get version from pyproject.toml\n__version__ = version(__package__)\n", "path": "dynamiqs/__init__.py"}]} | 341 | 113 |
gh_patches_debug_132 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3433 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ci - failure around mismatched versions of tabulate
Per current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.
This mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.
Digging around a bit more: pip released a new 19 release series in the last 72 hrs, which I'm currently examining for regressions that allow installs to ignore package dependencies when given requirements.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from io import open
3 from setuptools import setup, find_packages
4
5
6 def read(fname):
7 return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
8
9
10 setup(
11 name="c7n",
12 version='0.8.33.1',
13 description="Cloud Custodian - Policy Rules Engine",
14 long_description=read('README.rst'),
15 classifiers=[
16 "Topic :: System :: Systems Administration",
17 "Topic :: System :: Distributed Computing"
18 ],
19 url="https://github.com/capitalone/cloud-custodian",
20 license="Apache-2.0",
21 packages=find_packages(),
22 entry_points={
23 'console_scripts': [
24 'custodian = c7n.cli:main']},
25 install_requires=[
26 "boto3>=1.9.62",
27 "botocore>=1.12.62",
28 "python-dateutil>=2.6,<3.0.0",
29 "pyyaml",
30 "jsonschema",
31 "jsonpatch>=1.21",
32 "argcomplete",
33 "tabulate"
34 ],
35 )
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,6 @@
"jsonschema",
"jsonpatch>=1.21",
"argcomplete",
- "tabulate"
+ "tabulate==0.8.2"
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,6 +30,6 @@\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n- \"tabulate\"\n+ \"tabulate==0.8.2\"\n ],\n )\n", "issue": "ci - failure around mismatched versions of tabulate\nPer current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.\r\n\r\nThis mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.\r\n\r\ndigging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements.\r\n\n", "before_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate\"\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate==0.8.2\"\n ],\n)\n", "path": "setup.py"}]} | 691 | 73 |
gh_patches_debug_26997 | rasdani/github-patches | git_diff | mdn__kuma-6098 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
search_phase_execution_exception in ES on huuuge ?page params
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/
```
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
(24 additional frame(s) were not displayed)
...
File "rest_framework/views.py", line 492, in dispatch
response = handler(request, *args, **kwargs)
File "rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "rest_framework/mixins.py", line 42, in list
page = self.paginate_queryset(queryset)
File "rest_framework/generics.py", line 173, in paginate_queryset
return self.paginator.paginate_queryset(queryset, self.request, view=self)
File "rest_framework/pagination.py", line 204, in paginate_queryset
self.page = paginator.page(page_number)
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/paginator.py`
Content:
```
1 from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
2 from django.utils.functional import cached_property
3
4
5 class SearchPaginator(Paginator):
6 """
7 A better paginator for search results
8
9 The normal Paginator does a .count() query and then a slice. Since ES
10 results contain the total number of results, we can take an optimistic
11 slice and then adjust the count.
12 """
13
14 def __init__(self, *args, **kwargs):
15 super(SearchPaginator, self).__init__(*args, **kwargs)
16 self._result_total = None
17
18 def validate_number(self, number):
19 """
20 Validates the given 1-based page number.
21
22 This class overrides the default behavior and ignores the upper bound.
23 """
24 try:
25 number = int(number)
26 except (TypeError, ValueError):
27 raise PageNotAnInteger('That page number is not an integer')
28 if number < 1:
29 raise EmptyPage('That page number is less than 1')
30 return number
31
32 def page(self, number):
33 """
34 Returns a page object.
35
36 This class overrides the default behavior and ignores "orphans" and
37 assigns the count from the ES result to the Paginator.
38 """
39 number = self.validate_number(number)
40 bottom = (number - 1) * self.per_page
41 top = bottom + self.per_page
42
43 # Force the search to evaluate and then attach the count. We want to
44 # avoid an extra useless query even if there are no results, so we
45 # directly fetch the count from hits.
46 result = self.object_list[bottom:top].execute()
47 page = Page(result.hits, number, self)
48 # Set the count to the results after post_filter
49 self._result_total = result.hits.total
50 # Also store the aggregations, if any.
51 page.aggregations = getattr(result, 'aggregations', None)
52
53 # Now that we have the count validate that the page number isn't higher
54 # than the possible number of pages and adjust accordingly.
55 if number > self.num_pages:
56 if number == 1 and self.allow_empty_first_page:
57 pass
58 else:
59 raise EmptyPage('That page contains no results')
60 return page
61
62 @cached_property
63 def count(self):
64 """
65 Returns the total number of results.
66
67 Paginator's count property will call .count() on the search object,
68 which returns results before the pre_filter. This will result in a
69 count that is too high. Instead, use 'total' from the results,
70 executing if needed.
71 """
72 if self._result_total is not None:
73 return self._result_total
74 return self.object_list.execute().hits.total
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py
--- a/kuma/search/paginator.py
+++ b/kuma/search/paginator.py
@@ -1,4 +1,5 @@
-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
+from django.core.paginator import (
+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)
from django.utils.functional import cached_property
@@ -19,7 +20,7 @@
"""
Validates the given 1-based page number.
- This class overrides the default behavior and ignores the upper bound.
+ We also check that the number isn't too large.
"""
try:
number = int(number)
@@ -27,6 +28,19 @@
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
+
+ if number >= 1000:
+ # Anything >=1,000 will result in a hard error in
+ # Elasticsearch which would happen before we even get a chance
+ # to validate that the range is too big. The error you would
+ # get from Elasticsearch 6.x is something like this:
+ #
+ # Result window is too large, from + size must be less
+ # than or equal to: [10000] but was [11000].
+ #
+ # See https://github.com/mdn/kuma/issues/6092
+ raise InvalidPage('Page number too large')
+
return number
def page(self, number):
| {"golden_diff": "diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py\n--- a/kuma/search/paginator.py\n+++ b/kuma/search/paginator.py\n@@ -1,4 +1,5 @@\n-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\n+from django.core.paginator import (\n+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\n from django.utils.functional import cached_property\n \n \n@@ -19,7 +20,7 @@\n \"\"\"\n Validates the given 1-based page number.\n \n- This class overrides the default behavior and ignores the upper bound.\n+ We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n@@ -27,6 +28,19 @@\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n+\n+ if number >= 1000:\n+ # Anything >=1,000 will result in a hard error in\n+ # Elasticsearch which would happen before we even get a chance\n+ # to validate that the range is too big. The error you would\n+ # get from Elasticsearch 6.x is something like this:\n+ #\n+ # Result window is too large, from + size must be less\n+ # than or equal to: [10000] but was [11000].\n+ #\n+ # See https://github.com/mdn/kuma/issues/6092\n+ raise InvalidPage('Page number too large')\n+\n return number\n \n def page(self, number):\n", "issue": "search_phase_execution_exception in ES on huuuge ?page params\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/\n\n```\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n(24 additional frame(s) were not displayed)\n...\n File \"rest_framework/views.py\", line 492, in dispatch\n response = handler(request, *args, **kwargs)\n File \"rest_framework/generics.py\", line 201, in get\n return self.list(request, *args, **kwargs)\n File \"rest_framework/mixins.py\", line 42, in list\n page = self.paginate_queryset(queryset)\n File \"rest_framework/generics.py\", line 173, in paginate_queryset\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n File \"rest_framework/pagination.py\", line 204, in paginate_queryset\n self.page = paginator.page(page_number)\n\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n```\n", "before_files": [{"content": "from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. 
Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n This class overrides the default behavior and ignores the upper bound.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}], "after_files": [{"content": "from django.core.paginator import (\n EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n\n if number >= 1000:\n # Anything >=1,000 will result in a hard error in\n # Elasticsearch which would happen before we even get a chance\n # to validate that the range is too big. 
The error you would\n # get from Elasticsearch 6.x is something like this:\n #\n # Result window is too large, from + size must be less\n # than or equal to: [10000] but was [11000].\n #\n # See https://github.com/mdn/kuma/issues/6092\n raise InvalidPage('Page number too large')\n\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}]} | 1,365 | 373 |
gh_patches_debug_3331 | rasdani/github-patches | git_diff | fidals__shopelectro-885 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stale import db issue
PO says the last import was at `<yml_catalog date="2019-05-23 00:38">`
Check if import db can autolaunch.
Import db is called via the catalog update command.
--- END ISSUE ---
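Before the code segments, a brief illustration (mine, not from the issue) of the signature bug the patch below fixes; the assumption that the catalog update pipeline calls `main()` as a plain entry point, which would leave the import stale, is mine:
```python
# Hypothetical sketch: without the **, "kwargs" is a required keyword-only
# parameter instead of a catch-all dict, so a plain call to main() fails and
# the update step never completes.
def main_broken(*args, kwargs):
    pass

def main_fixed(*args, **kwargs):
    pass

main_fixed()        # fine
try:
    main_broken()   # TypeError: missing 1 required keyword-only argument: 'kwargs'
except TypeError as err:
    print(err)
```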
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/management/commands/_update_catalog/update_pack.py`
Content:
```
1 """
2 Update Product.in_pack and prices.
3
4 The update_catalog command always resets product prices to per unit format, so:
5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack
6 2. Multiply product prices by in_pack value and save.
7 """
8 import logging
9
10 from django.db import models, transaction
11
12 from shopelectro.models import TagQuerySet, TagGroup
13
14 logger = logging.getLogger(__name__)
15 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
16
17
18 def update_in_packs(packs: TagQuerySet):
19 """Parse and save in pack quantity values."""
20 # @todo #859:60m Implement update_pack and render prices properly.
21
22
23 def update_prices(packs: TagQuerySet):
24 """Multiply product prices on in pack quantity."""
25 fields_to_update = {}
26 for price in PRICES:
27 fields_to_update[price] = models.F(price) * models.F('in_pack')
28
29 with transaction.atomic():
30 packs.products().update(**fields_to_update)
31
32
33 def main(*args, kwargs):
34 uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
35 pack_group = TagGroup.objects.filter(uuid=uuid).first()
36 if not pack_group:
37 logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".')
38 return
39
40 return
41
42 packs = pack_group.tags.all().prefetch_related('products')
43 update_in_packs(packs)
44 update_prices(packs)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py
--- a/shopelectro/management/commands/_update_catalog/update_pack.py
+++ b/shopelectro/management/commands/_update_catalog/update_pack.py
@@ -30,7 +30,7 @@
packs.products().update(**fields_to_update)
-def main(*args, kwargs):
+def main(*args, **kwargs):
uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
pack_group = TagGroup.objects.filter(uuid=uuid).first()
if not pack_group:
| {"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -30,7 +30,7 @@\n packs.products().update(**fields_to_update)\n \n \n-def main(*args, kwargs):\n+def main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n", "issue": "Stale import db issue\nPO says the last import was at `<yml_catalog date=\"2019-05-23 00:38\">`\r\nCheck if import db can autolaunch.\r\n\r\nImport db is called as catalog update command\r\n\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}], "after_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. 
Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]} | 762 | 164 |
gh_patches_debug_4833 | rasdani/github-patches | git_diff | Theano__Theano-146 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get rid of warning when using Scipy 0.10.x
WARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.
--- END ISSUE ---
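A minimal sketch (not part of the original report) of why the warning fires on newer SciPy releases, assuming the check compares version strings lexicographically as the file below does:
```python
# String comparison is lexicographic, so "0.10.0b2" sorts before "0.7":
print("0.10.0b2" >= "0.7")  # False -> the >=0.7 check fails and the warning is printed

# Comparing parsed version objects handles multi-digit components correctly:
from pkg_resources import parse_version
print(parse_version("0.10.0b2") >= parse_version("0.7"))  # True
```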
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/sparse/__init__.py`
Content:
```
1 import sys
2 try:
3 import scipy
4 enable_sparse = scipy.__version__ >= '0.7'
5 if not enable_sparse:
6 sys.stderr.write("WARNING: scipy version = %s."
7 " We request version >=0.7.0 for the sparse code as it has"
8 " bugs fixed in the sparse matrix code.\n" % scipy.__version__)
9 except ImportError:
10 enable_sparse = False
11 sys.stderr.write("WARNING: scipy can't be imported."
12 " We disable the sparse matrix code.")
13
14 if enable_sparse:
15 from basic import *
16 import sharedvar
17 from sharedvar import sparse_constructor as shared
18
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py
--- a/theano/sparse/__init__.py
+++ b/theano/sparse/__init__.py
@@ -1,7 +1,10 @@
+from pkg_resources import parse_version as V
import sys
+
try:
import scipy
- enable_sparse = scipy.__version__ >= '0.7'
+ enable_sparse = V(scipy.__version__) >= V('0.7')
+
if not enable_sparse:
sys.stderr.write("WARNING: scipy version = %s."
" We request version >=0.7.0 for the sparse code as it has"
| {"golden_diff": "diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py\n--- a/theano/sparse/__init__.py\n+++ b/theano/sparse/__init__.py\n@@ -1,7 +1,10 @@\n+from pkg_resources import parse_version as V\n import sys\n+\n try:\n import scipy\n- enable_sparse = scipy.__version__ >= '0.7'\n+ enable_sparse = V(scipy.__version__) >= V('0.7')\n+\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n", "issue": "Get rid of warning when using Scipy 0.10.x\nWARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.\n\n", "before_files": [{"content": "import sys\ntry:\n import scipy\n enable_sparse = scipy.__version__ >= '0.7'\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}], "after_files": [{"content": "from pkg_resources import parse_version as V\nimport sys\n\ntry:\n import scipy\n enable_sparse = V(scipy.__version__) >= V('0.7')\n\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}]} | 480 | 147 |
gh_patches_debug_1163 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2712 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document that RTD uses `rel` branch for production
Hi, I'd like to add a new builder for Doxygen documentation (but native, not with Breathe). Since there are a lot of branches like real/relcorp which are far ahead of master, I'd like to know which branch to choose for development.
Thanks in advance!
Oli
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 import os
4 import sys
5
6 from recommonmark.parser import CommonMarkParser
7
8 sys.path.insert(0, os.path.abspath('..'))
9 sys.path.append(os.path.dirname(__file__))
10 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
11
12 from django.conf import settings
13
14 import django
15 django.setup()
16
17
18 sys.path.append(os.path.abspath('_ext'))
19 extensions = [
20 'sphinx.ext.autodoc',
21 'sphinx.ext.intersphinx',
22 'sphinxcontrib.httpdomain',
23 'djangodocs',
24 'doc_extensions',
25 ]
26 templates_path = ['_templates']
27
28 source_suffix = ['.rst', '.md']
29 source_parsers = {
30 '.md': CommonMarkParser,
31 }
32
33 master_doc = 'index'
34 project = u'Read The Docs'
35 copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
36 version = '1.0'
37 release = '1.0'
38 exclude_patterns = ['_build']
39 default_role = 'obj'
40 pygments_style = 'sphinx'
41 intersphinx_mapping = {
42 'python': ('http://python.readthedocs.io/en/latest/', None),
43 'django': ('http://django.readthedocs.io/en/1.8.x/', None),
44 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),
45 }
46 # This doesn't exist since we aren't shipping any static files ourselves.
47 #html_static_path = ['_static']
48 htmlhelp_basename = 'ReadTheDocsdoc'
49 latex_documents = [
50 ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',
51 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
52 ]
53 man_pages = [
54 ('index', 'read-the-docs', u'Read The Docs Documentation',
55 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
56 ]
57
58 exclude_patterns = [
59 # 'api' # needed for ``make gettext`` to not die.
60 ]
61
62 language = 'en'
63
64 locale_dirs = [
65 'locale/',
66 ]
67 gettext_compact = False
68
69
70 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
71 if not on_rtd: # only import and set the theme if we're building docs locally
72 import sphinx_rtd_theme
73 html_theme = 'sphinx_rtd_theme'
74 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,7 +32,7 @@
master_doc = 'index'
project = u'Read The Docs'
-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
+copyright = u'2010-2017, Read the Docs, Inc & contributors'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -32,7 +32,7 @@\n \n master_doc = 'index'\n project = u'Read The Docs'\n-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\n+copyright = u'2010-2017, Read the Docs, Inc & contributors'\n version = '1.0'\n release = '1.0'\n exclude_patterns = ['_build']\n", "issue": "Document that RTD uses `rel` branch for production\nHi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development.\r\n\r\nThanks in advance!\r\nOli\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010-2017, Read the Docs, Inc & contributors'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = 
['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} | 1,005 | 117 |
gh_patches_debug_5807 | rasdani/github-patches | git_diff | scikit-hep__awkward-970 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doubly jagged workaround broken in awkward 1.4.0rc2
There was a quick fix (made for KM3NeT data 😄) for doubly jagged arrays, which was discussed here: https://github.com/scikit-hep/uproot4/issues/90. It uses the `awkward._io` submodule, which is no longer directly accessible (`AttributeError`) in `1.4.0rc2`.
See here:
https://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35
I am not sure what the desired fix is, to import `_io` in `_connect/_uproot.py` or if this fix is now covered by further developments, so I thought I ask first before I do a PR 😉
Here is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test
```python
>>> import uproot
>>> uproot.__version__
'4.0.7'
>>> import awkward as ak
>>> ak.__version__
'1.4.0rc2'
>>> from km3net_testdata import data_path
>>> f = uproot.open(data_path("offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root"))
>>> f["E/Evt/trks/trks.rec_stages"].array()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-80472509fad7> in <module>
----> 1 f["E/Evt/trks/trks.rec_stages"].array()
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)
2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))
2077
-> 2078 _ranges_or_baskets_to_arrays(
2079 self,
2080 ranges_or_baskets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)
3476
3477 elif isinstance(obj, tuple) and len(obj) == 3:
-> 3478 uproot.source.futures.delayed_raise(*obj)
3479
3480 else:
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)
44 exec("raise exception_class, exception_value, traceback")
45 else:
---> 46 raise exception_value.with_traceback(traceback)
47
48
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)
3420 basket_arrays = branchid_arrays[branch.cache_key]
3421
-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(
3423 basket.data,
3424 basket.byte_offsets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)
151 "cursor_offset": cursor_offset,
152 }
--> 153 output = awkward._connect._uproot.basket_array(
154 form, data, byte_offsets, extra
155 )
~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)
36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
37
---> 38 return ak._io.uproot_issue_90(
39 form,
40 ak.layout.NumpyArray(data),
AttributeError: module 'awkward' has no attribute '_io'
```
--- END ISSUE ---
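A short, self-contained illustration of the Python behaviour behind the `AttributeError` (it uses a stdlib package so it runs without awkward installed; the mapping to `awkward._io` is my reading of the traceback above):
```python
# A parent package only exposes a submodule as an attribute once that
# submodule has actually been imported somewhere.
import concurrent
try:
    concurrent.futures                 # not imported yet
except AttributeError as err:
    print(err)                         # module 'concurrent' has no attribute 'futures'

import concurrent.futures              # explicit import binds it on the parent package
print(concurrent.futures)              # now accessible

# The patch below applies the same idea by doing "import awkward._io" inside
# basket_array() before calling awkward._io.uproot_issue_90().
```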
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/_connect/_uproot.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 from __future__ import absolute_import
4
5 import json
6
7 # don't import awkward._connect._uproot in awkward/__init__.py!
8 import uproot
9
10 import awkward as ak
11
12
13 def can_optimize(interpretation, form):
14 if isinstance(interpretation, uproot.interpretation.objects.AsObjects):
15 jsonform = json.loads(form.tojson(verbose=True))
16 if (
17 jsonform["class"] == "ListOffsetArray64"
18 and jsonform["parameters"].get("uproot")
19 == {"as": "array", "header": True, "speedbump": False}
20 and jsonform["content"]["class"] == "ListOffsetArray64"
21 and jsonform["content"]["parameters"].get("uproot")
22 == {"as": "vector", "header": False}
23 and jsonform["content"]["content"]["class"] == "NumpyArray"
24 and jsonform["content"]["content"]["inner_shape"] == []
25 and (
26 jsonform["content"]["content"].get("primitive") == "float64"
27 or jsonform["content"]["content"].get("primitive") == "int32"
28 )
29 ):
30 return True
31
32 return False
33
34
35 def basket_array(form, data, byte_offsets, extra):
36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
37
38 return ak._io.uproot_issue_90(
39 form,
40 ak.layout.NumpyArray(data),
41 ak.layout.Index32(byte_offsets),
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py
--- a/src/awkward/_connect/_uproot.py
+++ b/src/awkward/_connect/_uproot.py
@@ -33,9 +33,11 @@
def basket_array(form, data, byte_offsets, extra):
+ import awkward._io
+
# FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
- return ak._io.uproot_issue_90(
+ return awkward._io.uproot_issue_90(
form,
ak.layout.NumpyArray(data),
ak.layout.Index32(byte_offsets),
| {"golden_diff": "diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py\n--- a/src/awkward/_connect/_uproot.py\n+++ b/src/awkward/_connect/_uproot.py\n@@ -33,9 +33,11 @@\n \n \n def basket_array(form, data, byte_offsets, extra):\n+ import awkward._io\n+\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n \n- return ak._io.uproot_issue_90(\n+ return awkward._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n", "issue": "Doubly jagged workaround broken in awkward 1.4.0rc2\nThere was a quick fix (made for KM3NeT data \ud83d\ude04) for doubly jagged arrays which were discussed here https://github.com/scikit-hep/uproot4/issues/90 and the it uses the `akward._io` submodule which is not accessible anymore directly (`AttributeError`) in `1.4.0rc2`.\r\n\r\nSee here:\r\n\r\nhttps://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35\r\n\r\nI am not sure what the desired fix is, to import `_io` in `_connect/_uproot.py` or if this fix is now covered by further developments, so I thought I ask first before I do a PR \ud83d\ude09 \r\n\r\nHere is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test\r\n\r\n```python\r\n>>> import uproot\r\n\r\n>>> uproot.__version__\r\n'4.0.7'\r\n\r\n>>> import awkward as ak\r\n\r\n>>> ak.__version__\r\n'1.4.0rc2'\r\n\r\n>>> from km3net_testdata import data_path\r\n\r\n>>> f = uproot.open(data_path(\"offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root\"))\r\n\r\n>>> f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-18-80472509fad7> in <module>\r\n----> 1 f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)\r\n 2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))\r\n 2077\r\n-> 2078 _ranges_or_baskets_to_arrays(\r\n 2079 self,\r\n 2080 ranges_or_baskets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)\r\n 3476\r\n 3477 elif isinstance(obj, tuple) and len(obj) == 3:\r\n-> 3478 uproot.source.futures.delayed_raise(*obj)\r\n 3479\r\n 3480 else:\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)\r\n 44 exec(\"raise exception_class, exception_value, traceback\")\r\n 45 else:\r\n---> 46 raise exception_value.with_traceback(traceback)\r\n 47\r\n 48\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)\r\n 3420 basket_arrays = branchid_arrays[branch.cache_key]\r\n 3421\r\n-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(\r\n 3423 basket.data,\r\n 3424 basket.byte_offsets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)\r\n 151 \"cursor_offset\": cursor_offset,\r\n 152 }\r\n--> 153 output = 
awkward._connect._uproot.basket_array(\r\n 154 form, data, byte_offsets, extra\r\n 155 )\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)\r\n 36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\r\n 37\r\n---> 38 return ak._io.uproot_issue_90(\r\n 39 form,\r\n 40 ak.layout.NumpyArray(data),\r\n\r\nAttributeError: module 'awkward' has no attribute '_io'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport json\n\n# don't import awkward._connect._uproot in awkward/__init__.py!\nimport uproot\n\nimport awkward as ak\n\n\ndef can_optimize(interpretation, form):\n if isinstance(interpretation, uproot.interpretation.objects.AsObjects):\n jsonform = json.loads(form.tojson(verbose=True))\n if (\n jsonform[\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"parameters\"].get(\"uproot\")\n == {\"as\": \"array\", \"header\": True, \"speedbump\": False}\n and jsonform[\"content\"][\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"content\"][\"parameters\"].get(\"uproot\")\n == {\"as\": \"vector\", \"header\": False}\n and jsonform[\"content\"][\"content\"][\"class\"] == \"NumpyArray\"\n and jsonform[\"content\"][\"content\"][\"inner_shape\"] == []\n and (\n jsonform[\"content\"][\"content\"].get(\"primitive\") == \"float64\"\n or jsonform[\"content\"][\"content\"].get(\"primitive\") == \"int32\"\n )\n ):\n return True\n\n return False\n\n\ndef basket_array(form, data, byte_offsets, extra):\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n\n return ak._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n )\n", "path": "src/awkward/_connect/_uproot.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport json\n\n# don't import awkward._connect._uproot in awkward/__init__.py!\nimport uproot\n\nimport awkward as ak\n\n\ndef can_optimize(interpretation, form):\n if isinstance(interpretation, uproot.interpretation.objects.AsObjects):\n jsonform = json.loads(form.tojson(verbose=True))\n if (\n jsonform[\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"parameters\"].get(\"uproot\")\n == {\"as\": \"array\", \"header\": True, \"speedbump\": False}\n and jsonform[\"content\"][\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"content\"][\"parameters\"].get(\"uproot\")\n == {\"as\": \"vector\", \"header\": False}\n and jsonform[\"content\"][\"content\"][\"class\"] == \"NumpyArray\"\n and jsonform[\"content\"][\"content\"][\"inner_shape\"] == []\n and (\n jsonform[\"content\"][\"content\"].get(\"primitive\") == \"float64\"\n or jsonform[\"content\"][\"content\"].get(\"primitive\") == \"int32\"\n )\n ):\n return True\n\n return False\n\n\ndef basket_array(form, data, byte_offsets, extra):\n import awkward._io\n\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n\n return awkward._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n )\n", "path": "src/awkward/_connect/_uproot.py"}]} | 1,739 | 155 |
gh_patches_debug_30613 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2729 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 errors not raised on some pages
Some pages should return a 404 error instead of displaying the page content.
I discovered this bug while trying to fix broken links on the terms-of-use (CGU) page: a missing mailto currently produces a link to http://zestedesavoir.com/pages/cgu/[email protected], which displays the CGU page, whereas a 404 error should be raised. Whatever is appended to the URL, it still works, and this is the case for several pages...
A `$` is missing at the end of some regular expressions in zds/pages/urls.py.
Moreover, the urls.py file imports pages.views but never uses it.
So I am wondering whether I should remove the views import (line 5) or instead use, for example, `views.about` rather than `'zds.pages.views.about'`?
I would be happy to take care of making a PR to fix these bugs :)
EDIT: I cannot assign myself to this issue (as suggested by CONTRIBUTING.md), permission problems?
--- END ISSUE ---
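A self-contained sketch (mine, not from the issue) of what the missing `$` changes; Django's URL resolver applies the same kind of regex matching shown here:
```python
import re

unanchored = re.compile(r'^cgu/')    # like url(r'^cgu/', ...)
anchored = re.compile(r'^cgu/$')     # like url(r'^cgu/$', ...)

path = 'cgu/[email protected]'
print(bool(unanchored.match(path)))  # True  -> the CGU page is served, no 404
print(bool(anchored.match(path)))    # False -> no pattern matches, Django returns a 404
```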
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/pages/urls.py`
Content:
```
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6
7
8 urlpatterns = patterns('',
9
10 url(r'^apropos/$', 'zds.pages.views.about'),
11 url(r'^association/$', 'zds.pages.views.association'),
12 url(r'^contact/', 'zds.pages.views.contact'),
13 url(r'^cgu/', 'zds.pages.views.eula'),
14 url(r'^alertes/', 'zds.pages.views.alerts'),
15 url(r'^cookies/', 'zds.pages.views.cookies'),
16 url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
17
18 url(r'^$', 'zds.pages.views.index'),
19 )
20
```
Path: `zds/search/urls.py`
Content:
```
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6 from haystack.views import search_view_factory
7 from zds.search.views import CustomSearchView
8 from zds.search.forms import CustomSearchForm
9
10 urlpatterns = patterns('haystack.views',
11 url(r'^$', search_view_factory(
12 view_class=CustomSearchView,
13 template='search/search.html',
14 form_class=CustomSearchForm
15 ), name='haystack_search'))
16
17 urlpatterns += patterns('',
18 url(r'^opensearch.xml', 'zds.search.views.opensearch')
19 )
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/pages/urls.py b/zds/pages/urls.py
--- a/zds/pages/urls.py
+++ b/zds/pages/urls.py
@@ -2,17 +2,15 @@
from django.conf.urls import patterns, url
-from . import views
-
urlpatterns = patterns('',
url(r'^apropos/$', 'zds.pages.views.about'),
url(r'^association/$', 'zds.pages.views.association'),
- url(r'^contact/', 'zds.pages.views.contact'),
- url(r'^cgu/', 'zds.pages.views.eula'),
- url(r'^alertes/', 'zds.pages.views.alerts'),
- url(r'^cookies/', 'zds.pages.views.cookies'),
+ url(r'^contact/$', 'zds.pages.views.contact'),
+ url(r'^cgu/$', 'zds.pages.views.eula'),
+ url(r'^alertes/$', 'zds.pages.views.alerts'),
+ url(r'^cookies/$', 'zds.pages.views.cookies'),
url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
url(r'^$', 'zds.pages.views.index'),
diff --git a/zds/search/urls.py b/zds/search/urls.py
--- a/zds/search/urls.py
+++ b/zds/search/urls.py
@@ -2,7 +2,6 @@
from django.conf.urls import patterns, url
-from . import views
from haystack.views import search_view_factory
from zds.search.views import CustomSearchView
from zds.search.forms import CustomSearchForm
@@ -15,5 +14,5 @@
), name='haystack_search'))
urlpatterns += patterns('',
- url(r'^opensearch.xml', 'zds.search.views.opensearch')
+ url(r'^opensearch\.xml$', 'zds.search.views.opensearch')
)
| {"golden_diff": "diff --git a/zds/pages/urls.py b/zds/pages/urls.py\n--- a/zds/pages/urls.py\n+++ b/zds/pages/urls.py\n@@ -2,17 +2,15 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n-\n \n urlpatterns = patterns('',\n \n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n- url(r'^contact/', 'zds.pages.views.contact'),\n- url(r'^cgu/', 'zds.pages.views.eula'),\n- url(r'^alertes/', 'zds.pages.views.alerts'),\n- url(r'^cookies/', 'zds.pages.views.cookies'),\n+ url(r'^contact/$', 'zds.pages.views.contact'),\n+ url(r'^cgu/$', 'zds.pages.views.eula'),\n+ url(r'^alertes/$', 'zds.pages.views.alerts'),\n+ url(r'^cookies/$', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n \n url(r'^$', 'zds.pages.views.index'),\ndiff --git a/zds/search/urls.py b/zds/search/urls.py\n--- a/zds/search/urls.py\n+++ b/zds/search/urls.py\n@@ -2,7 +2,6 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n from haystack.views import search_view_factory\n from zds.search.views import CustomSearchView\n from zds.search.forms import CustomSearchForm\n@@ -15,5 +14,5 @@\n ), name='haystack_search'))\n \n urlpatterns += patterns('',\n- url(r'^opensearch.xml', 'zds.search.views.opensearch')\n+ url(r'^opensearch\\.xml$', 'zds.search.views.opensearch')\n )\n", "issue": "Erreurs 404 non g\u00e9n\u00e9r\u00e9es sur certaines pages\nCertaines pages devraient g\u00e9n\u00e9r\u00e9es des erreurs 404 au lieu d'afficher le contenu des pages.\nJ'ai d\u00e9couvert ce bug en voulant corriger des erreurs dans les liens sur la page des CGU, un oubli de mailto produit actuellement un lien vers http://zestedesavoir.com/pages/cgu/[email protected] qui affiche la page des CGU. Or une erreur 404 devrait \u00eatre g\u00e9n\u00e9r\u00e9e. Peu importe la suite de l'URL cela fonctionne et ce pour plusieurs pages...\nIl manque un `$` \u00e0 la fin de certaines expressions r\u00e9guli\u00e8res dans zds/pages/urls.py.\n\nDe plus le fichier urls.py importe pages.views mais ne l'utilise \u00e0 aucun moment.\nDu coup je me demande si je supprime l'import de views (ligne 5) ou bien si j'utilise par exemple `views.about` au lieu de `'zds.pages.views.about'` ?\n\nJe veux bien bien me charger de faire une PR pour corriger ces bugs :)\n\nEDIT: Je n'arrive pas \u00e0 m'assigner \u00e0 cette issue (comme sugg\u00e9r\u00e9 par CONTRIBUTING.md), probl\u00e8mes de droits ?\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . import views\n\n\nurlpatterns = patterns('',\n\n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n url(r'^contact/', 'zds.pages.views.contact'),\n url(r'^cgu/', 'zds.pages.views.eula'),\n url(r'^alertes/', 'zds.pages.views.alerts'),\n url(r'^cookies/', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n\n url(r'^$', 'zds.pages.views.index'),\n )\n", "path": "zds/pages/urls.py"}, {"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . 
import views\nfrom haystack.views import search_view_factory\nfrom zds.search.views import CustomSearchView\nfrom zds.search.forms import CustomSearchForm\n\nurlpatterns = patterns('haystack.views',\n url(r'^$', search_view_factory(\n view_class=CustomSearchView,\n template='search/search.html',\n form_class=CustomSearchForm\n ), name='haystack_search'))\n\nurlpatterns += patterns('',\n url(r'^opensearch.xml', 'zds.search.views.opensearch')\n )\n", "path": "zds/search/urls.py"}], "after_files": [{"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\n\nurlpatterns = patterns('',\n\n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n url(r'^contact/$', 'zds.pages.views.contact'),\n url(r'^cgu/$', 'zds.pages.views.eula'),\n url(r'^alertes/$', 'zds.pages.views.alerts'),\n url(r'^cookies/$', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n\n url(r'^$', 'zds.pages.views.index'),\n )\n", "path": "zds/pages/urls.py"}, {"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom haystack.views import search_view_factory\nfrom zds.search.views import CustomSearchView\nfrom zds.search.forms import CustomSearchForm\n\nurlpatterns = patterns('haystack.views',\n url(r'^$', search_view_factory(\n view_class=CustomSearchView,\n template='search/search.html',\n form_class=CustomSearchForm\n ), name='haystack_search'))\n\nurlpatterns += patterns('',\n url(r'^opensearch\\.xml$', 'zds.search.views.opensearch')\n )\n", "path": "zds/search/urls.py"}]} | 886 | 405 |
gh_patches_debug_912 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-60 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FileNotFoundError after new update
Getting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts' after the new update.
I'm not exactly good at all the coding stuff; it was working just fine yesterday, but I had downloaded the repo instead of using git clone. For the sake of easier updates I started a new installation by git cloning into my user folder. The installation went well, but I ran into this while launching through webui.py.
Python 3.10.6
venv C:\Users\admin\stable-diffusion-webui\venv\Scripts\Python.exe
Launching webui.py...
Loading model from C:\Users\admin\stable-diffusion-webui\model.ckpt
Global Step: 470000
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 859.52 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "C:\Users\admin\stable-diffusion-webui\webui.py", line 135, in <module>
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
File "C:\Users\admin\stable-diffusion-webui\modules\scripts.py", line 32, in load_scripts
for filename in os.listdir(basedir):
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts'
--- END ISSUE ---
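A small sketch (not from the report) of the failure and of the guard that the patch below adds to `load_scripts()`:
```python
import os

basedir = "scripts"                      # assumed missing, as in the traceback above
if not os.path.exists(basedir):          # the guard added by the fix
    print(f"{basedir!r} does not exist; nothing to load")
else:
    for filename in os.listdir(basedir): # without the guard this raises FileNotFoundError
        print(filename)
```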
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/scripts.py`
Content:
```
1 import os
2 import sys
3 import traceback
4
5 import modules.ui as ui
6 import gradio as gr
7
8 from modules.processing import StableDiffusionProcessing
9
10 class Script:
11 filename = None
12 args_from = None
13 args_to = None
14
15 def title(self):
16 raise NotImplementedError()
17
18 def ui(self, is_img2img):
19 pass
20
21 def run(self, *args):
22 raise NotImplementedError()
23
24 def describe(self):
25 return ""
26
27
28 scripts = []
29
30
31 def load_scripts(basedir):
32 for filename in os.listdir(basedir):
33 path = os.path.join(basedir, filename)
34
35 if not os.path.isfile(path):
36 continue
37
38 with open(path, "r", encoding="utf8") as file:
39 text = file.read()
40
41 from types import ModuleType
42 compiled = compile(text, path, 'exec')
43 module = ModuleType(filename)
44 exec(compiled, module.__dict__)
45
46 for key, script_class in module.__dict__.items():
47 if type(script_class) == type and issubclass(script_class, Script):
48 obj = script_class()
49 obj.filename = path
50
51 scripts.append(obj)
52
53
54 def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
55 try:
56 res = func(*args, **kwargs)
57 return res
58 except Exception:
59 print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
60 print(traceback.format_exc(), file=sys.stderr)
61
62 return default
63
64
65 def setup_ui(is_img2img):
66 titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in scripts]
67
68 dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
69
70 inputs = [dropdown]
71
72 for script in scripts:
73 script.args_from = len(inputs)
74 controls = script.ui(is_img2img)
75
76 for control in controls:
77 control.visible = False
78
79 inputs += controls
80 script.args_to = len(inputs)
81
82 def select_script(index):
83 if index > 0:
84 script = scripts[index-1]
85 args_from = script.args_from
86 args_to = script.args_to
87 else:
88 args_from = 0
89 args_to = 0
90
91 return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
92
93 dropdown.change(
94 fn=select_script,
95 inputs=[dropdown],
96 outputs=inputs
97 )
98
99 return inputs
100
101
102 def run(p: StableDiffusionProcessing, *args):
103 script_index = args[0] - 1
104
105 if script_index < 0 or script_index >= len(scripts):
106 return None
107
108 script = scripts[script_index]
109
110 script_args = args[script.args_from:script.args_to]
111 processed = script.run(p, *script_args)
112
113 return processed
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/scripts.py b/modules/scripts.py
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -29,6 +29,9 @@
def load_scripts(basedir):
+ if not os.path.exists(basedir):
+ return
+
for filename in os.listdir(basedir):
path = os.path.join(basedir, filename)
| {"golden_diff": "diff --git a/modules/scripts.py b/modules/scripts.py\n--- a/modules/scripts.py\n+++ b/modules/scripts.py\n@@ -29,6 +29,9 @@\n \r\n \r\n def load_scripts(basedir):\r\n+ if not os.path.exists(basedir):\r\n+ return\r\n+\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\n", "issue": "FileNotFoundError after new update\nGetting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts' after the new update. \r\n\r\nNot exactly good at all the coding stuff, using it just fine yesterday but I downloaded the repo instead of git clone, for the sake of easier update I started a new installation by git cloning into user folder and the installation went well but ran into this while launching through webui.py.\r\n\r\nPython 3.10.6\r\nvenv C:\\Users\\admin\\stable-diffusion-webui\\venv\\Scripts\\Python.exe\r\nLaunching webui.py...\r\nLoading model from C:\\Users\\admin\\stable-diffusion-webui\\model.ckpt\r\nGlobal Step: 470000\r\nLatentDiffusion: Running in eps-prediction mode\r\nDiffusionWrapper has 859.52 M params.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\webui.py\", line 135, in <module>\r\n modules.scripts.load_scripts(os.path.join(script_path, \"scripts\"))\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\modules\\scripts.py\", line 32, in load_scripts\r\n for filename in os.listdir(basedir):\r\nFileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts'\n", "before_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise NotImplementedError()\r\n\r\n def ui(self, is_img2img):\r\n pass\r\n\r\n def run(self, *args):\r\n raise NotImplementedError()\r\n\r\n def describe(self):\r\n return \"\"\r\n\r\n\r\nscripts = []\r\n\r\n\r\ndef load_scripts(basedir):\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\r\n\r\n if not os.path.isfile(path):\r\n continue\r\n\r\n with open(path, \"r\", encoding=\"utf8\") as file:\r\n text = file.read()\r\n\r\n from types import ModuleType\r\n compiled = compile(text, path, 'exec')\r\n module = ModuleType(filename)\r\n exec(compiled, module.__dict__)\r\n\r\n for key, script_class in module.__dict__.items():\r\n if type(script_class) == type and issubclass(script_class, Script):\r\n obj = script_class()\r\n obj.filename = path\r\n\r\n scripts.append(obj)\r\n\r\n\r\ndef wrap_call(func, filename, funcname, *args, default=None, **kwargs):\r\n try:\r\n res = func(*args, **kwargs)\r\n return res\r\n except Exception:\r\n print(f\"Error calling: {filename}/{funcname}\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n return default\r\n\r\n\r\ndef setup_ui(is_img2img):\r\n titles = [wrap_call(script.title, script.filename, \"title\") or f\"{script.filename} [error]\" for script in scripts]\r\n\r\n dropdown = gr.Dropdown(label=\"Script\", choices=[\"None\"] + titles, value=\"None\", type=\"index\")\r\n\r\n inputs = [dropdown]\r\n\r\n for script in 
scripts:\r\n script.args_from = len(inputs)\r\n controls = script.ui(is_img2img)\r\n\r\n for control in controls:\r\n control.visible = False\r\n\r\n inputs += controls\r\n script.args_to = len(inputs)\r\n\r\n def select_script(index):\r\n if index > 0:\r\n script = scripts[index-1]\r\n args_from = script.args_from\r\n args_to = script.args_to\r\n else:\r\n args_from = 0\r\n args_to = 0\r\n\r\n return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]\r\n\r\n dropdown.change(\r\n fn=select_script,\r\n inputs=[dropdown],\r\n outputs=inputs\r\n )\r\n\r\n return inputs\r\n\r\n\r\ndef run(p: StableDiffusionProcessing, *args):\r\n script_index = args[0] - 1\r\n\r\n if script_index < 0 or script_index >= len(scripts):\r\n return None\r\n\r\n script = scripts[script_index]\r\n\r\n script_args = args[script.args_from:script.args_to]\r\n processed = script.run(p, *script_args)\r\n\r\n return processed\r\n", "path": "modules/scripts.py"}], "after_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise NotImplementedError()\r\n\r\n def ui(self, is_img2img):\r\n pass\r\n\r\n def run(self, *args):\r\n raise NotImplementedError()\r\n\r\n def describe(self):\r\n return \"\"\r\n\r\n\r\nscripts = []\r\n\r\n\r\ndef load_scripts(basedir):\r\n if not os.path.exists(basedir):\r\n return\r\n\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\r\n\r\n if not os.path.isfile(path):\r\n continue\r\n\r\n with open(path, \"r\", encoding=\"utf8\") as file:\r\n text = file.read()\r\n\r\n from types import ModuleType\r\n compiled = compile(text, path, 'exec')\r\n module = ModuleType(filename)\r\n exec(compiled, module.__dict__)\r\n\r\n for key, script_class in module.__dict__.items():\r\n if type(script_class) == type and issubclass(script_class, Script):\r\n obj = script_class()\r\n obj.filename = path\r\n\r\n scripts.append(obj)\r\n\r\n\r\ndef wrap_call(func, filename, funcname, *args, default=None, **kwargs):\r\n try:\r\n res = func(*args, **kwargs)\r\n return res\r\n except Exception:\r\n print(f\"Error calling: {filename}/{funcname}\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n return default\r\n\r\n\r\ndef setup_ui(is_img2img):\r\n titles = [wrap_call(script.title, script.filename, \"title\") or f\"{script.filename} [error]\" for script in scripts]\r\n\r\n dropdown = gr.Dropdown(label=\"Script\", choices=[\"None\"] + titles, value=\"None\", type=\"index\")\r\n\r\n inputs = [dropdown]\r\n\r\n for script in scripts:\r\n script.args_from = len(inputs)\r\n controls = script.ui(is_img2img)\r\n\r\n for control in controls:\r\n control.visible = False\r\n\r\n inputs += controls\r\n script.args_to = len(inputs)\r\n\r\n def select_script(index):\r\n if index > 0:\r\n script = scripts[index-1]\r\n args_from = script.args_from\r\n args_to = script.args_to\r\n else:\r\n args_from = 0\r\n args_to = 0\r\n\r\n return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]\r\n\r\n dropdown.change(\r\n fn=select_script,\r\n inputs=[dropdown],\r\n outputs=inputs\r\n )\r\n\r\n return inputs\r\n\r\n\r\ndef run(p: StableDiffusionProcessing, *args):\r\n script_index = args[0] - 1\r\n\r\n if script_index < 0 or script_index >= len(scripts):\r\n return None\r\n\r\n 
script = scripts[script_index]\r\n\r\n script_args = args[script.args_from:script.args_to]\r\n processed = script.run(p, *script_args)\r\n\r\n return processed\r\n", "path": "modules/scripts.py"}]} | 1,520 | 83 |
gh_patches_debug_12710 | rasdani/github-patches | git_diff | autorope__donkeycar-273 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support WIFI network that does not have internet access
REF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py
The system determines its IP address by connecting a UDP socket to 8.8.8.8.
This approach fails when the WIFI network does not have internet access.
--- END ISSUE ---
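A condensed sketch of why the lookup fails offline and how a fallback avoids it; the function name and the loopback fallback mirror the patch below but are otherwise my own illustration:
```python
import socket

def get_ip_address_with_fallback():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect sends no packets, but the OS still needs a route to
        # 8.8.8.8; on a LAN without internet access this raises OSError.
        s.connect(("8.8.8.8", 53))
        return s.getsockname()[0]
    except OSError:
        return "127.0.0.1"   # loopback fallback, as in the fix
    finally:
        s.close()

print(get_ip_address_with_fallback())
```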
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `donkeycar/util/web.py`
Content:
```
1 import socket
2
3 def get_ip_address():
4 ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
5 [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
6 [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
7 return ip
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py
--- a/donkeycar/util/web.py
+++ b/donkeycar/util/web.py
@@ -1,7 +1,10 @@
import socket
def get_ip_address():
- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
- return ip
+ try:
+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
+ return ip
+ except OSError: #occurs when cannot connect to '8.8.8.8'
+ return "127.0.0.1" #loopback
\ No newline at end of file
| {"golden_diff": "diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py\n--- a/donkeycar/util/web.py\n+++ b/donkeycar/util/web.py\n@@ -1,7 +1,10 @@\n import socket\n \n def get_ip_address():\n- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n- return ip\n+ try:\n+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n+ return ip\n+ except OSError: #occurs when cannot connect to '8.8.8.8' \n+ return \"127.0.0.1\" #loopback\n\\ No newline at end of file\n", "issue": "Support WIFI network that does not have internet access\nREF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py\r\n\r\nThe system determines its IP address using a ping to 8.8.8.8\r\nThis approach fails when the WIFI network does not have internet access.\r\n\r\n\r\n\n", "before_files": [{"content": "import socket\n\ndef get_ip_address():\n ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n return ip\n", "path": "donkeycar/util/web.py"}], "after_files": [{"content": "import socket\n\ndef get_ip_address():\n try:\n ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n return ip\n except OSError: #occurs when cannot connect to '8.8.8.8' \n return \"127.0.0.1\" #loopback", "path": "donkeycar/util/web.py"}]} | 439 | 311 |
gh_patches_debug_26766 | rasdani/github-patches | git_diff | modin-project__modin-1045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Capitalization of "S" in "S3://" results in inconsistent behaviors when reading from S3 path
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux
- **Modin installed from (source or binary)**: binary
- **Modin version**: 0.7.0
- **Python version**: 3.6.8
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
Reading data from a S3 path, e.g. `read_csv`, `read_json`, behaves differently based on the capitalization of "S" in the path. See below code example.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
```
import pandas as pd
import ray
import modin.pandas as mpd
filepath = "s3://my-bucket/data/traffic.json"
filepath_2 = "s3://my-bucket/data/BikeSharingDaily.csv"
filepath_3 = "S3://my-bucket/data/BikeSharingDaily.csv"
# working
df_native = pd.read_json(filepath, lines=True)
df_native_2 = pd.read_csv(filepath_2)
# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')
df_modin = mpd.read_json(filepath, lines=True)
# working (but it prints, defaulting to pandas implementation)
df_modin_2 = mpd.read_csv(filepath_2)
# working (no additional print)
df_modin_3 = mpd.read_csv(filepath_3)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/engines/base/io/file_reader.py`
Content:
```
1 import os
2 import re
3
4 S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
5 NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
6
7
8 class FileReader:
9 frame_cls = None
10 frame_partition_cls = None
11 query_compiler_cls = None
12
13 @classmethod
14 def get_path(cls, file_path):
15 if S3_ADDRESS_REGEX.search(file_path):
16 return file_path
17 else:
18 return os.path.abspath(file_path)
19
20 @classmethod
21 def file_open(cls, file_path, mode="rb", compression="infer"):
22 if isinstance(file_path, str):
23 match = S3_ADDRESS_REGEX.search(file_path)
24 if match:
25 import s3fs as S3FS
26 from botocore.exceptions import NoCredentialsError
27
28 s3fs = S3FS.S3FileSystem(anon=False)
29 try:
30 return s3fs.open(file_path)
31 except NoCredentialsError:
32 s3fs = S3FS.S3FileSystem(anon=True)
33 return s3fs.open(file_path)
34 elif compression == "gzip":
35 import gzip
36
37 return gzip.open(file_path, mode=mode)
38 elif compression == "bz2":
39 import bz2
40
41 return bz2.BZ2File(file_path, mode=mode)
42 elif compression == "xz":
43 import lzma
44
45 return lzma.LZMAFile(file_path, mode=mode)
46 elif compression == "zip":
47 import zipfile
48
49 zf = zipfile.ZipFile(file_path, mode=mode.replace("b", ""))
50 if zf.mode == "w":
51 return zf
52 elif zf.mode == "r":
53 zip_names = zf.namelist()
54 if len(zip_names) == 1:
55 f = zf.open(zip_names.pop())
56 return f
57 elif len(zip_names) == 0:
58 raise ValueError(
59 "Zero files found in ZIP file {}".format(file_path)
60 )
61 else:
62 raise ValueError(
63 "Multiple files found in ZIP file."
64 " Only one file per ZIP: {}".format(zip_names)
65 )
66
67 return open(file_path, mode=mode)
68
69 @classmethod
70 def file_size(cls, f):
71 cur_pos = f.tell()
72 f.seek(0, os.SEEK_END)
73 size = f.tell()
74 f.seek(cur_pos, os.SEEK_SET)
75 return size
76
77 @classmethod
78 def file_exists(cls, file_path):
79 if isinstance(file_path, str):
80 match = S3_ADDRESS_REGEX.search(file_path)
81 if match:
82 import s3fs as S3FS
83 from botocore.exceptions import NoCredentialsError
84
85 s3fs = S3FS.S3FileSystem(anon=False)
86 exists = False
87 try:
88 exists = s3fs.exists(file_path) or exists
89 except NoCredentialsError:
90 pass
91 s3fs = S3FS.S3FileSystem(anon=True)
92 return exists or s3fs.exists(file_path)
93 return os.path.exists(file_path)
94
95 @classmethod
96 def deploy(cls, func, args, num_return_vals):
97 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
98
99 def parse(self, func, args, num_return_vals):
100 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
101
102 @classmethod
103 def materialize(cls, obj_id):
104 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py
--- a/modin/engines/base/io/file_reader.py
+++ b/modin/engines/base/io/file_reader.py
@@ -1,7 +1,7 @@
import os
import re
-S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
+S3_ADDRESS_REGEX = re.compile("[sS]3://(.*?)/(.*)")
NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
@@ -21,7 +21,9 @@
def file_open(cls, file_path, mode="rb", compression="infer"):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
@@ -78,7 +80,9 @@
def file_exists(cls, file_path):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
| {"golden_diff": "diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py\n--- a/modin/engines/base/io/file_reader.py\n+++ b/modin/engines/base/io/file_reader.py\n@@ -1,7 +1,7 @@\n import os\n import re\n \n-S3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\n+S3_ADDRESS_REGEX = re.compile(\"[sS]3://(.*?)/(.*)\")\n NOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n \n \n@@ -21,7 +21,9 @@\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n \n@@ -78,7 +80,9 @@\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n", "issue": "Capitalization of \"S\" in \"S3://\" results in inconsistent behaviors when reading from S3 path\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux\r\n- **Modin installed from (source or binary)**: binary\r\n- **Modin version**: 0.7.0\r\n- **Python version**: 3.6.8\r\n- **Exact command to reproduce**: \r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nReading data from a S3 path, e.g. `read_csv`, `read_json`, behaves differently based on the capitalization of \"S\" in the path. See below code example.\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. 
-->\r\n```\r\nimport pandas as pd\r\nimport ray\r\nimport modin.pandas as mpd\r\n\r\nfilepath = \"s3://my-bucket/data/traffic.json\"\r\nfilepath_2 = \"s3://my-bucket/data/BikeSharingDaily.csv\"\r\nfilepath_3 = \"S3://my-bucket/data/BikeSharingDaily.csv\"\r\n\r\n# working\r\ndf_native = pd.read_json(filepath, lines=True)\r\ndf_native_2 = pd.read_csv(filepath_2)\r\n\r\n# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')\r\ndf_modin = mpd.read_json(filepath, lines=True)\r\n\r\n# working (but it prints, defaulting to pandas implementation)\r\ndf_modin_2 = mpd.read_csv(filepath_2)\r\n\r\n# working (no additional print)\r\ndf_modin_3 = mpd.read_csv(filepath_3)\r\n```\n", "before_files": [{"content": "import os\nimport re\n\nS3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\nNOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n\n\nclass FileReader:\n frame_cls = None\n frame_partition_cls = None\n query_compiler_cls = None\n\n @classmethod\n def get_path(cls, file_path):\n if S3_ADDRESS_REGEX.search(file_path):\n return file_path\n else:\n return os.path.abspath(file_path)\n\n @classmethod\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n try:\n return s3fs.open(file_path)\n except NoCredentialsError:\n s3fs = S3FS.S3FileSystem(anon=True)\n return s3fs.open(file_path)\n elif compression == \"gzip\":\n import gzip\n\n return gzip.open(file_path, mode=mode)\n elif compression == \"bz2\":\n import bz2\n\n return bz2.BZ2File(file_path, mode=mode)\n elif compression == \"xz\":\n import lzma\n\n return lzma.LZMAFile(file_path, mode=mode)\n elif compression == \"zip\":\n import zipfile\n\n zf = zipfile.ZipFile(file_path, mode=mode.replace(\"b\", \"\"))\n if zf.mode == \"w\":\n return zf\n elif zf.mode == \"r\":\n zip_names = zf.namelist()\n if len(zip_names) == 1:\n f = zf.open(zip_names.pop())\n return f\n elif len(zip_names) == 0:\n raise ValueError(\n \"Zero files found in ZIP file {}\".format(file_path)\n )\n else:\n raise ValueError(\n \"Multiple files found in ZIP file.\"\n \" Only one file per ZIP: {}\".format(zip_names)\n )\n\n return open(file_path, mode=mode)\n\n @classmethod\n def file_size(cls, f):\n cur_pos = f.tell()\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.seek(cur_pos, os.SEEK_SET)\n return size\n\n @classmethod\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n exists = False\n try:\n exists = s3fs.exists(file_path) or exists\n except NoCredentialsError:\n pass\n s3fs = S3FS.S3FileSystem(anon=True)\n return exists or s3fs.exists(file_path)\n return os.path.exists(file_path)\n\n @classmethod\n def deploy(cls, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n def parse(self, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n @classmethod\n def materialize(cls, obj_id):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n", "path": "modin/engines/base/io/file_reader.py"}], "after_files": [{"content": "import os\nimport re\n\nS3_ADDRESS_REGEX = re.compile(\"[sS]3://(.*?)/(.*)\")\nNOT_IMPLEMENTED_MESSAGE = \"Implement in children 
classes!\"\n\n\nclass FileReader:\n frame_cls = None\n frame_partition_cls = None\n query_compiler_cls = None\n\n @classmethod\n def get_path(cls, file_path):\n if S3_ADDRESS_REGEX.search(file_path):\n return file_path\n else:\n return os.path.abspath(file_path)\n\n @classmethod\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match is not None:\n if file_path[0] == \"S\":\n file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n try:\n return s3fs.open(file_path)\n except NoCredentialsError:\n s3fs = S3FS.S3FileSystem(anon=True)\n return s3fs.open(file_path)\n elif compression == \"gzip\":\n import gzip\n\n return gzip.open(file_path, mode=mode)\n elif compression == \"bz2\":\n import bz2\n\n return bz2.BZ2File(file_path, mode=mode)\n elif compression == \"xz\":\n import lzma\n\n return lzma.LZMAFile(file_path, mode=mode)\n elif compression == \"zip\":\n import zipfile\n\n zf = zipfile.ZipFile(file_path, mode=mode.replace(\"b\", \"\"))\n if zf.mode == \"w\":\n return zf\n elif zf.mode == \"r\":\n zip_names = zf.namelist()\n if len(zip_names) == 1:\n f = zf.open(zip_names.pop())\n return f\n elif len(zip_names) == 0:\n raise ValueError(\n \"Zero files found in ZIP file {}\".format(file_path)\n )\n else:\n raise ValueError(\n \"Multiple files found in ZIP file.\"\n \" Only one file per ZIP: {}\".format(zip_names)\n )\n\n return open(file_path, mode=mode)\n\n @classmethod\n def file_size(cls, f):\n cur_pos = f.tell()\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.seek(cur_pos, os.SEEK_SET)\n return size\n\n @classmethod\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match is not None:\n if file_path[0] == \"S\":\n file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n exists = False\n try:\n exists = s3fs.exists(file_path) or exists\n except NoCredentialsError:\n pass\n s3fs = S3FS.S3FileSystem(anon=True)\n return exists or s3fs.exists(file_path)\n return os.path.exists(file_path)\n\n @classmethod\n def deploy(cls, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n def parse(self, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n @classmethod\n def materialize(cls, obj_id):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n", "path": "modin/engines/base/io/file_reader.py"}]} | 1,650 | 330 |
gh_patches_debug_35876 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-127 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add testing agains each feat PT version
## 🚀 Feature
Add a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...
### Motivation
have better validation if some functions are not supported in old PT versions
### Pitch
<!-- A clear and concise description of what you want to happen. -->
### Alternatives
use CI action with conda setup, probably no need for pull large docker image
### Additional context
take inspiration from past Conda matrix in PL
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integrations/lightning_models.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import torch
15 from pytorch_lightning import LightningModule
16 from torch.utils.data import Dataset
17
18
19 class RandomDictStringDataset(Dataset):
20
21 def __init__(self, size, length):
22 self.len = length
23 self.data = torch.randn(length, size)
24
25 def __getitem__(self, index):
26 return {"id": str(index), "x": self.data[index]}
27
28 def __len__(self):
29 return self.len
30
31
32 class RandomDataset(Dataset):
33
34 def __init__(self, size, length):
35 self.len = length
36 self.data = torch.randn(length, size)
37
38 def __getitem__(self, index):
39 return self.data[index]
40
41 def __len__(self):
42 return self.len
43
44
45 class BoringModel(LightningModule):
46
47 def __init__(self):
48 """
49 Testing PL Module
50
51 Use as follows:
52 - subclass
53 - modify the behavior for what you want
54
55 class TestModel(BaseTestModel):
56 def training_step(...):
57 # do your own thing
58
59 or:
60
61 model = BaseTestModel()
62 model.training_epoch_end = None
63
64 """
65 super().__init__()
66 self.layer = torch.nn.Linear(32, 2)
67
68 def forward(self, x):
69 return self.layer(x)
70
71 @staticmethod
72 def loss(_, prediction):
73 # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
74 return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
75
76 def step(self, x):
77 x = self(x)
78 out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
79 return out
80
81 def training_step(self, batch, batch_idx):
82 output = self.layer(batch)
83 loss = self.loss(batch, output)
84 return {"loss": loss}
85
86 def training_step_end(self, training_step_outputs):
87 return training_step_outputs
88
89 def training_epoch_end(self, outputs) -> None:
90 torch.stack([x["loss"] for x in outputs]).mean()
91
92 def validation_step(self, batch, batch_idx):
93 output = self.layer(batch)
94 loss = self.loss(batch, output)
95 return {"x": loss}
96
97 def validation_epoch_end(self, outputs) -> None:
98 torch.stack([x['x'] for x in outputs]).mean()
99
100 def test_step(self, batch, batch_idx):
101 output = self.layer(batch)
102 loss = self.loss(batch, output)
103 return {"y": loss}
104
105 def test_epoch_end(self, outputs) -> None:
106 torch.stack([x["y"] for x in outputs]).mean()
107
108 def configure_optimizers(self):
109 optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
110 lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
111 return [optimizer], [lr_scheduler]
112
113 def train_dataloader(self):
114 return torch.utils.data.DataLoader(RandomDataset(32, 64))
115
116 def val_dataloader(self):
117 return torch.utils.data.DataLoader(RandomDataset(32, 64))
118
119 def test_dataloader(self):
120 return torch.utils.data.DataLoader(RandomDataset(32, 64))
121
```
Path: `torchmetrics/utilities/imports.py`
Content:
```
1 from distutils.version import LooseVersion
2
3 import torch
4
5 _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
6 _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
7 _TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion("1.6.0")
8
```
Path: `integrations/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/integrations/__init__.py b/integrations/__init__.py
--- a/integrations/__init__.py
+++ b/integrations/__init__.py
@@ -0,0 +1,3 @@
+from torchmetrics.utilities.imports import _module_available
+
+_PL_AVAILABLE = _module_available('pytorch_lightning')
diff --git a/integrations/lightning_models.py b/integrations/lightning_models.py
--- a/integrations/lightning_models.py
+++ b/integrations/lightning_models.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import torch
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
diff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py
--- a/torchmetrics/utilities/imports.py
+++ b/torchmetrics/utilities/imports.py
@@ -1,6 +1,64 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from distutils.version import LooseVersion
+from importlib import import_module
+from importlib.util import find_spec
import torch
+from pkg_resources import DistributionNotFound
+
+
+def _module_available(module_path: str) -> bool:
+ """
+ Check if a path is available in your environment
+
+ >>> _module_available('os')
+ True
+ >>> _module_available('bla.bla')
+ False
+ """
+ try:
+ return find_spec(module_path) is not None
+ except AttributeError:
+ # Python 3.6
+ return False
+ except ModuleNotFoundError:
+ # Python 3.7+
+ return False
+
+
+def _compare_version(package: str, op, version) -> bool:
+ """
+ Compare package version with some requirements
+
+ >>> import operator
+ >>> _compare_version("torch", operator.ge, "0.1")
+ True
+ """
+ try:
+ pkg = import_module(package)
+ except (ModuleNotFoundError, DistributionNotFound):
+ return False
+ try:
+ pkg_version = LooseVersion(pkg.__version__)
+ except AttributeError:
+ return False
+ if not (hasattr(pkg_version, "vstring") and hasattr(pkg_version, "version")):
+ # this is mock by sphinx, so it shall return True ro generate all summaries
+ return True
+ return op(pkg_version, LooseVersion(version))
+
_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
| {"golden_diff": "diff --git a/integrations/__init__.py b/integrations/__init__.py\n--- a/integrations/__init__.py\n+++ b/integrations/__init__.py\n@@ -0,0 +1,3 @@\n+from torchmetrics.utilities.imports import _module_available\n+\n+_PL_AVAILABLE = _module_available('pytorch_lightning')\ndiff --git a/integrations/lightning_models.py b/integrations/lightning_models.py\n--- a/integrations/lightning_models.py\n+++ b/integrations/lightning_models.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\n import torch\n from pytorch_lightning import LightningModule\n from torch.utils.data import Dataset\ndiff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py\n--- a/torchmetrics/utilities/imports.py\n+++ b/torchmetrics/utilities/imports.py\n@@ -1,6 +1,64 @@\n+# Copyright The PyTorch Lightning team.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n from distutils.version import LooseVersion\n+from importlib import import_module\n+from importlib.util import find_spec\n \n import torch\n+from pkg_resources import DistributionNotFound\n+\n+\n+def _module_available(module_path: str) -> bool:\n+ \"\"\"\n+ Check if a path is available in your environment\n+\n+ >>> _module_available('os')\n+ True\n+ >>> _module_available('bla.bla')\n+ False\n+ \"\"\"\n+ try:\n+ return find_spec(module_path) is not None\n+ except AttributeError:\n+ # Python 3.6\n+ return False\n+ except ModuleNotFoundError:\n+ # Python 3.7+\n+ return False\n+\n+\n+def _compare_version(package: str, op, version) -> bool:\n+ \"\"\"\n+ Compare package version with some requirements\n+\n+ >>> import operator\n+ >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n+ True\n+ \"\"\"\n+ try:\n+ pkg = import_module(package)\n+ except (ModuleNotFoundError, DistributionNotFound):\n+ return False\n+ try:\n+ pkg_version = LooseVersion(pkg.__version__)\n+ except AttributeError:\n+ return False\n+ if not (hasattr(pkg_version, \"vstring\") and hasattr(pkg_version, \"version\")):\n+ # this is mock by sphinx, so it shall return True ro generate all summaries\n+ return True\n+ return op(pkg_version, LooseVersion(version))\n+\n \n _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n", "issue": "Add testing agains each feat PT version\n## \ud83d\ude80 Feature\r\n\r\nAdd a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...\r\n\r\n### Motivation\r\n\r\nhave better validation if some functions are not supported in old PT versions\r\n\r\n### Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. 
-->\r\n\r\n### Alternatives\r\n\r\nuse CI action with conda setup, probably no need for pull large docker image\r\n\r\n### Additional context\r\n\r\ntake inspiration from past Conda matrix in PL\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.utils.data import Dataset\n\n\nclass RandomDictStringDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return {\"id\": str(index), \"x\": self.data[index]}\n\n def __len__(self):\n return self.len\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\n\nclass BoringModel(LightningModule):\n\n def __init__(self):\n \"\"\"\n Testing PL Module\n\n Use as follows:\n - subclass\n - modify the behavior for what you want\n\n class TestModel(BaseTestModel):\n def training_step(...):\n # do your own thing\n\n or:\n\n model = BaseTestModel()\n model.training_epoch_end = None\n\n \"\"\"\n super().__init__()\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n return self.layer(x)\n\n @staticmethod\n def loss(_, prediction):\n # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls\n return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))\n\n def step(self, x):\n x = self(x)\n out = torch.nn.functional.mse_loss(x, torch.ones_like(x))\n return out\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_step_end(self, training_step_outputs):\n return training_step_outputs\n\n def training_epoch_end(self, outputs) -> None:\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"x\": loss}\n\n def validation_epoch_end(self, outputs) -> None:\n torch.stack([x['x'] for x in outputs]).mean()\n\n def test_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"y\": loss}\n\n def test_epoch_end(self, outputs) -> None:\n torch.stack([x[\"y\"] for x in outputs]).mean()\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def test_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n", "path": "integrations/lightning_models.py"}, {"content": "from 
distutils.version import LooseVersion\n\nimport torch\n\n_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\")\n", "path": "torchmetrics/utilities/imports.py"}, {"content": "", "path": "integrations/__init__.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.utils.data import Dataset\n\n\nclass RandomDictStringDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return {\"id\": str(index), \"x\": self.data[index]}\n\n def __len__(self):\n return self.len\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\n\nclass BoringModel(LightningModule):\n\n def __init__(self):\n \"\"\"\n Testing PL Module\n\n Use as follows:\n - subclass\n - modify the behavior for what you want\n\n class TestModel(BaseTestModel):\n def training_step(...):\n # do your own thing\n\n or:\n\n model = BaseTestModel()\n model.training_epoch_end = None\n\n \"\"\"\n super().__init__()\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n return self.layer(x)\n\n @staticmethod\n def loss(_, prediction):\n # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls\n return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))\n\n def step(self, x):\n x = self(x)\n out = torch.nn.functional.mse_loss(x, torch.ones_like(x))\n return out\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_step_end(self, training_step_outputs):\n return training_step_outputs\n\n def training_epoch_end(self, outputs) -> None:\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"x\": loss}\n\n def validation_epoch_end(self, outputs) -> None:\n torch.stack([x['x'] for x in outputs]).mean()\n\n def test_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"y\": loss}\n\n def test_epoch_end(self, outputs) -> None:\n torch.stack([x[\"y\"] for x in outputs]).mean()\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def val_dataloader(self):\n return 
torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def test_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n", "path": "integrations/lightning_models.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom distutils.version import LooseVersion\nfrom importlib import import_module\nfrom importlib.util import find_spec\n\nimport torch\nfrom pkg_resources import DistributionNotFound\n\n\ndef _module_available(module_path: str) -> bool:\n \"\"\"\n Check if a path is available in your environment\n\n >>> _module_available('os')\n True\n >>> _module_available('bla.bla')\n False\n \"\"\"\n try:\n return find_spec(module_path) is not None\n except AttributeError:\n # Python 3.6\n return False\n except ModuleNotFoundError:\n # Python 3.7+\n return False\n\n\ndef _compare_version(package: str, op, version) -> bool:\n \"\"\"\n Compare package version with some requirements\n\n >>> import operator\n >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n True\n \"\"\"\n try:\n pkg = import_module(package)\n except (ModuleNotFoundError, DistributionNotFound):\n return False\n try:\n pkg_version = LooseVersion(pkg.__version__)\n except AttributeError:\n return False\n if not (hasattr(pkg_version, \"vstring\") and hasattr(pkg_version, \"version\")):\n # this is mock by sphinx, so it shall return True ro generate all summaries\n return True\n return op(pkg_version, LooseVersion(version))\n\n\n_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\")\n", "path": "torchmetrics/utilities/imports.py"}, {"content": "from torchmetrics.utilities.imports import _module_available\n\n_PL_AVAILABLE = _module_available('pytorch_lightning')\n", "path": "integrations/__init__.py"}]} | 1,593 | 744 |
gh_patches_debug_3999 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-743 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Too many JSON files error
https://mne.discourse.group/t/mne-bids-pipeline-too-many-json-files-error/6436
> This is a MEG dataset which I converted to BIDS format using mne-bids. The json files which appear to cause the trouble are [_beh.json] sidecar files for the behavioural data I saved to the beh/ subfolder like so:
>
> |MNE-BIDS_data/
> |— README
> |— dataset_description.json
> |— participants.json
> |— participants.tsv
> |— sub-01/
> |------ sub-01_scans.tsv
> |------ beh/
> |--------- sub-01_task-main_run-01_beh.json
> |--------- sub-01_task-main_run-01_beh.tsv
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne_bids_pipeline/steps/init/_02_find_empty_room.py`
Content:
```
1 """Find empty-room data matches."""
2
3 from types import SimpleNamespace
4 from typing import Dict, Optional
5
6 from mne.utils import _pl
7 from mne_bids import BIDSPath
8
9 from ..._config_utils import (
10 get_datatype,
11 get_task,
12 get_sessions,
13 get_subjects,
14 get_runs,
15 )
16 from ..._io import _empty_room_match_path, _write_json
17 from ..._logging import gen_log_kwargs, logger
18 from ..._run import _update_for_splits, failsafe_run, save_logs
19
20
21 def get_input_fnames_find_empty_room(
22 *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace
23 ) -> Dict[str, BIDSPath]:
24 """Get paths of files required by filter_data function."""
25 bids_path_in = BIDSPath(
26 subject=subject,
27 run=run,
28 session=session,
29 task=cfg.task,
30 acquisition=cfg.acq,
31 recording=cfg.rec,
32 space=cfg.space,
33 datatype=cfg.datatype,
34 processing=cfg.proc,
35 root=cfg.bids_root,
36 check=False,
37 )
38 in_files: Dict[str, BIDSPath] = dict()
39 in_files[f"raw_run-{run}"] = bids_path_in
40 _update_for_splits(in_files, f"raw_run-{run}", single=True)
41 if hasattr(bids_path_in, "find_matching_sidecar"):
42 in_files["sidecar"] = (
43 bids_path_in.copy()
44 .update(datatype=None)
45 .find_matching_sidecar(extension=".json")
46 )
47 try:
48 fname = bids_path_in.find_empty_room(use_sidecar_only=True)
49 except Exception:
50 fname = None
51 if fname is None and hasattr(bids_path_in, "get_empty_room_candidates"):
52 for ci, path in enumerate(bids_path_in.get_empty_room_candidates()):
53 in_files[f"empty_room_candidate_{ci}"] = path
54 return in_files
55
56
57 @failsafe_run(
58 get_input_fnames=get_input_fnames_find_empty_room,
59 )
60 def find_empty_room(
61 *,
62 cfg: SimpleNamespace,
63 exec_params: SimpleNamespace,
64 subject: str,
65 session: Optional[str],
66 run: Optional[str],
67 in_files: Dict[str, BIDSPath],
68 ) -> Dict[str, BIDSPath]:
69 raw_path = in_files.pop(f"raw_run-{run}")
70 in_files.pop("sidecar", None)
71 try:
72 fname = raw_path.find_empty_room(use_sidecar_only=True)
73 except (FileNotFoundError, AssertionError, ValueError):
74 fname = ""
75 if fname is None:
76 # sidecar is very fast and checking all can be slow (seconds), so only
77 # log when actually looking through files
78 ending = "empty-room files"
79 if len(in_files): # MNE-BIDS < 0.12 missing get_empty_room_candidates
80 ending = f"{len(in_files)} empty-room file{_pl(in_files)}"
81 msg = f"Nearest-date matching {ending}"
82 logger.info(**gen_log_kwargs(message=msg))
83 try:
84 fname = raw_path.find_empty_room()
85 except (
86 ValueError, # non-MEG data
87 AssertionError, # MNE-BIDS check assert exists()
88 FileNotFoundError,
89 ): # MNE-BIDS PR-1080 exists()
90 fname = None
91 in_files.clear() # MNE-BIDS find_empty_room should have looked at all
92 elif fname == "":
93 fname = None # not downloaded, or EEG data
94 elif not fname.fpath.exists():
95 fname = None # path found by sidecar but does not exist
96 out_files = dict()
97 out_files["empty_room_match"] = _empty_room_match_path(raw_path, cfg)
98 _write_json(out_files["empty_room_match"], dict(fname=fname))
99 return out_files
100
101
102 def get_config(
103 *,
104 config,
105 ) -> SimpleNamespace:
106 cfg = SimpleNamespace(
107 proc=config.proc,
108 task=get_task(config),
109 datatype=get_datatype(config),
110 acq=config.acq,
111 rec=config.rec,
112 space=config.space,
113 bids_root=config.bids_root,
114 deriv_root=config.deriv_root,
115 )
116 return cfg
117
118
119 def main(*, config) -> None:
120 """Run find_empty_room."""
121 if not config.process_empty_room:
122 msg = "Skipping, process_empty_room is set to False …"
123 logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
124 return
125 if get_datatype(config) != "meg":
126 msg = "Skipping, empty-room data only relevant for MEG …"
127 logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
128 return
129 # This will be I/O bound if the sidecar is not complete, so let's not run
130 # in parallel.
131 logs = list()
132 for subject in get_subjects(config):
133 if config.use_maxwell_filter:
134 run = config.mf_reference_run
135 else:
136 run = get_runs(config=config, subject=subject)[0]
137 logs.append(
138 find_empty_room(
139 cfg=get_config(
140 config=config,
141 ),
142 exec_params=config.exec_params,
143 subject=subject,
144 session=get_sessions(config)[0],
145 run=run,
146 )
147 )
148 save_logs(config=config, logs=logs)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -41,7 +41,7 @@
if hasattr(bids_path_in, "find_matching_sidecar"):
in_files["sidecar"] = (
bids_path_in.copy()
- .update(datatype=None)
+ .update(datatype=None, suffix="meg")
.find_matching_sidecar(extension=".json")
)
try:
| {"golden_diff": "diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n@@ -41,7 +41,7 @@\n if hasattr(bids_path_in, \"find_matching_sidecar\"):\n in_files[\"sidecar\"] = (\n bids_path_in.copy()\n- .update(datatype=None)\n+ .update(datatype=None, suffix=\"meg\")\n .find_matching_sidecar(extension=\".json\")\n )\n try:\n", "issue": "BUG: Too many JSON files error\nhttps://mne.discourse.group/t/mne-bids-pipeline-too-many-json-files-error/6436\r\n\r\n> This is a MEG dataset which I converted to BIDS format using mne-bids. The json files which appear to cause the trouble are [_beh.json] sidecar files for the behavioural data I saved to the beh/ subfolder like so:\r\n>\r\n> |MNE-BIDS_data/\r\n> |\u2014 README\r\n> |\u2014 dataset_description.json\r\n> |\u2014 participants.json\r\n> |\u2014 participants.tsv\r\n> |\u2014 sub-01/\r\n> |------ sub-01_scans.tsv\r\n> |------ beh/\r\n> |--------- sub-01_task-main_run-01_beh.json\r\n> |--------- sub-01_task-main_run-01_beh.tsv\n", "before_files": [{"content": "\"\"\"Find empty-room data matches.\"\"\"\n\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional\n\nfrom mne.utils import _pl\nfrom mne_bids import BIDSPath\n\nfrom ..._config_utils import (\n get_datatype,\n get_task,\n get_sessions,\n get_subjects,\n get_runs,\n)\nfrom ..._io import _empty_room_match_path, _write_json\nfrom ..._logging import gen_log_kwargs, logger\nfrom ..._run import _update_for_splits, failsafe_run, save_logs\n\n\ndef get_input_fnames_find_empty_room(\n *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace\n) -> Dict[str, BIDSPath]:\n \"\"\"Get paths of files required by filter_data function.\"\"\"\n bids_path_in = BIDSPath(\n subject=subject,\n run=run,\n session=session,\n task=cfg.task,\n acquisition=cfg.acq,\n recording=cfg.rec,\n space=cfg.space,\n datatype=cfg.datatype,\n processing=cfg.proc,\n root=cfg.bids_root,\n check=False,\n )\n in_files: Dict[str, BIDSPath] = dict()\n in_files[f\"raw_run-{run}\"] = bids_path_in\n _update_for_splits(in_files, f\"raw_run-{run}\", single=True)\n if hasattr(bids_path_in, \"find_matching_sidecar\"):\n in_files[\"sidecar\"] = (\n bids_path_in.copy()\n .update(datatype=None)\n .find_matching_sidecar(extension=\".json\")\n )\n try:\n fname = bids_path_in.find_empty_room(use_sidecar_only=True)\n except Exception:\n fname = None\n if fname is None and hasattr(bids_path_in, \"get_empty_room_candidates\"):\n for ci, path in enumerate(bids_path_in.get_empty_room_candidates()):\n in_files[f\"empty_room_candidate_{ci}\"] = path\n return in_files\n\n\n@failsafe_run(\n get_input_fnames=get_input_fnames_find_empty_room,\n)\ndef find_empty_room(\n *,\n cfg: SimpleNamespace,\n exec_params: SimpleNamespace,\n subject: str,\n session: Optional[str],\n run: Optional[str],\n in_files: Dict[str, BIDSPath],\n) -> Dict[str, BIDSPath]:\n raw_path = in_files.pop(f\"raw_run-{run}\")\n in_files.pop(\"sidecar\", None)\n try:\n fname = raw_path.find_empty_room(use_sidecar_only=True)\n except (FileNotFoundError, AssertionError, ValueError):\n fname = \"\"\n if fname is None:\n # sidecar is very fast and checking all can be slow (seconds), so only\n # log when actually looking through files\n ending = \"empty-room files\"\n if len(in_files): # MNE-BIDS < 0.12 missing get_empty_room_candidates\n ending = f\"{len(in_files)} empty-room 
file{_pl(in_files)}\"\n msg = f\"Nearest-date matching {ending}\"\n logger.info(**gen_log_kwargs(message=msg))\n try:\n fname = raw_path.find_empty_room()\n except (\n ValueError, # non-MEG data\n AssertionError, # MNE-BIDS check assert exists()\n FileNotFoundError,\n ): # MNE-BIDS PR-1080 exists()\n fname = None\n in_files.clear() # MNE-BIDS find_empty_room should have looked at all\n elif fname == \"\":\n fname = None # not downloaded, or EEG data\n elif not fname.fpath.exists():\n fname = None # path found by sidecar but does not exist\n out_files = dict()\n out_files[\"empty_room_match\"] = _empty_room_match_path(raw_path, cfg)\n _write_json(out_files[\"empty_room_match\"], dict(fname=fname))\n return out_files\n\n\ndef get_config(\n *,\n config,\n) -> SimpleNamespace:\n cfg = SimpleNamespace(\n proc=config.proc,\n task=get_task(config),\n datatype=get_datatype(config),\n acq=config.acq,\n rec=config.rec,\n space=config.space,\n bids_root=config.bids_root,\n deriv_root=config.deriv_root,\n )\n return cfg\n\n\ndef main(*, config) -> None:\n \"\"\"Run find_empty_room.\"\"\"\n if not config.process_empty_room:\n msg = \"Skipping, process_empty_room is set to False \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n if get_datatype(config) != \"meg\":\n msg = \"Skipping, empty-room data only relevant for MEG \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n # This will be I/O bound if the sidecar is not complete, so let's not run\n # in parallel.\n logs = list()\n for subject in get_subjects(config):\n if config.use_maxwell_filter:\n run = config.mf_reference_run\n else:\n run = get_runs(config=config, subject=subject)[0]\n logs.append(\n find_empty_room(\n cfg=get_config(\n config=config,\n ),\n exec_params=config.exec_params,\n subject=subject,\n session=get_sessions(config)[0],\n run=run,\n )\n )\n save_logs(config=config, logs=logs)\n", "path": "mne_bids_pipeline/steps/init/_02_find_empty_room.py"}], "after_files": [{"content": "\"\"\"Find empty-room data matches.\"\"\"\n\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional\n\nfrom mne.utils import _pl\nfrom mne_bids import BIDSPath\n\nfrom ..._config_utils import (\n get_datatype,\n get_task,\n get_sessions,\n get_subjects,\n get_runs,\n)\nfrom ..._io import _empty_room_match_path, _write_json\nfrom ..._logging import gen_log_kwargs, logger\nfrom ..._run import _update_for_splits, failsafe_run, save_logs\n\n\ndef get_input_fnames_find_empty_room(\n *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace\n) -> Dict[str, BIDSPath]:\n \"\"\"Get paths of files required by filter_data function.\"\"\"\n bids_path_in = BIDSPath(\n subject=subject,\n run=run,\n session=session,\n task=cfg.task,\n acquisition=cfg.acq,\n recording=cfg.rec,\n space=cfg.space,\n datatype=cfg.datatype,\n processing=cfg.proc,\n root=cfg.bids_root,\n check=False,\n )\n in_files: Dict[str, BIDSPath] = dict()\n in_files[f\"raw_run-{run}\"] = bids_path_in\n _update_for_splits(in_files, f\"raw_run-{run}\", single=True)\n if hasattr(bids_path_in, \"find_matching_sidecar\"):\n in_files[\"sidecar\"] = (\n bids_path_in.copy()\n .update(datatype=None, suffix=\"meg\")\n .find_matching_sidecar(extension=\".json\")\n )\n try:\n fname = bids_path_in.find_empty_room(use_sidecar_only=True)\n except Exception:\n fname = None\n if fname is None and hasattr(bids_path_in, \"get_empty_room_candidates\"):\n for ci, path in enumerate(bids_path_in.get_empty_room_candidates()):\n 
in_files[f\"empty_room_candidate_{ci}\"] = path\n return in_files\n\n\n@failsafe_run(\n get_input_fnames=get_input_fnames_find_empty_room,\n)\ndef find_empty_room(\n *,\n cfg: SimpleNamespace,\n exec_params: SimpleNamespace,\n subject: str,\n session: Optional[str],\n run: Optional[str],\n in_files: Dict[str, BIDSPath],\n) -> Dict[str, BIDSPath]:\n raw_path = in_files.pop(f\"raw_run-{run}\")\n in_files.pop(\"sidecar\", None)\n try:\n fname = raw_path.find_empty_room(use_sidecar_only=True)\n except (FileNotFoundError, AssertionError, ValueError):\n fname = \"\"\n if fname is None:\n # sidecar is very fast and checking all can be slow (seconds), so only\n # log when actually looking through files\n ending = \"empty-room files\"\n if len(in_files): # MNE-BIDS < 0.12 missing get_empty_room_candidates\n ending = f\"{len(in_files)} empty-room file{_pl(in_files)}\"\n msg = f\"Nearest-date matching {ending}\"\n logger.info(**gen_log_kwargs(message=msg))\n try:\n fname = raw_path.find_empty_room()\n except (\n ValueError, # non-MEG data\n AssertionError, # MNE-BIDS check assert exists()\n FileNotFoundError,\n ): # MNE-BIDS PR-1080 exists()\n fname = None\n in_files.clear() # MNE-BIDS find_empty_room should have looked at all\n elif fname == \"\":\n fname = None # not downloaded, or EEG data\n elif not fname.fpath.exists():\n fname = None # path found by sidecar but does not exist\n out_files = dict()\n out_files[\"empty_room_match\"] = _empty_room_match_path(raw_path, cfg)\n _write_json(out_files[\"empty_room_match\"], dict(fname=fname))\n return out_files\n\n\ndef get_config(\n *,\n config,\n) -> SimpleNamespace:\n cfg = SimpleNamespace(\n proc=config.proc,\n task=get_task(config),\n datatype=get_datatype(config),\n acq=config.acq,\n rec=config.rec,\n space=config.space,\n bids_root=config.bids_root,\n deriv_root=config.deriv_root,\n )\n return cfg\n\n\ndef main(*, config) -> None:\n \"\"\"Run find_empty_room.\"\"\"\n if not config.process_empty_room:\n msg = \"Skipping, process_empty_room is set to False \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n if get_datatype(config) != \"meg\":\n msg = \"Skipping, empty-room data only relevant for MEG \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n # This will be I/O bound if the sidecar is not complete, so let's not run\n # in parallel.\n logs = list()\n for subject in get_subjects(config):\n if config.use_maxwell_filter:\n run = config.mf_reference_run\n else:\n run = get_runs(config=config, subject=subject)[0]\n logs.append(\n find_empty_room(\n cfg=get_config(\n config=config,\n ),\n exec_params=config.exec_params,\n subject=subject,\n session=get_sessions(config)[0],\n run=run,\n )\n )\n save_logs(config=config, logs=logs)\n", "path": "mne_bids_pipeline/steps/init/_02_find_empty_room.py"}]} | 1,924 | 148 |
gh_patches_debug_14606 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
La comparaison des versions a perdu sa sidebar
## Étapes pour reproduire
- prenez un tuto avec plusieurs éditions
- cliquer sur "comparer les versions"
- sélectionner deux versions
**Comportement observé**
la sidebar n'apparaît pas dans la page de comparaison des versions
**Comportement désiré**
La sidebar est là.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/htmldiff.py`
Content:
```
1 from difflib import HtmlDiff
2 from django import template
3 from django.utils.html import format_html
4 from django.utils.safestring import mark_safe
5 from django.utils.translation import ugettext_lazy as _
6
7
8 register = template.Library()
9
10
11 @register.simple_tag
12 def htmldiff(string1, string2):
13
14 try:
15 txt1 = string1.decode('utf-8').splitlines()
16 # string1 is an empty SafeText from template
17 except AttributeError:
18 txt1 = string1.splitlines()
19
20 try:
21 txt2 = string2.decode('utf-8').splitlines()
22 except AttributeError:
23 txt2 = string2.splitlines()
24
25 diff = HtmlDiff(tabsize=4, wrapcolumn=80)
26 result = diff.make_table(txt1, txt2, context=True, numlines=2)
27
28 if 'No Differences Found' in result:
29 return format_html('<p>{}</p>', _('Pas de changements.'))
30 else:
31 return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py
--- a/zds/utils/templatetags/htmldiff.py
+++ b/zds/utils/templatetags/htmldiff.py
@@ -22,10 +22,12 @@
except AttributeError:
txt2 = string2.splitlines()
- diff = HtmlDiff(tabsize=4, wrapcolumn=80)
+ diff = HtmlDiff(tabsize=4)
result = diff.make_table(txt1, txt2, context=True, numlines=2)
if 'No Differences Found' in result:
return format_html('<p>{}</p>', _('Pas de changements.'))
else:
- return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:
+ r = mark_safe(result.replace('<td nowrap="nowrap">', '<td>').replace(' ', ' '))
+ return format_html('<div class="diff_delta">{}</div>', r)
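For intuition on why those two replacements are needed, here is a small standalone check (illustrative only; the exact HTML emitted by `difflib` may vary slightly between Python versions):

```python
from difflib import HtmlDiff

# difflib escapes spaces as &nbsp; and marks cells nowrap="nowrap",
# which is exactly what the patch above strips back out so the
# comparison table can wrap normally.
table = HtmlDiff(tabsize=4).make_table(["a b"], ["a  b"], context=True, numlines=2)
print('&nbsp;' in table)                 # True
print('<td nowrap="nowrap">' in table)   # True
```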
| {"golden_diff": "diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py\n--- a/zds/utils/templatetags/htmldiff.py\n+++ b/zds/utils/templatetags/htmldiff.py\n@@ -22,10 +22,12 @@\n except AttributeError:\n txt2 = string2.splitlines()\n \n- diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n+ diff = HtmlDiff(tabsize=4)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n \n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n- return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:\n+ r = mark_safe(result.replace('<td nowrap=\"nowrap\">', '<td>').replace(' ', ' '))\n+ return format_html('<div class=\"diff_delta\">{}</div>', r)\n", "issue": "La comparaison des versions a perdu sa sidebar\n## \u00c9tapes pour reproduire \r\n\r\n- prenez un tuto avec plusieurs \u00e9ditions\r\n- cliquer sur \"comparer les versions\"\r\n- s\u00e9lectionner deux versions\r\n\r\n**Comportement observ\u00e9**\r\n\r\nla sidebar n'appara\u00eet pas dans la page de comparaison des versions\r\n\r\n**Comportement d\u00e9sir\u00e9**\r\n\r\nLa sidebar est l\u00e0.\n", "before_files": [{"content": "from difflib import HtmlDiff\nfrom django import template\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef htmldiff(string1, string2):\n\n try:\n txt1 = string1.decode('utf-8').splitlines()\n # string1 is an empty SafeText from template\n except AttributeError:\n txt1 = string1.splitlines()\n\n try:\n txt2 = string2.decode('utf-8').splitlines()\n except AttributeError:\n txt2 = string2.splitlines()\n\n diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n\n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n", "path": "zds/utils/templatetags/htmldiff.py"}], "after_files": [{"content": "from difflib import HtmlDiff\nfrom django import template\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef htmldiff(string1, string2):\n\n try:\n txt1 = string1.decode('utf-8').splitlines()\n # string1 is an empty SafeText from template\n except AttributeError:\n txt1 = string1.splitlines()\n\n try:\n txt2 = string2.decode('utf-8').splitlines()\n except AttributeError:\n txt2 = string2.splitlines()\n\n diff = HtmlDiff(tabsize=4)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n\n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:\n r = mark_safe(result.replace('<td nowrap=\"nowrap\">', '<td>').replace(' ', ' '))\n return format_html('<div class=\"diff_delta\">{}</div>', r)\n", "path": "zds/utils/templatetags/htmldiff.py"}]} | 634 | 252 |
gh_patches_debug_3292 | rasdani/github-patches | git_diff | getmoto__moto-2305 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API Gateway generates invalid IDs
Generated IDs in API Gateway resources look like `A-Z601A-Z47201`. They contain `A-Z` instead of letters.
--- END ISSUE ---
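To see the bug in isolation, here is a minimal standalone sketch (hypothetical code, not taken from moto; it just mirrors the helper shown below):

```python
import random
import string

def create_id(chars, size=10):
    return ''.join(str(random.choice(chars)) for _ in range(size))

buggy_chars = list(range(10)) + ['A-Z']                       # 'A-Z' is one literal string
fixed_chars = list(range(10)) + list(string.ascii_lowercase)  # actual letters

print(create_id(buggy_chars))  # e.g. '3A-Z7A-Z912...' - the literal 'A-Z' shows up
print(create_id(fixed_chars))  # e.g. '3k7qa912bf'
```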
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/apigateway/utils.py`
Content:
```
1 from __future__ import unicode_literals
2 import six
3 import random
4
5
6 def create_id():
7 size = 10
8 chars = list(range(10)) + ['A-Z']
9 return ''.join(six.text_type(random.choice(chars)) for x in range(size))
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py
--- a/moto/apigateway/utils.py
+++ b/moto/apigateway/utils.py
@@ -1,9 +1,10 @@
from __future__ import unicode_literals
import six
import random
+import string
def create_id():
size = 10
- chars = list(range(10)) + ['A-Z']
+ chars = list(range(10)) + list(string.ascii_lowercase)
return ''.join(six.text_type(random.choice(chars)) for x in range(size))
| {"golden_diff": "diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py\n--- a/moto/apigateway/utils.py\n+++ b/moto/apigateway/utils.py\n@@ -1,9 +1,10 @@\n from __future__ import unicode_literals\n import six\n import random\n+import string\n \n \n def create_id():\n size = 10\n- chars = list(range(10)) + ['A-Z']\n+ chars = list(range(10)) + list(string.ascii_lowercase)\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "issue": "API Gateway generates invalid IDs\nGenerated IDs in API Gateway resources look like `A-Z601A-Z47201`. They contain `A-Z` instead of letters.\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport six\nimport random\n\n\ndef create_id():\n size = 10\n chars = list(range(10)) + ['A-Z']\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "path": "moto/apigateway/utils.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nimport six\nimport random\nimport string\n\n\ndef create_id():\n size = 10\n chars = list(range(10)) + list(string.ascii_lowercase)\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "path": "moto/apigateway/utils.py"}]} | 369 | 130 |
gh_patches_debug_28024 | rasdani/github-patches | git_diff | google__flax-1311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10
### Problem you have encountered:
 `threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref: python/cpython#25174
### What you expected to happen:
Use `notify_all` in the places below.
```
rg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon'
flax/training/prefetch_iterator.py
58: self._cond.notifyAll()
68: self._cond.notifyAll()
80: self._cond.notifyAll()
88: self._cond.notifyAll()
```
--- END ISSUE ---
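The rename itself is a one-for-one substitution; a minimal sketch, independent of flax:

```python
import threading

cond = threading.Condition()
with cond:
    # cond.notifyAll()   # deprecated alias, warns on Python 3.10+
    cond.notify_all()    # preferred spelling
```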
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/training/prefetch_iterator.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Utility for constructing an iterator which prefetches data asynchronously.
16 """
17
18 import threading
19 import warnings
20
21
22 class PrefetchIterator:
23 """Wraps an iterator to provide async prefetching.
24
25 DEPRECATION WARNING:
26 TensorFlow datasets no longer require manual prefetching.
27
28 Previously this class was used to make data loading using TensorFlow datasets
29 more efficient. Now TF data handles prefetching with NumPy iterators
30 correctly.
31
32 Example::
33
34 tf_iter = dataset.as_numpy_iterator() # only loads data while calling next
35 tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background
36
37 """
38
39 def __init__(self, data_iter, buffer_size=1):
40 """Construct a PrefetchIterator.
41
42 Args:
43 data_iter: the Iterator that should be prefetched.
44 buffer_size: how many items to prefetch (default: 1).
45 """
46 warnings.warn('PrefetchIterator is deprecated. Use the standard `tf.data`'
47 ' prefetch method instead', DeprecationWarning)
48
49 self._data_iter = data_iter
50 self.buffer_size = buffer_size
51 self._cond = threading.Condition()
52 self._buffer = []
53 self._active = True
54 self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)
55 self._thread.start()
56 self._error = None
57
58 def __iter__(self):
59 return self
60
61 def __next__(self):
62 with self._cond:
63 self._cond.wait_for(lambda: self._buffer or not self._active)
64 if self._buffer:
65 item = self._buffer.pop(0)
66 self._cond.notifyAll()
67 return item
68 if self._error:
69 raise self._error # pylint: disable=raising-bad-type
70 assert not self._active
71 raise StopIteration()
72
73 def close(self):
74 with self._cond:
75 self._active = False
76 self._cond.notifyAll()
77
78 def _prefetch_loop(self):
79 """Prefetch loop that prefetches a tf dataset."""
80 def _predicate():
81 return len(self._buffer) < self.buffer_size or not self._active
82
83 while True:
84 try:
85 item = next(self._data_iter)
86 with self._cond:
87 self._buffer.append(item)
88 self._cond.notifyAll()
89 self._cond.wait_for(_predicate)
90 if not self._active:
91 return
92 except Exception as e: # pylint: disable=broad-except
93 with self._cond:
94 self._error = e
95 self._active = False
96 self._cond.notifyAll()
97 return
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py
--- a/flax/training/prefetch_iterator.py
+++ b/flax/training/prefetch_iterator.py
@@ -55,7 +55,7 @@
self._cond.wait_for(lambda: self._buffer or not self._active)
if self._buffer:
item = self._buffer.pop(0)
- self._cond.notifyAll()
+ self._cond.notify_all()
return item
if self._error:
raise self._error # pylint: disable=raising-bad-type
@@ -65,7 +65,7 @@
def close(self):
with self._cond:
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
def _prefetch_loop(self):
"""Prefetch loop that prefetches a tf dataset."""
@@ -77,7 +77,7 @@
item = next(self._data_iter)
with self._cond:
self._buffer.append(item)
- self._cond.notifyAll()
+ self._cond.notify_all()
self._cond.wait_for(_predicate)
if not self._active:
return
@@ -85,5 +85,5 @@
with self._cond:
self._error = e
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
return
| {"golden_diff": "diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py\n--- a/flax/training/prefetch_iterator.py\n+++ b/flax/training/prefetch_iterator.py\n@@ -55,7 +55,7 @@\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n@@ -65,7 +65,7 @@\n def close(self):\n with self._cond:\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n \n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n@@ -77,7 +77,7 @@\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n@@ -85,5 +85,5 @@\n with self._cond:\n self._error = e\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return\n", "issue": " threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10\n### Problem you have encountered:\r\n\r\n `threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref : python/cpython#25174\r\n\r\n### What you expected to happen:\r\n\r\nuse `notify_all` in below places.\r\n\r\n```\r\nrg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon' \r\nflax/training/prefetch_iterator.py\r\n58: self._cond.notifyAll()\r\n68: self._cond.notifyAll()\r\n80: self._cond.notifyAll()\r\n88: self._cond.notifyAll()\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for constructing an iterator which prefetches data asynchronously.\n\"\"\"\n\nimport threading\nimport warnings\n\n\nclass PrefetchIterator:\n \"\"\"Wraps an iterator to provide async prefetching.\n\n DEPRECATION WARNING:\n TensorFlow datasets no longer require manual prefetching.\n\n Previously this class was used to make data loading using TensorFlow datasets\n more efficient. Now TF data handles prefetching with NumPy iterators\n correctly.\n\n Example::\n\n tf_iter = dataset.as_numpy_iterator() # only loads data while calling next\n tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background\n\n \"\"\"\n\n def __init__(self, data_iter, buffer_size=1):\n \"\"\"Construct a PrefetchIterator.\n\n Args:\n data_iter: the Iterator that should be prefetched.\n buffer_size: how many items to prefetch (default: 1).\n \"\"\"\n warnings.warn('PrefetchIterator is deprecated. 
Use the standard `tf.data`'\n ' prefetch method instead', DeprecationWarning)\n\n self._data_iter = data_iter\n self.buffer_size = buffer_size\n self._cond = threading.Condition()\n self._buffer = []\n self._active = True\n self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)\n self._thread.start()\n self._error = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self._cond:\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n self._cond.notifyAll()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n assert not self._active\n raise StopIteration()\n\n def close(self):\n with self._cond:\n self._active = False\n self._cond.notifyAll()\n\n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n def _predicate():\n return len(self._buffer) < self.buffer_size or not self._active\n\n while True:\n try:\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n self._cond.notifyAll()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n except Exception as e: # pylint: disable=broad-except\n with self._cond:\n self._error = e\n self._active = False\n self._cond.notifyAll()\n return\n", "path": "flax/training/prefetch_iterator.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utility for constructing an iterator which prefetches data asynchronously.\n\"\"\"\n\nimport threading\n\n\nclass PrefetchIterator:\n \"\"\"Wraps an iterator to provide async prefetching.\n\n This class is particularly useful for making data loading using TensorFlow\n more efficient. 
Currently, the numpy iterator in TensorFlow will not\n automatically prefetch data in the background::\n\n tf_iter = dataset.as_numpy_iterator() # only loads data while calling next\n tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background\n\n \"\"\"\n\n def __init__(self, data_iter, buffer_size=1):\n \"\"\"Construct a PrefetchIterator.\n\n Args:\n data_iter: the Iterator that should be prefetched.\n buffer_size: how many items to prefetch (default: 1).\n \"\"\"\n self._data_iter = data_iter\n self.buffer_size = buffer_size\n self._cond = threading.Condition()\n self._buffer = []\n self._active = True\n self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)\n self._thread.start()\n self._error = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self._cond:\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n self._cond.notify_all()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n assert not self._active\n raise StopIteration()\n\n def close(self):\n with self._cond:\n self._active = False\n self._cond.notify_all()\n\n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n def _predicate():\n return len(self._buffer) < self.buffer_size or not self._active\n\n while True:\n try:\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n self._cond.notify_all()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n except Exception as e: # pylint: disable=broad-except\n with self._cond:\n self._error = e\n self._active = False\n self._cond.notify_all()\n return\n", "path": "flax/training/prefetch_iterator.py"}]} | 1,303 | 322 |
gh_patches_debug_823 | rasdani/github-patches | git_diff | angr__angr-1303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cachetools broke their API
There's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate.
My guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.
--- END ISSUE ---
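A minimal sketch of that `__missing__`-based migration (the loader function is a hypothetical stand-in for whatever claripy computes on a cache miss):

```python
import cachetools

def compute_value(key):
    # placeholder for the expensive work previously wired in via `missing=`
    return key * 2

class ComputingLRUCache(cachetools.LRUCache):
    def __missing__(self, key):
        value = compute_value(key)
        self[key] = value
        return value

cache = ComputingLRUCache(maxsize=256)
print(cache[21])  # 42, computed and stored on first access
print(cache[21])  # 42, now served from the cache
```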
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # pylint: disable=no-name-in-module,import-error,unused-variable
2 import os
3 import sys
4 import subprocess
5 import pkg_resources
6 import shutil
7 import platform
8
9 if bytes is str:
10 raise Exception("""
11
12 =-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=
13
14 angr has transitioned to python 3. Due to the small size of the team behind it,
15 we can't reasonably maintain compatibility between both python 2 and python 3.
16 If you want to continue using the most recent version of angr (you definitely
17 want that, trust us) you should upgrade to python 3. It's like getting your
18 vaccinations. It hurts a little bit initially but in the end it's worth it.
19
20 If you are staying on python 2 and would like to make sure you don't get
21 incompatible versions, make sure your pip is at least version 9.0, and it will
22 use our metadata to implicitly avoid them.
23
24 For more information, see here: https://docs.angr.io/MIGRATION.html
25
26 Good luck!
27 """)
28
29 try:
30 from setuptools import setup
31 from setuptools import find_packages
32 packages = find_packages()
33 except ImportError:
34 from distutils.core import setup
35 packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
36
37 from distutils.util import get_platform
38 from distutils.errors import LibError
39 from distutils.command.build import build as _build
40
41 if sys.platform == 'darwin':
42 library_file = "angr_native.dylib"
43 elif sys.platform in ('win32', 'cygwin'):
44 library_file = "angr_native.dll"
45 else:
46 library_file = "angr_native.so"
47
48 def _build_native():
49 try:
50 import unicorn
51 import pyvex
52 except ImportError:
53 raise LibError("You must install unicorn and pyvex before building angr")
54
55 env = os.environ.copy()
56 env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),
57 ('UNICORN_LIB_PATH', 'unicorn', 'lib'),
58 ('UNICORN_LIB_FILE', 'unicorn', 'lib\\unicorn.lib'),
59 ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),
60 ('PYVEX_LIB_PATH', 'pyvex', 'lib'),
61 ('PYVEX_LIB_FILE', 'pyvex', 'lib\\pyvex.lib'))
62 for var, pkg, fnm in env_data:
63 try:
64 env[var] = pkg_resources.resource_filename(pkg, fnm)
65 except KeyError:
66 pass
67
68 cmd1 = ['nmake', '/f', 'Makefile-win']
69 cmd2 = ['make']
70 for cmd in (cmd1, cmd2):
71 try:
72 if subprocess.call(cmd, cwd='native', env=env) != 0:
73 raise LibError('Unable to build angr_native')
74 break
75 except OSError:
76 continue
77 else:
78 raise LibError('Unable to build angr_native')
79
80 shutil.rmtree('angr/lib', ignore_errors=True)
81 os.mkdir('angr/lib')
82 shutil.copy(os.path.join('native', library_file), 'angr/lib')
83
84 class build(_build):
85 def run(self, *args):
86 self.execute(_build_native, (), msg='Building angr_native')
87 _build.run(self, *args)
88
89 cmdclass = {
90 'build': build,
91 }
92
93 try:
94 from setuptools.command.develop import develop as _develop
95 class develop(_develop):
96 def run(self, *args):
97 self.execute(_build_native, (), msg='Building angr_native')
98 _develop.run(self, *args)
99
100 cmdclass['develop'] = develop
101 except ImportError:
102 pass
103
104 if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
105 sys.argv.append('--plat-name')
106 name = get_platform()
107 if 'linux' in name:
108 # linux_* platform tags are disallowed because the python ecosystem is fubar
109 # linux builds should be built in the centos 5 vm for maximum compatibility
110 sys.argv.append('manylinux1_' + platform.machine())
111 else:
112 # https://www.python.org/dev/peps/pep-0425/
113 sys.argv.append(name.replace('.', '_').replace('-', '_'))
114
115 setup(
116 name='angr',
117 version='8.18.10.25',
118 python_requires='>=3.5',
119 description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',
120 url='https://github.com/angr/angr',
121 packages=packages,
122 install_requires=[
123 'ana',
124 'sortedcontainers',
125 'cachetools<3',
126 'capstone>=3.0.5rc2',
127 'cooldict',
128 'dpkt',
129 'futures; python_version == "2.7"',
130 'mulpyplexer',
131 'networkx>=2.0',
132 'progressbar',
133 'rpyc',
134 'cffi>=1.7.0',
135 'unicorn',
136 'archinfo==8.18.10.25',
137 'claripy==8.18.10.25',
138 'cle==8.18.10.25',
139 'pyvex==8.18.10.25',
140 'ailment==8.18.10.25',
141 'GitPython',
142 'pycparser>=2.18',
143 'itanium_demangler',
144 ],
145 setup_requires=['unicorn', 'pyvex'],
146 cmdclass=cmdclass,
147 include_package_data=True,
148 package_data={
149 'angr': ['lib/*']
150 }
151 )
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,7 +122,7 @@
install_requires=[
'ana',
'sortedcontainers',
- 'cachetools<3',
+ 'cachetools',
'capstone>=3.0.5rc2',
'cooldict',
'dpkt',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -122,7 +122,7 @@\n install_requires=[\n 'ana',\n 'sortedcontainers',\n- 'cachetools<3',\n+ 'cachetools',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n", "issue": "Cachetools broke their API\nThere's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate.\r\n\r\nMy guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.\n", "before_files": [{"content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/MIGRATION.html\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\nclass 
build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\ncmdclass = {\n 'build': build,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\nsetup(\n name='angr',\n version='8.18.10.25',\n python_requires='>=3.5',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'ana',\n 'sortedcontainers',\n 'cachetools<3',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n 'futures; python_version == \"2.7\"',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar',\n 'rpyc',\n 'cffi>=1.7.0',\n 'unicorn',\n 'archinfo==8.18.10.25',\n 'claripy==8.18.10.25',\n 'cle==8.18.10.25',\n 'pyvex==8.18.10.25',\n 'ailment==8.18.10.25',\n 'GitPython',\n 'pycparser>=2.18',\n 'itanium_demangler',\n ],\n setup_requires=['unicorn', 'pyvex'],\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*']\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. 
It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/MIGRATION.html\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\nclass build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\ncmdclass = {\n 'build': build,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\nsetup(\n name='angr',\n version='8.18.10.25',\n python_requires='>=3.5',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'ana',\n 'sortedcontainers',\n 'cachetools',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n 'futures; python_version == \"2.7\"',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar',\n 'rpyc',\n 'cffi>=1.7.0',\n 'unicorn',\n 'archinfo==8.18.10.25',\n 'claripy==8.18.10.25',\n 
'cle==8.18.10.25',\n 'pyvex==8.18.10.25',\n 'ailment==8.18.10.25',\n 'GitPython',\n 'pycparser>=2.18',\n 'itanium_demangler',\n ],\n setup_requires=['unicorn', 'pyvex'],\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*']\n }\n)\n", "path": "setup.py"}]} | 2,028 | 89 |
gh_patches_debug_11730 | rasdani/github-patches | git_diff | ckan__ckan-7077 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Search indexing logic called twice after update or create dataset
**CKAN version**
2.10
**Describe the bug**
When updating or creating a dataset, we are indexing the dataset twice in a row, i.e. the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well).
The root cause is the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.
--- END ISSUE ---
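The deduplication idea can be illustrated with a tiny sketch (simplified stand-in objects, not the actual CKAN model classes):

```python
class Package:
    def __init__(self, id):
        self.id = id

pkg, other = Package("pkg-1"), Package("pkg-2")

new = {pkg}             # objects the session reports as new
changed = {pkg, other}  # the same package also shows up as changed

# Only emit a "changed" notification for packages not already notified as "new":
new_ids = {obj.id for obj in new}
changed_only = {obj for obj in changed if obj.id not in new_ids}
print(sorted(obj.id for obj in changed_only))  # ['pkg-2']
```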
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/model/modification.py`
Content:
```
1 # encoding: utf-8
2
3 import logging
4 from typing import Any
5
6 from ckan.lib.search import SearchIndexError
7
8 import ckan.plugins as plugins
9 import ckan.model as model
10
11 log = logging.getLogger(__name__)
12
13 __all__ = ['DomainObjectModificationExtension']
14
15
16 class DomainObjectModificationExtension(plugins.SingletonPlugin):
17 """
18 Notify observers about domain object modifications before commit.
19
20 Observers are other plugins implementing the IDomainObjectModification
21 interface.
22 """
23
24 def before_commit(self, session: Any):
25 self.notify_observers(session, self.notify)
26
27 def notify_observers(self, session: Any, method: Any):
28 session.flush()
29 if not hasattr(session, '_object_cache'):
30 return
31
32 obj_cache = session._object_cache
33 new = obj_cache['new']
34 changed = obj_cache['changed']
35 deleted = obj_cache['deleted']
36
37 for obj in set(new):
38 if isinstance(obj, (model.Package, model.Resource)):
39 method(obj, model.DomainObjectOperation.new)
40 for obj in set(deleted):
41 if isinstance(obj, (model.Package, model.Resource)):
42 method(obj, model.DomainObjectOperation.deleted)
43 for obj in set(changed):
44 if isinstance(obj, model.Resource):
45 method(obj, model.DomainObjectOperation.changed)
46 if getattr(obj, 'url_changed', False):
47 for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
48 item.notify(obj)
49
50 changed_pkgs = set(obj for obj in changed
51 if isinstance(obj, model.Package))
52
53 for obj in new | changed | deleted:
54 if not isinstance(obj, model.Package):
55 try:
56 changed_pkgs.update(obj.related_packages())
57 except AttributeError:
58 continue
59
60 for obj in changed_pkgs:
61 method(obj, model.DomainObjectOperation.changed)
62
63 def notify(self, entity: Any, operation: Any):
64 for observer in plugins.PluginImplementations(
65 plugins.IDomainObjectModification):
66 try:
67 observer.notify(entity, operation)
68 except SearchIndexError as search_error:
69 log.exception(search_error)
70 # Reraise, since it's pretty crucial to ckan if it can't index
71 # a dataset
72 raise
73 except Exception as ex:
74 log.exception(ex)
75 # Don't reraise other exceptions since they are generally of
76 # secondary importance so shouldn't disrupt the commit.
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/model/modification.py b/ckan/model/modification.py
--- a/ckan/model/modification.py
+++ b/ckan/model/modification.py
@@ -47,8 +47,12 @@
for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
item.notify(obj)
- changed_pkgs = set(obj for obj in changed
- if isinstance(obj, model.Package))
+
+ changed_pkgs = set()
+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]
+ for obj in changed:
+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:
+ changed_pkgs.add(obj)
for obj in new | changed | deleted:
if not isinstance(obj, model.Package):
| {"golden_diff": "diff --git a/ckan/model/modification.py b/ckan/model/modification.py\n--- a/ckan/model/modification.py\n+++ b/ckan/model/modification.py\n@@ -47,8 +47,12 @@\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n \n- changed_pkgs = set(obj for obj in changed\n- if isinstance(obj, model.Package))\n+\n+ changed_pkgs = set()\n+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]\n+ for obj in changed:\n+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:\n+ changed_pkgs.add(obj)\n \n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n", "issue": "Search indexing logic called twice after update or create dataset\n**CKAN version**\r\n2.10\r\n\r\n\r\n**Describe the bug**\r\n\r\nWhen updating or creating a dataset, we are indexing the dataset twice in a row, ie the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well.\r\n\r\nThe root cause is the the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom typing import Any\n\nfrom ckan.lib.search import SearchIndexError\n\nimport ckan.plugins as plugins\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['DomainObjectModificationExtension']\n\n\nclass DomainObjectModificationExtension(plugins.SingletonPlugin):\n \"\"\"\n Notify observers about domain object modifications before commit.\n\n Observers are other plugins implementing the IDomainObjectModification\n interface.\n \"\"\"\n\n def before_commit(self, session: Any):\n self.notify_observers(session, self.notify)\n\n def notify_observers(self, session: Any, method: Any):\n session.flush()\n if not hasattr(session, '_object_cache'):\n return\n\n obj_cache = session._object_cache\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n\n for obj in set(new):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.new)\n for obj in set(deleted):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.deleted)\n for obj in set(changed):\n if isinstance(obj, model.Resource):\n method(obj, model.DomainObjectOperation.changed)\n if getattr(obj, 'url_changed', False):\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n\n changed_pkgs = set(obj for obj in changed\n if isinstance(obj, model.Package))\n\n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n try:\n changed_pkgs.update(obj.related_packages())\n except AttributeError:\n continue\n\n for obj in changed_pkgs:\n method(obj, model.DomainObjectOperation.changed)\n\n def notify(self, entity: Any, operation: Any):\n for observer in plugins.PluginImplementations(\n plugins.IDomainObjectModification):\n try:\n observer.notify(entity, operation)\n except SearchIndexError as search_error:\n log.exception(search_error)\n # Reraise, since it's pretty crucial to ckan if it can't index\n # a dataset\n raise\n 
except Exception as ex:\n log.exception(ex)\n # Don't reraise other exceptions since they are generally of\n # secondary importance so shouldn't disrupt the commit.\n", "path": "ckan/model/modification.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom typing import Any\n\nfrom ckan.lib.search import SearchIndexError\n\nimport ckan.plugins as plugins\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['DomainObjectModificationExtension']\n\n\nclass DomainObjectModificationExtension(plugins.SingletonPlugin):\n \"\"\"\n Notify observers about domain object modifications before commit.\n\n Observers are other plugins implementing the IDomainObjectModification\n interface.\n \"\"\"\n\n def before_commit(self, session: Any):\n self.notify_observers(session, self.notify)\n\n def notify_observers(self, session: Any, method: Any):\n session.flush()\n if not hasattr(session, '_object_cache'):\n return\n\n obj_cache = session._object_cache\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n\n for obj in set(new):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.new)\n for obj in set(deleted):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.deleted)\n for obj in set(changed):\n if isinstance(obj, model.Resource):\n method(obj, model.DomainObjectOperation.changed)\n if getattr(obj, 'url_changed', False):\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n\n\n changed_pkgs = set()\n new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]\n for obj in changed:\n if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:\n changed_pkgs.add(obj)\n\n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n try:\n changed_pkgs.update(obj.related_packages())\n except AttributeError:\n continue\n\n for obj in changed_pkgs:\n method(obj, model.DomainObjectOperation.changed)\n\n def notify(self, entity: Any, operation: Any):\n for observer in plugins.PluginImplementations(\n plugins.IDomainObjectModification):\n try:\n observer.notify(entity, operation)\n except SearchIndexError as search_error:\n log.exception(search_error)\n # Reraise, since it's pretty crucial to ckan if it can't index\n # a dataset\n raise\n except Exception as ex:\n log.exception(ex)\n # Don't reraise other exceptions since they are generally of\n # secondary importance so shouldn't disrupt the commit.\n", "path": "ckan/model/modification.py"}]} | 1,151 | 178 |
gh_patches_debug_36676 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2767 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py`
Content:
```
1 from typing import Optional
2
3 import torch
4 import torch.distributed as dist
5 import torch.nn as nn
6 import torch.optim as optim
7
8 import colossalai
9 from colossalai.nn.optimizer import CPUAdam, HybridAdam
10 from colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper
11 from colossalai.tensor import ProcessGroup, ShardSpec
12 from colossalai.utils import get_current_device
13 from colossalai.utils.model.colo_init_context import ColoInitContext
14
15 from .ddp import DDPStrategy
16
17
18 class ColossalAIStrategy(DDPStrategy):
19 """
20 The strategy for training with ColossalAI.
21
22 Args:
23 stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)
24 seed(int): The seed for the random number generator.
25 shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
26 placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
27 If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
28 If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
29 pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.
30 force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.
31 search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.
32 hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.
33 min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.
34 gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. Only for ZeRO-3.
35 reduce_bugket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.
36 overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.
37 initial_scale(float): The initial scale for the optimizer.
38 growth_factor(float): The growth factor for the optimizer.
39 backoff_factor(float): The backoff factor for the optimizer.
40 growth_interval(int): The growth interval for the optimizer.
41 hysteresis(int): The hysteresis for the optimizer.
42 min_scale(float): The minimum scale for the optimizer.
43 max_scale(float): The maximum scale for the optimizer.
44 max_norm(float): The maximum norm for the optimizer.
45 norm_type(float): The norm type for the optimizer.
46
47 """
48
49 def __init__(
50 self,
51 stage: int = 3,
52 seed: int = 42,
53 shard_init: bool = True, # only for stage 3
54 placement_policy: str = 'cuda',
55 pin_memory: bool = True, # only for stage 3
56 force_outputs_fp32: bool = False, # only for stage 3
57 search_range_mb: int = 32, # only for stage 3
58 hidden_dim: Optional[int] = None, # only for stage 3
59 min_chunk_size_mb: float = 32, # only for stage 3
60 gpu_margin_mem_ratio: float = 0.0, # only for stage 3
61 reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2
62 overlap_communication: bool = True, # only for stage 1&2
63 initial_scale: float = 2**16,
64 growth_factor: float = 2,
65 backoff_factor: float = 0.5,
66 growth_interval: int = 1000,
67 hysteresis: int = 2,
68 min_scale: float = 1,
69 max_scale: float = 2**32,
70 max_norm: float = 0.0,
71 norm_type: float = 2.0) -> None:
72 super().__init__(seed)
73 assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
74 self.stage = stage
75 self.shard_init = shard_init
76 self.gemini_config = dict(device=get_current_device(),
77 placement_policy=placement_policy,
78 pin_memory=pin_memory,
79 force_outputs_fp32=force_outputs_fp32,
80 strict_ddp_mode=shard_init,
81 search_range_mb=search_range_mb,
82 hidden_dim=hidden_dim,
83 min_chunk_size_mb=min_chunk_size_mb)
84 if stage == 3:
85 self.zero_optim_config = dict(gpu_margin_mem_ratio=gpu_margin_mem_ratio)
86 else:
87 self.zero_optim_config = dict(reduce_bucket_size=reduce_bucket_size,
88 overlap_communication=overlap_communication,
89 cpu_offload=(placement_policy == 'cpu'))
90 self.optim_kwargs = dict(initial_scale=initial_scale,
91 growth_factor=growth_factor,
92 backoff_factor=backoff_factor,
93 growth_interval=growth_interval,
94 hysteresis=hysteresis,
95 min_scale=min_scale,
96 max_scale=max_scale,
97 max_norm=max_norm,
98 norm_type=norm_type)
99
100 def setup_distributed(self) -> None:
101 colossalai.launch_from_torch({}, seed=self.seed)
102
103 def model_init_context(self):
104 if self.stage == 3:
105 world_size = dist.get_world_size()
106 shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None
107 default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None
108 return ColoInitContext(device=get_current_device(),
109 dtype=torch.half,
110 default_pg=shard_pg,
111 default_dist_spec=default_dist_spec)
112 return super().model_init_context()
113
114 def setup_model(self, model: nn.Module) -> nn.Module:
115 return zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config)
116
117 def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer:
118 assert isinstance(optimizer, (CPUAdam, HybridAdam)), f'Unsupported optimizer {type(optimizer)}'
119 return zero_optim_wrapper(model, optimizer, optim_config=self.zero_optim_config, **self.optim_kwargs)
120
121 def backward(self, loss: torch.Tensor, model: nn.Module, optimizer: optim.Optimizer, **kwargs) -> None:
122 optimizer.backward(loss)
123
124 def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None:
125 optimizer.step()
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
--- a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
+++ b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
@@ -1,3 +1,4 @@
+import warnings
from typing import Optional
import torch
@@ -23,6 +24,7 @@
stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)
seed(int): The seed for the random number generator.
shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
+ This is not compativle with `from_pretrained()`. We temporarily disable this and will support it in the future.
placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
@@ -50,7 +52,7 @@
self,
stage: int = 3,
seed: int = 42,
- shard_init: bool = True, # only for stage 3
+ shard_init: bool = False, # only for stage 3
placement_policy: str = 'cuda',
pin_memory: bool = True, # only for stage 3
force_outputs_fp32: bool = False, # only for stage 3
@@ -72,6 +74,10 @@
super().__init__(seed)
assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
self.stage = stage
+ # TODO(ver217): support shard_init when using from_pretrained()
+ if shard_init:
+ warnings.warn(f'Shard init is not supported yet. Ignore.')
+ shard_init = False
self.shard_init = shard_init
self.gemini_config = dict(device=get_current_device(),
placement_policy=placement_policy,
| {"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n--- a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n+++ b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n@@ -1,3 +1,4 @@\n+import warnings\n from typing import Optional\n \n import torch\n@@ -23,6 +24,7 @@\n stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)\n seed(int): The seed for the random number generator.\n shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.\n+ This is not compativle with `from_pretrained()`. We temporarily disable this and will support it in the future.\n placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')\n If it is \u201ccpu\u201d, parameters, gradients and optimizer states will be offloaded to CPU,\n If it is \u201ccuda\u201d, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.\n@@ -50,7 +52,7 @@\n self,\n stage: int = 3,\n seed: int = 42,\n- shard_init: bool = True, # only for stage 3\n+ shard_init: bool = False, # only for stage 3\n placement_policy: str = 'cuda',\n pin_memory: bool = True, # only for stage 3\n force_outputs_fp32: bool = False, # only for stage 3\n@@ -72,6 +74,10 @@\n super().__init__(seed)\n assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy \"{placement_policy}\"'\n self.stage = stage\n+ # TODO(ver217): support shard_init when using from_pretrained()\n+ if shard_init:\n+ warnings.warn(f'Shard init is not supported yet. Ignore.')\n+ shard_init = False\n self.shard_init = shard_init\n self.gemini_config = dict(device=get_current_device(),\n placement_policy=placement_policy,\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport colossalai\nfrom colossalai.nn.optimizer import CPUAdam, HybridAdam\nfrom colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper\nfrom colossalai.tensor import ProcessGroup, ShardSpec\nfrom colossalai.utils import get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\n\nfrom .ddp import DDPStrategy\n\n\nclass ColossalAIStrategy(DDPStrategy):\n \"\"\"\n The strategy for training with ColossalAI.\n\n Args:\n stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)\n seed(int): The seed for the random number generator.\n shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.\n placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')\n If it is \u201ccpu\u201d, parameters, gradients and optimizer states will be offloaded to CPU,\n If it is \u201ccuda\u201d, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.\n pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.\n force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.\n search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.\n hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.\n min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.\n gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. 
Only for ZeRO-3.\n reduce_bugket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.\n overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.\n initial_scale(float): The initial scale for the optimizer.\n growth_factor(float): The growth factor for the optimizer.\n backoff_factor(float): The backoff factor for the optimizer.\n growth_interval(int): The growth interval for the optimizer.\n hysteresis(int): The hysteresis for the optimizer.\n min_scale(float): The minimum scale for the optimizer.\n max_scale(float): The maximum scale for the optimizer.\n max_norm(float): The maximum norm for the optimizer.\n norm_type(float): The norm type for the optimizer.\n\n \"\"\"\n\n def __init__(\n self,\n stage: int = 3,\n seed: int = 42,\n shard_init: bool = True, # only for stage 3\n placement_policy: str = 'cuda',\n pin_memory: bool = True, # only for stage 3\n force_outputs_fp32: bool = False, # only for stage 3\n search_range_mb: int = 32, # only for stage 3\n hidden_dim: Optional[int] = None, # only for stage 3\n min_chunk_size_mb: float = 32, # only for stage 3\n gpu_margin_mem_ratio: float = 0.0, # only for stage 3\n reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2\n overlap_communication: bool = True, # only for stage 1&2\n initial_scale: float = 2**16,\n growth_factor: float = 2,\n backoff_factor: float = 0.5,\n growth_interval: int = 1000,\n hysteresis: int = 2,\n min_scale: float = 1,\n max_scale: float = 2**32,\n max_norm: float = 0.0,\n norm_type: float = 2.0) -> None:\n super().__init__(seed)\n assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy \"{placement_policy}\"'\n self.stage = stage\n self.shard_init = shard_init\n self.gemini_config = dict(device=get_current_device(),\n placement_policy=placement_policy,\n pin_memory=pin_memory,\n force_outputs_fp32=force_outputs_fp32,\n strict_ddp_mode=shard_init,\n search_range_mb=search_range_mb,\n hidden_dim=hidden_dim,\n min_chunk_size_mb=min_chunk_size_mb)\n if stage == 3:\n self.zero_optim_config = dict(gpu_margin_mem_ratio=gpu_margin_mem_ratio)\n else:\n self.zero_optim_config = dict(reduce_bucket_size=reduce_bucket_size,\n overlap_communication=overlap_communication,\n cpu_offload=(placement_policy == 'cpu'))\n self.optim_kwargs = dict(initial_scale=initial_scale,\n growth_factor=growth_factor,\n backoff_factor=backoff_factor,\n growth_interval=growth_interval,\n hysteresis=hysteresis,\n min_scale=min_scale,\n max_scale=max_scale,\n max_norm=max_norm,\n norm_type=norm_type)\n\n def setup_distributed(self) -> None:\n colossalai.launch_from_torch({}, seed=self.seed)\n\n def model_init_context(self):\n if self.stage == 3:\n world_size = dist.get_world_size()\n shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None\n default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None\n return ColoInitContext(device=get_current_device(),\n dtype=torch.half,\n default_pg=shard_pg,\n default_dist_spec=default_dist_spec)\n return super().model_init_context()\n\n def setup_model(self, model: nn.Module) -> nn.Module:\n return zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config)\n\n def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer:\n assert isinstance(optimizer, (CPUAdam, HybridAdam)), f'Unsupported optimizer {type(optimizer)}'\n return zero_optim_wrapper(model, optimizer, optim_config=self.zero_optim_config, **self.optim_kwargs)\n\n def 
backward(self, loss: torch.Tensor, model: nn.Module, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.backward(loss)\n\n def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.step()\n", "path": "applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py"}], "after_files": [{"content": "import warnings\nfrom typing import Optional\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport colossalai\nfrom colossalai.nn.optimizer import CPUAdam, HybridAdam\nfrom colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper\nfrom colossalai.tensor import ProcessGroup, ShardSpec\nfrom colossalai.utils import get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\n\nfrom .ddp import DDPStrategy\n\n\nclass ColossalAIStrategy(DDPStrategy):\n \"\"\"\n The strategy for training with ColossalAI.\n\n Args:\n stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)\n seed(int): The seed for the random number generator.\n shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.\n This is not compativle with `from_pretrained()`. We temporarily disable this and will support it in the future.\n placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')\n If it is \u201ccpu\u201d, parameters, gradients and optimizer states will be offloaded to CPU,\n If it is \u201ccuda\u201d, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.\n pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.\n force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.\n search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.\n hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.\n min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.\n gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. Only for ZeRO-3.\n reduce_bugket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.\n overlap_communication(bool): Whether to overlap communication and computation. 
Only for ZeRO-1 and ZeRO-2.\n initial_scale(float): The initial scale for the optimizer.\n growth_factor(float): The growth factor for the optimizer.\n backoff_factor(float): The backoff factor for the optimizer.\n growth_interval(int): The growth interval for the optimizer.\n hysteresis(int): The hysteresis for the optimizer.\n min_scale(float): The minimum scale for the optimizer.\n max_scale(float): The maximum scale for the optimizer.\n max_norm(float): The maximum norm for the optimizer.\n norm_type(float): The norm type for the optimizer.\n\n \"\"\"\n\n def __init__(\n self,\n stage: int = 3,\n seed: int = 42,\n shard_init: bool = False, # only for stage 3\n placement_policy: str = 'cuda',\n pin_memory: bool = True, # only for stage 3\n force_outputs_fp32: bool = False, # only for stage 3\n search_range_mb: int = 32, # only for stage 3\n hidden_dim: Optional[int] = None, # only for stage 3\n min_chunk_size_mb: float = 32, # only for stage 3\n gpu_margin_mem_ratio: float = 0.0, # only for stage 3\n reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2\n overlap_communication: bool = True, # only for stage 1&2\n initial_scale: float = 2**16,\n growth_factor: float = 2,\n backoff_factor: float = 0.5,\n growth_interval: int = 1000,\n hysteresis: int = 2,\n min_scale: float = 1,\n max_scale: float = 2**32,\n max_norm: float = 0.0,\n norm_type: float = 2.0) -> None:\n super().__init__(seed)\n assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy \"{placement_policy}\"'\n self.stage = stage\n # TODO(ver217): support shard_init when using from_pretrained()\n if shard_init:\n warnings.warn(f'Shard init is not supported yet. Ignore.')\n shard_init = False\n self.shard_init = shard_init\n self.gemini_config = dict(device=get_current_device(),\n placement_policy=placement_policy,\n pin_memory=pin_memory,\n force_outputs_fp32=force_outputs_fp32,\n strict_ddp_mode=shard_init,\n search_range_mb=search_range_mb,\n hidden_dim=hidden_dim,\n min_chunk_size_mb=min_chunk_size_mb)\n if stage == 3:\n self.zero_optim_config = dict(gpu_margin_mem_ratio=gpu_margin_mem_ratio)\n else:\n self.zero_optim_config = dict(reduce_bucket_size=reduce_bucket_size,\n overlap_communication=overlap_communication,\n cpu_offload=(placement_policy == 'cpu'))\n self.optim_kwargs = dict(initial_scale=initial_scale,\n growth_factor=growth_factor,\n backoff_factor=backoff_factor,\n growth_interval=growth_interval,\n hysteresis=hysteresis,\n min_scale=min_scale,\n max_scale=max_scale,\n max_norm=max_norm,\n norm_type=norm_type)\n\n def setup_distributed(self) -> None:\n colossalai.launch_from_torch({}, seed=self.seed)\n\n def model_init_context(self):\n if self.stage == 3:\n world_size = dist.get_world_size()\n shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None\n default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None\n return ColoInitContext(device=get_current_device(),\n dtype=torch.half,\n default_pg=shard_pg,\n default_dist_spec=default_dist_spec)\n return super().model_init_context()\n\n def setup_model(self, model: nn.Module) -> nn.Module:\n return zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config)\n\n def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer:\n assert isinstance(optimizer, (CPUAdam, HybridAdam)), f'Unsupported optimizer {type(optimizer)}'\n return zero_optim_wrapper(model, optimizer, optim_config=self.zero_optim_config, **self.optim_kwargs)\n\n def backward(self, loss: 
torch.Tensor, model: nn.Module, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.backward(loss)\n\n def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.step()\n", "path": "applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py"}]} | 1,989 | 505 |
gh_patches_debug_50780 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3745 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Explicitly depend on setuptools
Context: #3295
We should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
22
23 with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
24 README = file_obj.read()
25
26 # NOTE: This is duplicated throughout and we should try to
27 # consolidate.
28 SETUP_BASE = {
29 'author': 'Google Cloud Platform',
30 'author_email': '[email protected]',
31 'scripts': [],
32 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
33 'license': 'Apache 2.0',
34 'platforms': 'Posix; MacOS X; Windows',
35 'include_package_data': True,
36 'zip_safe': False,
37 'classifiers': [
38 'Development Status :: 4 - Beta',
39 'Intended Audience :: Developers',
40 'License :: OSI Approved :: Apache Software License',
41 'Operating System :: OS Independent',
42 'Programming Language :: Python :: 2',
43 'Programming Language :: Python :: 2.7',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.4',
46 'Programming Language :: Python :: 3.5',
47 'Programming Language :: Python :: 3.6',
48 'Topic :: Internet',
49 ],
50 }
51
52
53 REQUIREMENTS = [
54 'googleapis-common-protos >= 1.3.4',
55 'protobuf >= 3.0.0',
56 'google-auth >= 0.4.0, < 2.0.0dev',
57 'requests >= 2.4.0, < 3.0.0dev',
58 'six',
59 'tenacity >= 4.0.0, <5.0.0dev'
60 ]
61
62 setup(
63 name='google-cloud-core',
64 version='0.26.0',
65 description='API Client library for Google Cloud: Core Helpers',
66 long_description=README,
67 namespace_packages=[
68 'google',
69 'google.cloud',
70 'google.api',
71 ],
72 packages=find_packages(exclude=('tests*',)),
73 install_requires=REQUIREMENTS,
74 **SETUP_BASE
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -55,6 +55,7 @@
'protobuf >= 3.0.0',
'google-auth >= 0.4.0, < 2.0.0dev',
'requests >= 2.4.0, < 3.0.0dev',
+ 'setuptools >= 34.0.0',
'six',
'tenacity >= 4.0.0, <5.0.0dev'
]
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -55,6 +55,7 @@\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n+ 'setuptools >= 34.0.0',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n ]\n", "issue": "Explicitly depend on setuptools\nContext: #3295\r\n\r\nWe should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\n\nREQUIREMENTS = [\n 'googleapis-common-protos >= 1.3.4',\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n]\n\nsetup(\n name='google-cloud-core',\n version='0.26.0',\n description='API Client library for Google Cloud: Core Helpers',\n long_description=README,\n namespace_packages=[\n 'google',\n 'google.cloud',\n 'google.api',\n ],\n packages=find_packages(exclude=('tests*',)),\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith 
open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\n\nREQUIREMENTS = [\n 'googleapis-common-protos >= 1.3.4',\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n 'setuptools >= 34.0.0',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n]\n\nsetup(\n name='google-cloud-core',\n version='0.26.0',\n description='API Client library for Google Cloud: Core Helpers',\n long_description=README,\n namespace_packages=[\n 'google',\n 'google.cloud',\n 'google.api',\n ],\n packages=find_packages(exclude=('tests*',)),\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n", "path": "core/setup.py"}]} | 1,034 | 127 |
gh_patches_debug_28051 | rasdani/github-patches | git_diff | cupy__cupy-5226 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove cupyx.allow_synchronize?
`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.
Application in unit tests was dismissed (#2893) because many more tests were synchronous than I had expected. 
Now I doubt the usefulness of this feature for users as well.
It's fundamentally impossible to eliminate false positives and/or false negatives.
If we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.
For example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *"This function exhibits synchronous behavior for most use cases"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.
If we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/_core/syncdetect.py`
Content:
```
1 import contextlib
2 import threading
3
4 from cupy import _util
5
6
7 _thread_local = threading.local()
8
9
10 class DeviceSynchronized(RuntimeError):
11 """Raised when device synchronization is detected while disallowed.
12
13 .. seealso:: :func:`cupyx.allow_synchronize`
14
15 """
16
17 def __init__(self, message=None):
18 if message is None:
19 message = 'Device synchronization was detected while disallowed.'
20 super().__init__(message)
21
22
23 def _is_allowed():
24 # Returns whether device synchronization is allowed in the current thread.
25 try:
26 return _thread_local.allowed
27 except AttributeError:
28 _thread_local.allowed = True
29 return True
30
31
32 def _declare_synchronize():
33 # Raises DeviceSynchronized if device synchronization is disallowed in
34 # the current thread.
35 if not _is_allowed():
36 raise DeviceSynchronized()
37
38
39 @contextlib.contextmanager
40 def allow_synchronize(allow):
41 """Allows or disallows device synchronization temporarily in the current \
42 thread.
43
44 If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
45 will be raised.
46
47 Note that there can be false negatives and positives.
48 Device synchronization outside CuPy will not be detected.
49 """
50 _util.experimental('cupyx.allow_synchronize')
51 old = _is_allowed()
52 _thread_local.allowed = allow
53 try:
54 yield
55 finally:
56 _thread_local.allowed = old
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py
--- a/cupy/_core/syncdetect.py
+++ b/cupy/_core/syncdetect.py
@@ -1,7 +1,6 @@
import contextlib
import threading
-
-from cupy import _util
+import warnings
_thread_local = threading.local()
@@ -10,6 +9,11 @@
class DeviceSynchronized(RuntimeError):
"""Raised when device synchronization is detected while disallowed.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
.. seealso:: :func:`cupyx.allow_synchronize`
"""
@@ -41,13 +45,21 @@
"""Allows or disallows device synchronization temporarily in the current \
thread.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
will be raised.
Note that there can be false negatives and positives.
Device synchronization outside CuPy will not be detected.
"""
- _util.experimental('cupyx.allow_synchronize')
+ warnings.warn(
+ 'cupyx.allow_synchronize will be removed in future releases as it '
+ 'is not possible to reliably detect synchronizations.')
+
old = _is_allowed()
_thread_local.allowed = allow
try:
| {"golden_diff": "diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py\n--- a/cupy/_core/syncdetect.py\n+++ b/cupy/_core/syncdetect.py\n@@ -1,7 +1,6 @@\n import contextlib\n import threading\n-\n-from cupy import _util\n+import warnings\n \n \n _thread_local = threading.local()\n@@ -10,6 +9,11 @@\n class DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n .. seealso:: :func:`cupyx.allow_synchronize`\n \n \"\"\"\n@@ -41,13 +45,21 @@\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\n thread.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n \n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n- _util.experimental('cupyx.allow_synchronize')\n+ warnings.warn(\n+ 'cupyx.allow_synchronize will be removed in future releases as it '\n+ 'is not possible to reliably detect synchronizations.')\n+\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n", "issue": "Remove cupyx.allow_synchronize?\n`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.\r\n\r\nApplication in uint tests was dissmissed (#2893) because much more tests were synchronous than I had expected. \r\n\r\nNow I doubt the usefulness of this feature for users as well.\r\nIt's fundamentally impossible to eliminate false positives and/or false negatives.\r\n\r\nIf we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.\r\nFor example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *\"This function exhibits synchronous behavior for most use cases\"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.\r\n\r\nIf we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.\n", "before_files": [{"content": "import contextlib\nimport threading\n\nfrom cupy import _util\n\n\n_thread_local = threading.local()\n\n\nclass DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n\n .. 
seealso:: :func:`cupyx.allow_synchronize`\n\n \"\"\"\n\n def __init__(self, message=None):\n if message is None:\n message = 'Device synchronization was detected while disallowed.'\n super().__init__(message)\n\n\ndef _is_allowed():\n # Returns whether device synchronization is allowed in the current thread.\n try:\n return _thread_local.allowed\n except AttributeError:\n _thread_local.allowed = True\n return True\n\n\ndef _declare_synchronize():\n # Raises DeviceSynchronized if device synchronization is disallowed in\n # the current thread.\n if not _is_allowed():\n raise DeviceSynchronized()\n\n\[email protected]\ndef allow_synchronize(allow):\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\nthread.\n\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n\n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n _util.experimental('cupyx.allow_synchronize')\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n yield\n finally:\n _thread_local.allowed = old\n", "path": "cupy/_core/syncdetect.py"}], "after_files": [{"content": "import contextlib\nimport threading\nimport warnings\n\n\n_thread_local = threading.local()\n\n\nclass DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n\n .. warning::\n\n This API has been deprecated in CuPy v10 and will be removed in future\n releases.\n\n .. seealso:: :func:`cupyx.allow_synchronize`\n\n \"\"\"\n\n def __init__(self, message=None):\n if message is None:\n message = 'Device synchronization was detected while disallowed.'\n super().__init__(message)\n\n\ndef _is_allowed():\n # Returns whether device synchronization is allowed in the current thread.\n try:\n return _thread_local.allowed\n except AttributeError:\n _thread_local.allowed = True\n return True\n\n\ndef _declare_synchronize():\n # Raises DeviceSynchronized if device synchronization is disallowed in\n # the current thread.\n if not _is_allowed():\n raise DeviceSynchronized()\n\n\[email protected]\ndef allow_synchronize(allow):\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\nthread.\n\n .. warning::\n\n This API has been deprecated in CuPy v10 and will be removed in future\n releases.\n\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n\n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n warnings.warn(\n 'cupyx.allow_synchronize will be removed in future releases as it '\n 'is not possible to reliably detect synchronizations.')\n\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n yield\n finally:\n _thread_local.allowed = old\n", "path": "cupy/_core/syncdetect.py"}]} | 928 | 333 |
gh_patches_debug_27897 | rasdani/github-patches | git_diff | freqtrade__freqtrade-4144 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possibly slightly wrong informative pair merging
When merging a longer timeframe into a shorter one, the timedelta is added to the date:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35
then it merges into the shorter timeframe using these dates:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43
So let's say we are merging a `1h` timeframe into a `5m` timeframe and there is a signal `True` at `00:00` on the `1h` timeseries. 
With this merge, the signal will appear in the `5m` timeseries also at `00:00`. However, the `00:00` candle for the `5m` timeframe is received at `00:05`, which is five minutes later than the time you actually received the `1h` candle; the signal should instead have been merged onto the candle with date `23:55`. 
So after merging, the values should be shifted backward (`shift(-1)`), or the merge dates should be reduced by one unit of the shorter timeframe's timedelta:
```python
informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
```
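As an illustration only (not part of the original report), here is a minimal pandas sketch of how the proposed offset changes the merge key; the timeframes, column names and dates below are assumed for the example:
```python
import pandas as pd

minutes, minutes_inf = 5, 60  # assumed: merging 1h informative data into a 5m dataframe
informative = pd.DataFrame(
    {"date": pd.date_range("2021-01-01 00:00", periods=3, freq="60min")}
)

# current behaviour: the 1h candle dated 00:00 is keyed to 01:00
informative["date_merge_current"] = informative["date"] + pd.to_timedelta(minutes_inf, "m")

# proposed behaviour: subtract one 5m unit so the same candle is keyed to 00:55,
# the last 5m candle that closes when the 1h candle actually becomes available
informative["date_merge_proposed"] = (
    informative["date"]
    + pd.to_timedelta(minutes_inf, "m")
    - pd.to_timedelta(minutes, "m")
)
print(informative)
```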
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/strategy/strategy_helper.py`
Content:
```
1 import pandas as pd
2
3 from freqtrade.exchange import timeframe_to_minutes
4
5
6 def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,
7 timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:
8 """
9 Correctly merge informative samples to the original dataframe, avoiding lookahead bias.
10
11 Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a
12 1h candle that starts at 15:00 will result in all candles to know the close at 16:00
13 which they should not know.
14
15 Moves the date of the informative pair by 1 time interval forward.
16 This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the
17 last candle that's closed at 15:00, 15:15, 15:30 or 15:45.
18
19 Assuming inf_tf = '1d' - then the resulting columns will be:
20 date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d
21
22 :param dataframe: Original dataframe
23 :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe
24 :param timeframe: Timeframe of the original pair sample.
25 :param timeframe_inf: Timeframe of the informative pair sample.
26 :param ffill: Forwardfill missing values - optional but usually required
27 """
28
29 minutes_inf = timeframe_to_minutes(timeframe_inf)
30 minutes = timeframe_to_minutes(timeframe)
31 if minutes >= minutes_inf:
32 # No need to forwardshift if the timeframes are identical
33 informative['date_merge'] = informative["date"]
34 else:
35 informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
36
37 # Rename columns to be unique
38 informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
39
40 # Combine the 2 dataframes
41 # all indicators on the informative sample MUST be calculated before this point
42 dataframe = pd.merge(dataframe, informative, left_on='date',
43 right_on=f'date_merge_{timeframe_inf}', how='left')
44 dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)
45
46 if ffill:
47 dataframe = dataframe.ffill()
48
49 return dataframe
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py
--- a/freqtrade/strategy/strategy_helper.py
+++ b/freqtrade/strategy/strategy_helper.py
@@ -24,15 +24,24 @@
:param timeframe: Timeframe of the original pair sample.
:param timeframe_inf: Timeframe of the informative pair sample.
:param ffill: Forwardfill missing values - optional but usually required
+ :return: Merged dataframe
+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe
"""
minutes_inf = timeframe_to_minutes(timeframe_inf)
minutes = timeframe_to_minutes(timeframe)
- if minutes >= minutes_inf:
+ if minutes == minutes_inf:
# No need to forwardshift if the timeframes are identical
informative['date_merge'] = informative["date"]
+ elif minutes < minutes_inf:
+ # Subtract "small" timeframe so merging is not delayed by 1 small candle
+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073
+ informative['date_merge'] = (
+ informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
+ )
else:
- informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
+ raise ValueError("Tried to merge a faster timeframe to a slower timeframe."
+ "This would create new rows, and can throw off your regular indicators.")
# Rename columns to be unique
informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
| {"golden_diff": "diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py\n--- a/freqtrade/strategy/strategy_helper.py\n+++ b/freqtrade/strategy/strategy_helper.py\n@@ -24,15 +24,24 @@\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n+ :return: Merged dataframe\n+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n \n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n- if minutes >= minutes_inf:\n+ if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n+ elif minutes < minutes_inf:\n+ # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n+ informative['date_merge'] = (\n+ informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\n+ )\n else:\n- informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n+ raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n+ \"This would create new rows, and can throw off your regular indicators.\")\n \n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n", "issue": "Possibly slightly wrong informative pair merging\nWhen merging longer timeframe to a shorter one, the timedelta is added to the date:\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35\r\n\r\nthen it merges to the shorter timeframe with these dates..\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43\r\n\r\nSo lets say we are merging a `1h` timeframe to a `5m` timeframe and there is a signal `True` at `00:00` on the `1h` timeseries. \r\nWith this merge, the signal will appear in the `5m` timeseries also at `00:00`. However the `00:00` candle for the `5m` timeframe is received at `00:05`, that is five minutes later than the time you actually received the `1h` candle, which should have been received at the candle with date `11:55`. 
\r\nSo after merging, the values should be shifted backward (`shift(-1)`)..or the merging dates should be reduced by one unit of timedelta of the shorter timeframe..\r\n```python\r\ninformative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\r\n```\n", "before_files": [{"content": "import pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes >= minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n else:\n informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n\n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=f'date_merge_{timeframe_inf}', how='left')\n dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)\n\n if ffill:\n dataframe = dataframe.ffill()\n\n return dataframe\n", "path": "freqtrade/strategy/strategy_helper.py"}], "after_files": [{"content": "import pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param 
timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n :return: Merged dataframe\n :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n elif minutes < minutes_inf:\n # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n informative['date_merge'] = (\n informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\n )\n else:\n raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n \"This would create new rows, and can throw off your regular indicators.\")\n\n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=f'date_merge_{timeframe_inf}', how='left')\n dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)\n\n if ffill:\n dataframe = dataframe.ffill()\n\n return dataframe\n", "path": "freqtrade/strategy/strategy_helper.py"}]} | 1,269 | 372 |
gh_patches_debug_13394 | rasdani/github-patches | git_diff | ManimCommunity__manim-1923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Command line help text is cut off
## Enhancement proposal
When running `manim --help`, the following output is returned:
```
Manim Community v0.9.0
Usage: manim [OPTIONS] COMMAND [ARGS]...
Animation engine for explanatory math videos
Options:
--version Show version and exit.
--help Show this message and exit.
Commands:
render* Render SCENE(S) from the input FILE.
cfg Manages Manim configuration files.
init Sets up a project in current working directory with default...
new Create a new project or insert a new scene.
plugins Manages Manim plugins.
Made with <3 by Manim Community developers.
```
As you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.
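For context, a hedged sketch of how Click handles this: the text shown in the command list comes from `short_help` (or, when that is absent, from a truncated first line of the docstring), so supplying an explicit `short_help` is one way to avoid the cut-off. The command below is a simplified stand-in, not Manim's actual implementation:

```python
import click


@click.command(
    short_help="Sets up a new project in the current directory with default settings.",
)
def init():
    """Sets up a new project in the current working directory with default settings.

    The full docstring is still available via ``manim init --help``.
    """


if __name__ == "__main__":
    init()
```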
## Additional comments
<!-- Add further context that you think might be relevant. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/cli/init/commands.py`
Content:
```
1 """Manim's init subcommand.
2
3 Manim's init subcommand is accessed in the command-line interface via ``manim
4 init``. Here you can specify options, subcommands, and subgroups for the init
5 group.
6
7 """
8 from pathlib import Path
9
10 import click
11
12 from ...constants import CONTEXT_SETTINGS, EPILOG
13 from ...utils.file_ops import copy_template_files
14
15
16 @click.command(
17 context_settings=CONTEXT_SETTINGS,
18 epilog=EPILOG,
19 )
20 def init():
21 """Sets up a project in current working directory with default settings.
22
23 It copies files from templates directory and pastes them in the current working dir.
24
25 The new project is set up with default settings.
26 """
27 cfg = Path("manim.cfg")
28 if cfg.exists():
29 raise FileExistsError(f"\t{cfg} exists\n")
30 else:
31 copy_template_files()
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py
--- a/manim/cli/init/commands.py
+++ b/manim/cli/init/commands.py
@@ -16,13 +16,14 @@
@click.command(
context_settings=CONTEXT_SETTINGS,
epilog=EPILOG,
+ short_help="""Sets up a new project in current working directory with default settings.\n
+It copies files from templates directory and pastes them in the current working dir.
+""",
)
def init():
- """Sets up a project in current working directory with default settings.
+ """Sets up a new project in current working directory with default settings.
It copies files from templates directory and pastes them in the current working dir.
-
- The new project is set up with default settings.
"""
cfg = Path("manim.cfg")
if cfg.exists():
| {"golden_diff": "diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py\n--- a/manim/cli/init/commands.py\n+++ b/manim/cli/init/commands.py\n@@ -16,13 +16,14 @@\n @click.command(\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n+ short_help=\"\"\"Sets up a new project in current working directory with default settings.\\n\n+It copies files from templates directory and pastes them in the current working dir.\n+\"\"\",\n )\n def init():\n- \"\"\"Sets up a project in current working directory with default settings.\n+ \"\"\"Sets up a new project in current working directory with default settings.\n \n It copies files from templates directory and pastes them in the current working dir.\n-\n- The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n", "issue": "Command line help text is cut off\n## Enhancement proposal\r\nWhen running `manim --help`, the following output is returned:\r\n\r\n```\r\nManim Community v0.9.0\r\n\r\nUsage: manim [OPTIONS] COMMAND [ARGS]...\r\n\r\n Animation engine for explanatory math videos\r\n\r\nOptions:\r\n --version Show version and exit.\r\n --help Show this message and exit.\r\n\r\nCommands:\r\n render* Render SCENE(S) from the input FILE.\r\n cfg Manages Manim configuration files.\r\n init Sets up a project in current working directory with default...\r\n new Create a new project or insert a new scene.\r\n plugins Manages Manim plugins.\r\n\r\n Made with <3 by Manim Community developers.\r\n```\r\n\r\nAs you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\n\n", "before_files": [{"content": "\"\"\"Manim's init subcommand.\n\nManim's init subcommand is accessed in the command-line interface via ``manim\ninit``. Here you can specify options, subcommands, and subgroups for the init\ngroup.\n\n\"\"\"\nfrom pathlib import Path\n\nimport click\n\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.file_ops import copy_template_files\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\ndef init():\n \"\"\"Sets up a project in current working directory with default settings.\n\n It copies files from templates directory and pastes them in the current working dir.\n\n The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n raise FileExistsError(f\"\\t{cfg} exists\\n\")\n else:\n copy_template_files()\n", "path": "manim/cli/init/commands.py"}], "after_files": [{"content": "\"\"\"Manim's init subcommand.\n\nManim's init subcommand is accessed in the command-line interface via ``manim\ninit``. 
Here you can specify options, subcommands, and subgroups for the init\ngroup.\n\n\"\"\"\nfrom pathlib import Path\n\nimport click\n\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.file_ops import copy_template_files\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n short_help=\"\"\"Sets up a new project in current working directory with default settings.\\n\nIt copies files from templates directory and pastes them in the current working dir.\n\"\"\",\n)\ndef init():\n \"\"\"Sets up a new project in current working directory with default settings.\n\n It copies files from templates directory and pastes them in the current working dir.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n raise FileExistsError(f\"\\t{cfg} exists\\n\")\n else:\n copy_template_files()\n", "path": "manim/cli/init/commands.py"}]} | 704 | 193 |
gh_patches_debug_16755 | rasdani/github-patches | git_diff | scrapy__scrapy-5299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tests failing with new Sybil
Sybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`
```python-traceback
docs/conftest.py:24: in <module>
CodeBlockParser(future_imports=['print_function']),
E TypeError: __init__() got an unexpected keyword argument 'future_imports'
```
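A compatibility shim along the following lines (a sketch based on the rename described above, not necessarily the exact change the project adopted) would keep the conftest working on both old and new Sybil releases:

```python
try:
    # Sybil >= 3.0 renamed the parser
    from sybil.parsers.codeblock import PythonCodeBlockParser
except ImportError:
    # older Sybil releases
    from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
```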
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conftest.py`
Content:
```
1 import os
2 from doctest import ELLIPSIS, NORMALIZE_WHITESPACE
3
4 from scrapy.http.response.html import HtmlResponse
5 from sybil import Sybil
6 from sybil.parsers.codeblock import CodeBlockParser
7 from sybil.parsers.doctest import DocTestParser
8 from sybil.parsers.skip import skip
9
10
11 def load_response(url, filename):
12 input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)
13 with open(input_path, 'rb') as input_file:
14 return HtmlResponse(url, body=input_file.read())
15
16
17 def setup(namespace):
18 namespace['load_response'] = load_response
19
20
21 pytest_collect_file = Sybil(
22 parsers=[
23 DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
24 CodeBlockParser(future_imports=['print_function']),
25 skip,
26 ],
27 pattern='*.rst',
28 setup=setup,
29 ).pytest()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conftest.py b/docs/conftest.py
--- a/docs/conftest.py
+++ b/docs/conftest.py
@@ -3,7 +3,11 @@
from scrapy.http.response.html import HtmlResponse
from sybil import Sybil
-from sybil.parsers.codeblock import CodeBlockParser
+try:
+ # >2.0.1
+ from sybil.parsers.codeblock import PythonCodeBlockParser
+except ImportError:
+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@@ -21,7 +25,7 @@
pytest_collect_file = Sybil(
parsers=[
DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
- CodeBlockParser(future_imports=['print_function']),
+ PythonCodeBlockParser(future_imports=['print_function']),
skip,
],
pattern='*.rst',
| {"golden_diff": "diff --git a/docs/conftest.py b/docs/conftest.py\n--- a/docs/conftest.py\n+++ b/docs/conftest.py\n@@ -3,7 +3,11 @@\n \n from scrapy.http.response.html import HtmlResponse\n from sybil import Sybil\n-from sybil.parsers.codeblock import CodeBlockParser\n+try:\n+ # >2.0.1\n+ from sybil.parsers.codeblock import PythonCodeBlockParser\n+except ImportError:\n+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser\n from sybil.parsers.doctest import DocTestParser\n from sybil.parsers.skip import skip\n \n@@ -21,7 +25,7 @@\n pytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n- CodeBlockParser(future_imports=['print_function']),\n+ PythonCodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n", "issue": "Tests failing with new Sybil\nSybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`\r\n\r\n```python-traceback\r\ndocs/conftest.py:24: in <module>\r\n CodeBlockParser(future_imports=['print_function']),\r\nE TypeError: __init__() got an unexpected keyword argument 'future_imports'\r\n```\n", "before_files": [{"content": "import os\nfrom doctest import ELLIPSIS, NORMALIZE_WHITESPACE\n\nfrom scrapy.http.response.html import HtmlResponse\nfrom sybil import Sybil\nfrom sybil.parsers.codeblock import CodeBlockParser\nfrom sybil.parsers.doctest import DocTestParser\nfrom sybil.parsers.skip import skip\n\n\ndef load_response(url, filename):\n input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)\n with open(input_path, 'rb') as input_file:\n return HtmlResponse(url, body=input_file.read())\n\n\ndef setup(namespace):\n namespace['load_response'] = load_response\n\n\npytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n CodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n setup=setup,\n).pytest()\n", "path": "docs/conftest.py"}], "after_files": [{"content": "import os\nfrom doctest import ELLIPSIS, NORMALIZE_WHITESPACE\n\nfrom scrapy.http.response.html import HtmlResponse\nfrom sybil import Sybil\ntry:\n # >2.0.1\n from sybil.parsers.codeblock import PythonCodeBlockParser\nexcept ImportError:\n from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser\nfrom sybil.parsers.doctest import DocTestParser\nfrom sybil.parsers.skip import skip\n\n\ndef load_response(url, filename):\n input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)\n with open(input_path, 'rb') as input_file:\n return HtmlResponse(url, body=input_file.read())\n\n\ndef setup(namespace):\n namespace['load_response'] = load_response\n\n\npytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n PythonCodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n setup=setup,\n).pytest()\n", "path": "docs/conftest.py"}]} | 596 | 216 |
gh_patches_debug_4388 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build number in -dev versioning
Since we do not have a build number in the -dev version, it can be quite hard to tell which particular version of freqtrade a user is actually running (especially with the conda package). So if we change the user interface, I guess it's better for a while to make those changes along with changing the version of both develop and master...
Is it possible to introduce a build number into the -dev versioning, one that changes with every merge, as ccxt does, for example?
It's not necessary for master (2019-8 is completely enough), but for develop, which changes every day, this may be useful, since `freqtrade -V` would then give the exact version info (even when a user has the current codebase fetched from GitHub in a directory but runs an outdated version installed via conda instead of the latest develop from that directory; that's the confusing use case where it's hard to find out which version of freqtrade actually runs on the user's machine...).
It doesn't need to be an increasing number; it can be a short commit id (merged into develop), for example...
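For illustration, one possible sketch (not necessarily what the project ended up doing) that appends a short commit id to the development version string; the exact `git` invocation and the fallback behaviour are assumptions:

```python
import subprocess

__version__ = "develop"

try:
    commit = (
        subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            stderr=subprocess.DEVNULL,
        )
        .decode("utf-8")
        .strip()
    )
    __version__ = f"develop-{commit}"
except Exception:
    # git not available (e.g. installed from a package); keep the plain string
    pass

print(__version__)
```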
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/__init__.py`
Content:
```
1 """ FreqTrade bot """
2 __version__ = '2019.7-dev'
3
4
5 class DependencyException(Exception):
6 """
7 Indicates that an assumed dependency is not met.
8 This could happen when there is currently not enough money on the account.
9 """
10
11
12 class OperationalException(Exception):
13 """
14 Requires manual intervention and will usually stop the bot.
15 This happens when an exchange returns an unexpected error during runtime
16 or given configuration is invalid.
17 """
18
19
20 class InvalidOrderException(Exception):
21 """
22 This is returned when the order is not valid. Example:
23 If stoploss on exchange order is hit, then trying to cancel the order
24 should return this exception.
25 """
26
27
28 class TemporaryError(Exception):
29 """
30 Temporary network or exchange related error.
31 This could happen when an exchange is congested, unavailable, or the user
32 has networking problems. Usually resolves itself after a time.
33 """
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py
--- a/freqtrade/__init__.py
+++ b/freqtrade/__init__.py
@@ -1,5 +1,16 @@
""" FreqTrade bot """
-__version__ = '2019.7-dev'
+__version__ = 'develop'
+
+if __version__ == 'develop':
+
+ try:
+ import subprocess
+ __version__ = 'develop-' + subprocess.check_output(
+ ['git', 'log', '--format="%h"', '-n 1'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip().strip('"')
+ except Exception:
+ # git not available, ignore
+ pass
class DependencyException(Exception):
| {"golden_diff": "diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py\n--- a/freqtrade/__init__.py\n+++ b/freqtrade/__init__.py\n@@ -1,5 +1,16 @@\n \"\"\" FreqTrade bot \"\"\"\n-__version__ = '2019.7-dev'\n+__version__ = 'develop'\n+\n+if __version__ == 'develop':\n+\n+ try:\n+ import subprocess\n+ __version__ = 'develop-' + subprocess.check_output(\n+ ['git', 'log', '--format=\"%h\"', '-n 1'],\n+ stderr=subprocess.DEVNULL).decode(\"utf-8\").rstrip().strip('\"')\n+ except Exception:\n+ # git not available, ignore\n+ pass\n \n \n class DependencyException(Exception):\n", "issue": "Build number in -dev versioning\nSince we do not have a build number in the -dev version, it may be quite complex to understand which particular version of freqtrade the user runs (especially with that your conda). So if we change user interface I guess it's better for a while to make those changes along with changing the version of both develop and master...\r\n\r\nIs it possible to introduce the build number into the -dev versioning, which will change with every merge, as ccxt employs, for example?\r\n\r\nIt's not necessary for master (2019-8 is completely enough), but for develop changing every day this may be useful since `freqtrade -V` will give particular version info (even when a user has current codebase from github fetched in a dir, but installed an outdated version with that your conda, which is used instead of latest develop from the dir; that's the confusing usecase when it's complicated to find out the actual version of freqtrade that runs at the user site...)\r\n\r\nIt's not necessary to be an increasing number, it can be a short commit id (merged into develop), for example...\r\n\n", "before_files": [{"content": "\"\"\" FreqTrade bot \"\"\"\n__version__ = '2019.7-dev'\n\n\nclass DependencyException(Exception):\n \"\"\"\n Indicates that an assumed dependency is not met.\n This could happen when there is currently not enough money on the account.\n \"\"\"\n\n\nclass OperationalException(Exception):\n \"\"\"\n Requires manual intervention and will usually stop the bot.\n This happens when an exchange returns an unexpected error during runtime\n or given configuration is invalid.\n \"\"\"\n\n\nclass InvalidOrderException(Exception):\n \"\"\"\n This is returned when the order is not valid. Example:\n If stoploss on exchange order is hit, then trying to cancel the order\n should return this exception.\n \"\"\"\n\n\nclass TemporaryError(Exception):\n \"\"\"\n Temporary network or exchange related error.\n This could happen when an exchange is congested, unavailable, or the user\n has networking problems. 
Usually resolves itself after a time.\n \"\"\"\n", "path": "freqtrade/__init__.py"}], "after_files": [{"content": "\"\"\" FreqTrade bot \"\"\"\n__version__ = 'develop'\n\nif __version__ == 'develop':\n\n try:\n import subprocess\n __version__ = 'develop-' + subprocess.check_output(\n ['git', 'log', '--format=\"%h\"', '-n 1'],\n stderr=subprocess.DEVNULL).decode(\"utf-8\").rstrip().strip('\"')\n except Exception:\n # git not available, ignore\n pass\n\n\nclass DependencyException(Exception):\n \"\"\"\n Indicates that an assumed dependency is not met.\n This could happen when there is currently not enough money on the account.\n \"\"\"\n\n\nclass OperationalException(Exception):\n \"\"\"\n Requires manual intervention and will usually stop the bot.\n This happens when an exchange returns an unexpected error during runtime\n or given configuration is invalid.\n \"\"\"\n\n\nclass InvalidOrderException(Exception):\n \"\"\"\n This is returned when the order is not valid. Example:\n If stoploss on exchange order is hit, then trying to cancel the order\n should return this exception.\n \"\"\"\n\n\nclass TemporaryError(Exception):\n \"\"\"\n Temporary network or exchange related error.\n This could happen when an exchange is congested, unavailable, or the user\n has networking problems. Usually resolves itself after a time.\n \"\"\"\n", "path": "freqtrade/__init__.py"}]} | 750 | 174 |
gh_patches_debug_13454 | rasdani/github-patches | git_diff | psychopy__psychopy-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Overlapping recordings problem
I am having a problem with mic.record and mic.stop - I am currently on psychopy 1.81.00, but I have had the same problem in earlier versions. I have written some code which records until the participant hits a key, or until a time-limit is reached. I am getting occasional truncated recordings or zero-length recordings - these occur when I test the code myself, so it's not just the participants being trigger-happy. I think the problem occurs when the timer on some past recording runs out, it stops the current recording. So say you set a recording running with a limit of 10 seconds, send a mic.stop() after 5 seconds, then start a new recording, that new recording will be stopped after 5 seconds, when the timer on the original recording runs out - it doesn't seem to be quite as neat as that in practice, which is confusing, but you can see this in action with something like the following little program. How often to occurs depends on how unlucky you are, but if you run through the for loop 10-15 times you will get some truncated recordings.
from psychopy import microphone,core,event, visual
def recording(window,trialNum,mic):
print('recording ' + str(trialNum))
mic.reset()
instructionText = visual.TextStim(window, text='Count to five, then press space',color="black",pos=(0,0.0),wrapWidth=2)
instructionText.draw()
window.flip()
mic.record(7,block=False,filename=str(trialNum)+'.wav') #start recording
event.waitKeys(maxWait='inf', keyList=['space']) #wait for a space from participant
core.wait(0.1) #so you can hear the click of the spacebar
window.flip()
mic.stop() #stop the mic
core.wait(0.1) #to get a flicker between screens
# set up mic and window
microphone.switchOn(sampleRate=44100)
mic = microphone.AudioCapture()
myWin = visual.Window((800,600), allowGUI=True,color='white')
for t in range(100): #shouldn't need to do as many as 100 to get some truncated recordings!
recording(myWin,t,mic)
microphone.switchOff()
core.quit()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/builder/components/microphone.py`
Content:
```
1 # Part of the PsychoPy library
2 # Copyright (C) 2014 Jonathan Peirce
3 # Distributed under the terms of the GNU General Public License (GPL).
4
5 # Author: Jeremy R. Gray, 2012
6
7 from _base import *
8 from os import path
9 from psychopy.app.builder import components #for getInitVals()
10
11 thisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path
12 iconFile = path.join(thisFolder,'microphone.png')
13 tooltip = _translate('Microphone: basic sound capture (fixed onset & duration), okay for spoken words')
14
15 _localized = {'stereo': _translate('Stereo')}
16
17 class MicrophoneComponent(BaseComponent):
18 """An event class for capturing short sound stimuli"""
19 categories = ['Responses']
20 def __init__(self, exp, parentName, name='mic_1',
21 startType='time (s)', startVal=0.0,
22 stopType='duration (s)', stopVal=2.0, startEstim='', durationEstim='',
23 stereo=False
24 ):
25 super(MicrophoneComponent, self).__init__(exp, parentName, name=name,
26 startType=startType, startVal=startVal,
27 stopType=stopType, stopVal=stopVal,
28 startEstim=startEstim, durationEstim=durationEstim)
29 self.type='Microphone'
30 self.url="http://www.psychopy.org/builder/components/microphone.html"
31 self.exp.requirePsychopyLibs(['microphone'])
32 #params
33 self.params['stereo']=Param(stereo, valType='bool',
34 hint=_translate("Record two channels (stereo) or one (mono, smaller file)"),
35 label=_localized['stereo'])
36 self.params['stopType'].allowedVals = ['duration (s)']
37 self.params['stopType'].hint = _translate('The duration of the recording in seconds; blank = 0 sec')
38 def writeStartCode(self,buff):
39 # filename should have date_time, so filename_wav should be unique
40 buff.writeIndented("wavDirName = filename + '_wav'\n")
41 buff.writeIndented("if not os.path.isdir(wavDirName):\n" +
42 " os.makedirs(wavDirName) # to hold .wav files\n")
43 def writeRoutineStartCode(self,buff):
44 inits = components.getInitVals(self.params)
45 buff.writeIndented("%s = microphone.AdvAudioCapture(name='%s', saveDir=wavDirName, stereo=%s)\n" %(
46 inits['name'], inits['name'], inits['stereo']))
47 def writeFrameCode(self,buff):
48 """Write the code that will be called every frame"""
49 duration = "%s" % self.params['stopVal'] # type is code
50 if not len(duration):
51 duration = "0"
52 # starting condition:
53 buff.writeIndented("\n")
54 buff.writeIndented("# *%s* updates\n" %(self.params['name']))
55 self.writeStartTestCode(buff) # writes an if statement
56 buff.writeIndented("%(name)s.status = STARTED\n" %(self.params))
57 buff.writeIndented("%s.record(sec=%s, block=False) # start the recording thread\n" %
58 (self.params['name'], duration))
59 buff.setIndentLevel(-1, relative=True) # ends the if statement
60 buff.writeIndented("\n")
61 # these lines handle both normal end of rec thread, and user .stop():
62 buff.writeIndented("if %(name)s.status == STARTED and not %(name)s.recorder.running:\n" % self.params)
63 buff.writeIndented(" %s.status = FINISHED\n" % self.params['name'])
64 def writeRoutineEndCode(self,buff):
65 #some shortcuts
66 name = self.params['name']
67 if len(self.exp.flow._loopList):
68 currLoop = self.exp.flow._loopList[-1] #last (outer-most) loop
69 else:
70 currLoop = self.exp._expHandler
71
72 #write the actual code
73 buff.writeIndented("# check responses\n" %self.params)
74 buff.writeIndented("if not %(name)s.savedFile:\n"%self.params)
75 buff.writeIndented(" %(name)s.savedFile = None\n" %(self.params))
76 buff.writeIndented("# store data for %s (%s)\n" %(currLoop.params['name'], currLoop.type))
77
78 #always add saved file name
79 buff.writeIndented("%s.addData('%s.filename', %s.savedFile)\n" % (currLoop.params['name'],name,name))
80 if currLoop.params['name'].val == self.exp._expHandler.name:
81 buff.writeIndented("%s.nextEntry()\n" % self.exp._expHandler.name)
82 # best not to do loudness / rms or other processing here
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/builder/components/microphone.py b/psychopy/app/builder/components/microphone.py
--- a/psychopy/app/builder/components/microphone.py
+++ b/psychopy/app/builder/components/microphone.py
@@ -70,7 +70,8 @@
currLoop = self.exp._expHandler
#write the actual code
- buff.writeIndented("# check responses\n" %self.params)
+ buff.writeIndented("# %(name)s stop & responses\n" %self.params)
+ buff.writeIndented("%s.stop() # sometimes helpful\n" % self.params['name'])
buff.writeIndented("if not %(name)s.savedFile:\n"%self.params)
buff.writeIndented(" %(name)s.savedFile = None\n" %(self.params))
buff.writeIndented("# store data for %s (%s)\n" %(currLoop.params['name'], currLoop.type))
| {"golden_diff": "diff --git a/psychopy/app/builder/components/microphone.py b/psychopy/app/builder/components/microphone.py\n--- a/psychopy/app/builder/components/microphone.py\n+++ b/psychopy/app/builder/components/microphone.py\n@@ -70,7 +70,8 @@\n currLoop = self.exp._expHandler\n \n #write the actual code\n- buff.writeIndented(\"# check responses\\n\" %self.params)\n+ buff.writeIndented(\"# %(name)s stop & responses\\n\" %self.params)\n+ buff.writeIndented(\"%s.stop() # sometimes helpful\\n\" % self.params['name'])\n buff.writeIndented(\"if not %(name)s.savedFile:\\n\"%self.params)\n buff.writeIndented(\" %(name)s.savedFile = None\\n\" %(self.params))\n buff.writeIndented(\"# store data for %s (%s)\\n\" %(currLoop.params['name'], currLoop.type))\n", "issue": "Overlapping recordings problem\nI am having a problem with mic.record and mic.stop - I am currently on psychopy 1.81.00, but I have had the same problem in earlier versions. I have written some code which records until the participant hits a key, or until a time-limit is reached. I am getting occasional truncated recordings or zero-length recordings - these occur when I test the code myself, so it's not just the participants being trigger-happy. I think the problem occurs when the timer on some past recording runs out, it stops the current recording. So say you set a recording running with a limit of 10 seconds, send a mic.stop() after 5 seconds, then start a new recording, that new recording will be stopped after 5 seconds, when the timer on the original recording runs out - it doesn't seem to be quite as neat as that in practice, which is confusing, but you can see this in action with something like the following little program. How often to occurs depends on how unlucky you are, but if you run through the for loop 10-15 times you will get some truncated recordings. \n\nfrom psychopy import microphone,core,event, visual\n\ndef recording(window,trialNum,mic):\n print('recording ' + str(trialNum))\n mic.reset()\n instructionText = visual.TextStim(window, text='Count to five, then press space',color=\"black\",pos=(0,0.0),wrapWidth=2)\n instructionText.draw()\n window.flip()\n mic.record(7,block=False,filename=str(trialNum)+'.wav') #start recording\n event.waitKeys(maxWait='inf', keyList=['space']) #wait for a space from participant\n core.wait(0.1) #so you can hear the click of the spacebar\n window.flip()\n mic.stop() #stop the mic\n core.wait(0.1) #to get a flicker between screens\n# set up mic and window\n\nmicrophone.switchOn(sampleRate=44100)\nmic = microphone.AudioCapture()\nmyWin = visual.Window((800,600), allowGUI=True,color='white')\nfor t in range(100): #shouldn't need to do as many as 100 to get some truncated recordings!\n recording(myWin,t,mic)\nmicrophone.switchOff()\ncore.quit()\n\n", "before_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Author: Jeremy R. 
Gray, 2012\n\nfrom _base import *\nfrom os import path\nfrom psychopy.app.builder import components #for getInitVals()\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'microphone.png')\ntooltip = _translate('Microphone: basic sound capture (fixed onset & duration), okay for spoken words')\n\n_localized = {'stereo': _translate('Stereo')}\n\nclass MicrophoneComponent(BaseComponent):\n \"\"\"An event class for capturing short sound stimuli\"\"\"\n categories = ['Responses']\n def __init__(self, exp, parentName, name='mic_1',\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=2.0, startEstim='', durationEstim='',\n stereo=False\n ):\n super(MicrophoneComponent, self).__init__(exp, parentName, name=name,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Microphone'\n self.url=\"http://www.psychopy.org/builder/components/microphone.html\"\n self.exp.requirePsychopyLibs(['microphone'])\n #params\n self.params['stereo']=Param(stereo, valType='bool',\n hint=_translate(\"Record two channels (stereo) or one (mono, smaller file)\"),\n label=_localized['stereo'])\n self.params['stopType'].allowedVals = ['duration (s)']\n self.params['stopType'].hint = _translate('The duration of the recording in seconds; blank = 0 sec')\n def writeStartCode(self,buff):\n # filename should have date_time, so filename_wav should be unique\n buff.writeIndented(\"wavDirName = filename + '_wav'\\n\")\n buff.writeIndented(\"if not os.path.isdir(wavDirName):\\n\" +\n \" os.makedirs(wavDirName) # to hold .wav files\\n\")\n def writeRoutineStartCode(self,buff):\n inits = components.getInitVals(self.params)\n buff.writeIndented(\"%s = microphone.AdvAudioCapture(name='%s', saveDir=wavDirName, stereo=%s)\\n\" %(\n inits['name'], inits['name'], inits['stereo']))\n def writeFrameCode(self,buff):\n \"\"\"Write the code that will be called every frame\"\"\"\n duration = \"%s\" % self.params['stopVal'] # type is code\n if not len(duration):\n duration = \"0\"\n # starting condition:\n buff.writeIndented(\"\\n\")\n buff.writeIndented(\"# *%s* updates\\n\" %(self.params['name']))\n self.writeStartTestCode(buff) # writes an if statement\n buff.writeIndented(\"%(name)s.status = STARTED\\n\" %(self.params))\n buff.writeIndented(\"%s.record(sec=%s, block=False) # start the recording thread\\n\" %\n (self.params['name'], duration))\n buff.setIndentLevel(-1, relative=True) # ends the if statement\n buff.writeIndented(\"\\n\")\n # these lines handle both normal end of rec thread, and user .stop():\n buff.writeIndented(\"if %(name)s.status == STARTED and not %(name)s.recorder.running:\\n\" % self.params)\n buff.writeIndented(\" %s.status = FINISHED\\n\" % self.params['name'])\n def writeRoutineEndCode(self,buff):\n #some shortcuts\n name = self.params['name']\n if len(self.exp.flow._loopList):\n currLoop = self.exp.flow._loopList[-1] #last (outer-most) loop\n else:\n currLoop = self.exp._expHandler\n\n #write the actual code\n buff.writeIndented(\"# check responses\\n\" %self.params)\n buff.writeIndented(\"if not %(name)s.savedFile:\\n\"%self.params)\n buff.writeIndented(\" %(name)s.savedFile = None\\n\" %(self.params))\n buff.writeIndented(\"# store data for %s (%s)\\n\" %(currLoop.params['name'], currLoop.type))\n\n #always add saved file name\n buff.writeIndented(\"%s.addData('%s.filename', %s.savedFile)\\n\" % 
(currLoop.params['name'],name,name))\n if currLoop.params['name'].val == self.exp._expHandler.name:\n buff.writeIndented(\"%s.nextEntry()\\n\" % self.exp._expHandler.name)\n # best not to do loudness / rms or other processing here\n", "path": "psychopy/app/builder/components/microphone.py"}], "after_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2014 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# Author: Jeremy R. Gray, 2012\n\nfrom _base import *\nfrom os import path\nfrom psychopy.app.builder import components #for getInitVals()\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'microphone.png')\ntooltip = _translate('Microphone: basic sound capture (fixed onset & duration), okay for spoken words')\n\n_localized = {'stereo': _translate('Stereo')}\n\nclass MicrophoneComponent(BaseComponent):\n \"\"\"An event class for capturing short sound stimuli\"\"\"\n categories = ['Responses']\n def __init__(self, exp, parentName, name='mic_1',\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=2.0, startEstim='', durationEstim='',\n stereo=False\n ):\n super(MicrophoneComponent, self).__init__(exp, parentName, name=name,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Microphone'\n self.url=\"http://www.psychopy.org/builder/components/microphone.html\"\n self.exp.requirePsychopyLibs(['microphone'])\n #params\n self.params['stereo']=Param(stereo, valType='bool',\n hint=_translate(\"Record two channels (stereo) or one (mono, smaller file)\"),\n label=_localized['stereo'])\n self.params['stopType'].allowedVals = ['duration (s)']\n self.params['stopType'].hint = _translate('The duration of the recording in seconds; blank = 0 sec')\n def writeStartCode(self,buff):\n # filename should have date_time, so filename_wav should be unique\n buff.writeIndented(\"wavDirName = filename + '_wav'\\n\")\n buff.writeIndented(\"if not os.path.isdir(wavDirName):\\n\" +\n \" os.makedirs(wavDirName) # to hold .wav files\\n\")\n def writeRoutineStartCode(self,buff):\n inits = components.getInitVals(self.params)\n buff.writeIndented(\"%s = microphone.AdvAudioCapture(name='%s', saveDir=wavDirName, stereo=%s)\\n\" %(\n inits['name'], inits['name'], inits['stereo']))\n def writeFrameCode(self,buff):\n \"\"\"Write the code that will be called every frame\"\"\"\n duration = \"%s\" % self.params['stopVal'] # type is code\n if not len(duration):\n duration = \"0\"\n # starting condition:\n buff.writeIndented(\"\\n\")\n buff.writeIndented(\"# *%s* updates\\n\" %(self.params['name']))\n self.writeStartTestCode(buff) # writes an if statement\n buff.writeIndented(\"%(name)s.status = STARTED\\n\" %(self.params))\n buff.writeIndented(\"%s.record(sec=%s, block=False) # start the recording thread\\n\" %\n (self.params['name'], duration))\n buff.setIndentLevel(-1, relative=True) # ends the if statement\n buff.writeIndented(\"\\n\")\n # these lines handle both normal end of rec thread, and user .stop():\n buff.writeIndented(\"if %(name)s.status == STARTED and not %(name)s.recorder.running:\\n\" % self.params)\n buff.writeIndented(\" %s.status = FINISHED\\n\" % self.params['name'])\n def writeRoutineEndCode(self,buff):\n #some shortcuts\n name = self.params['name']\n if len(self.exp.flow._loopList):\n currLoop = self.exp.flow._loopList[-1] #last (outer-most) loop\n 
else:\n currLoop = self.exp._expHandler\n\n #write the actual code\n buff.writeIndented(\"# %(name)s stop & responses\\n\" %self.params)\n buff.writeIndented(\"%s.stop() # sometimes helpful\\n\" % self.params['name'])\n buff.writeIndented(\"if not %(name)s.savedFile:\\n\"%self.params)\n buff.writeIndented(\" %(name)s.savedFile = None\\n\" %(self.params))\n buff.writeIndented(\"# store data for %s (%s)\\n\" %(currLoop.params['name'], currLoop.type))\n\n #always add saved file name\n buff.writeIndented(\"%s.addData('%s.filename', %s.savedFile)\\n\" % (currLoop.params['name'],name,name))\n if currLoop.params['name'].val == self.exp._expHandler.name:\n buff.writeIndented(\"%s.nextEntry()\\n\" % self.exp._expHandler.name)\n # best not to do loudness / rms or other processing here\n", "path": "psychopy/app/builder/components/microphone.py"}]} | 1,971 | 201 |
gh_patches_debug_15719 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-20731 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gfycat.com url changes
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2019.04.17**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/ytdl-org/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/ytdl-org/youtube-dl#faq) and [BUGS](https://github.com/ytdl-org/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [x] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
gfycat.com has added dashes to some URLs [https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball](https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball), causing an HTTP error.
This could be fixed by excluding dashes in the url InfoExtractor.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/gfycat.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 from ..utils import (
6 int_or_none,
7 float_or_none,
8 qualities,
9 ExtractorError,
10 )
11
12
13 class GfycatIE(InfoExtractor):
14 _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'
15 _TESTS = [{
16 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',
17 'info_dict': {
18 'id': 'DeadlyDecisiveGermanpinscher',
19 'ext': 'mp4',
20 'title': 'Ghost in the Shell',
21 'timestamp': 1410656006,
22 'upload_date': '20140914',
23 'uploader': 'anonymous',
24 'duration': 10.4,
25 'view_count': int,
26 'like_count': int,
27 'dislike_count': int,
28 'categories': list,
29 'age_limit': 0,
30 }
31 }, {
32 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',
33 'info_dict': {
34 'id': 'JauntyTimelyAmazontreeboa',
35 'ext': 'mp4',
36 'title': 'JauntyTimelyAmazontreeboa',
37 'timestamp': 1411720126,
38 'upload_date': '20140926',
39 'uploader': 'anonymous',
40 'duration': 3.52,
41 'view_count': int,
42 'like_count': int,
43 'dislike_count': int,
44 'categories': list,
45 'age_limit': 0,
46 }
47 }, {
48 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',
49 'only_matching': True
50 }]
51
52 def _real_extract(self, url):
53 video_id = self._match_id(url)
54
55 gfy = self._download_json(
56 'https://api.gfycat.com/v1/gfycats/%s' % video_id,
57 video_id, 'Downloading video info')
58 if 'error' in gfy:
59 raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)
60 gfy = gfy['gfyItem']
61
62 title = gfy.get('title') or gfy['gfyName']
63 description = gfy.get('description')
64 timestamp = int_or_none(gfy.get('createDate'))
65 uploader = gfy.get('userName')
66 view_count = int_or_none(gfy.get('views'))
67 like_count = int_or_none(gfy.get('likes'))
68 dislike_count = int_or_none(gfy.get('dislikes'))
69 age_limit = 18 if gfy.get('nsfw') == '1' else 0
70
71 width = int_or_none(gfy.get('width'))
72 height = int_or_none(gfy.get('height'))
73 fps = int_or_none(gfy.get('frameRate'))
74 num_frames = int_or_none(gfy.get('numFrames'))
75
76 duration = float_or_none(num_frames, fps) if num_frames and fps else None
77
78 categories = gfy.get('tags') or gfy.get('extraLemmas') or []
79
80 FORMATS = ('gif', 'webm', 'mp4')
81 quality = qualities(FORMATS)
82
83 formats = []
84 for format_id in FORMATS:
85 video_url = gfy.get('%sUrl' % format_id)
86 if not video_url:
87 continue
88 filesize = int_or_none(gfy.get('%sSize' % format_id))
89 formats.append({
90 'url': video_url,
91 'format_id': format_id,
92 'width': width,
93 'height': height,
94 'fps': fps,
95 'filesize': filesize,
96 'quality': quality(format_id),
97 })
98 self._sort_formats(formats)
99
100 return {
101 'id': video_id,
102 'title': title,
103 'description': description,
104 'timestamp': timestamp,
105 'uploader': uploader,
106 'duration': duration,
107 'view_count': view_count,
108 'like_count': like_count,
109 'dislike_count': dislike_count,
110 'categories': categories,
111 'age_limit': age_limit,
112 'formats': formats,
113 }
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/gfycat.py b/youtube_dl/extractor/gfycat.py
--- a/youtube_dl/extractor/gfycat.py
+++ b/youtube_dl/extractor/gfycat.py
@@ -11,7 +11,7 @@
class GfycatIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'
+ _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/|gifs/detail/)?(?P<id>[^-/?#]+)'
_TESTS = [{
'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',
'info_dict': {
@@ -47,6 +47,9 @@
}, {
'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',
'only_matching': True
+ }, {
+ 'url': 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball',
+ 'only_matching': True
}]
def _real_extract(self, url):
| {"golden_diff": "diff --git a/youtube_dl/extractor/gfycat.py b/youtube_dl/extractor/gfycat.py\n--- a/youtube_dl/extractor/gfycat.py\n+++ b/youtube_dl/extractor/gfycat.py\n@@ -11,7 +11,7 @@\n \n \n class GfycatIE(InfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'\n+ _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^-/?#]+)'\n _TESTS = [{\n 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',\n 'info_dict': {\n@@ -47,6 +47,9 @@\n }, {\n 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',\n 'only_matching': True\n+ }, {\n+ 'url': 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball',\n+ 'only_matching': True\n }]\n \n def _real_extract(self, url):\n", "issue": "gfycat.com url changes\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2019.04.17**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/ytdl-org/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/ytdl-org/youtube-dl#faq) and [BUGS](https://github.com/ytdl-org/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [x] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\ngfycat.com has added dashes to some urls [https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball](https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball) causing a HTTP Error. 
\r\nThis could be fixed by excluding dashes in the url InfoExtractor.\r\n\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n float_or_none,\n qualities,\n ExtractorError,\n)\n\n\nclass GfycatIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',\n 'info_dict': {\n 'id': 'DeadlyDecisiveGermanpinscher',\n 'ext': 'mp4',\n 'title': 'Ghost in the Shell',\n 'timestamp': 1410656006,\n 'upload_date': '20140914',\n 'uploader': 'anonymous',\n 'duration': 10.4,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',\n 'info_dict': {\n 'id': 'JauntyTimelyAmazontreeboa',\n 'ext': 'mp4',\n 'title': 'JauntyTimelyAmazontreeboa',\n 'timestamp': 1411720126,\n 'upload_date': '20140926',\n 'uploader': 'anonymous',\n 'duration': 3.52,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n gfy = self._download_json(\n 'https://api.gfycat.com/v1/gfycats/%s' % video_id,\n video_id, 'Downloading video info')\n if 'error' in gfy:\n raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)\n gfy = gfy['gfyItem']\n\n title = gfy.get('title') or gfy['gfyName']\n description = gfy.get('description')\n timestamp = int_or_none(gfy.get('createDate'))\n uploader = gfy.get('userName')\n view_count = int_or_none(gfy.get('views'))\n like_count = int_or_none(gfy.get('likes'))\n dislike_count = int_or_none(gfy.get('dislikes'))\n age_limit = 18 if gfy.get('nsfw') == '1' else 0\n\n width = int_or_none(gfy.get('width'))\n height = int_or_none(gfy.get('height'))\n fps = int_or_none(gfy.get('frameRate'))\n num_frames = int_or_none(gfy.get('numFrames'))\n\n duration = float_or_none(num_frames, fps) if num_frames and fps else None\n\n categories = gfy.get('tags') or gfy.get('extraLemmas') or []\n\n FORMATS = ('gif', 'webm', 'mp4')\n quality = qualities(FORMATS)\n\n formats = []\n for format_id in FORMATS:\n video_url = gfy.get('%sUrl' % format_id)\n if not video_url:\n continue\n filesize = int_or_none(gfy.get('%sSize' % format_id))\n formats.append({\n 'url': video_url,\n 'format_id': format_id,\n 'width': width,\n 'height': height,\n 'fps': fps,\n 'filesize': filesize,\n 'quality': quality(format_id),\n })\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'timestamp': timestamp,\n 'uploader': uploader,\n 'duration': duration,\n 'view_count': view_count,\n 'like_count': like_count,\n 'dislike_count': dislike_count,\n 'categories': categories,\n 'age_limit': age_limit,\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/gfycat.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n float_or_none,\n qualities,\n ExtractorError,\n)\n\n\nclass GfycatIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?gfycat\\.com/(?:ifr/|gifs/detail/)?(?P<id>[^-/?#]+)'\n _TESTS = [{\n 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher',\n 'info_dict': {\n 'id': 
'DeadlyDecisiveGermanpinscher',\n 'ext': 'mp4',\n 'title': 'Ghost in the Shell',\n 'timestamp': 1410656006,\n 'upload_date': '20140914',\n 'uploader': 'anonymous',\n 'duration': 10.4,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa',\n 'info_dict': {\n 'id': 'JauntyTimelyAmazontreeboa',\n 'ext': 'mp4',\n 'title': 'JauntyTimelyAmazontreeboa',\n 'timestamp': 1411720126,\n 'upload_date': '20140926',\n 'uploader': 'anonymous',\n 'duration': 3.52,\n 'view_count': int,\n 'like_count': int,\n 'dislike_count': int,\n 'categories': list,\n 'age_limit': 0,\n }\n }, {\n 'url': 'https://gfycat.com/gifs/detail/UnconsciousLankyIvorygull',\n 'only_matching': True\n }, {\n 'url': 'https://gfycat.com/acceptablehappygoluckyharborporpoise-baseball',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n gfy = self._download_json(\n 'https://api.gfycat.com/v1/gfycats/%s' % video_id,\n video_id, 'Downloading video info')\n if 'error' in gfy:\n raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True)\n gfy = gfy['gfyItem']\n\n title = gfy.get('title') or gfy['gfyName']\n description = gfy.get('description')\n timestamp = int_or_none(gfy.get('createDate'))\n uploader = gfy.get('userName')\n view_count = int_or_none(gfy.get('views'))\n like_count = int_or_none(gfy.get('likes'))\n dislike_count = int_or_none(gfy.get('dislikes'))\n age_limit = 18 if gfy.get('nsfw') == '1' else 0\n\n width = int_or_none(gfy.get('width'))\n height = int_or_none(gfy.get('height'))\n fps = int_or_none(gfy.get('frameRate'))\n num_frames = int_or_none(gfy.get('numFrames'))\n\n duration = float_or_none(num_frames, fps) if num_frames and fps else None\n\n categories = gfy.get('tags') or gfy.get('extraLemmas') or []\n\n FORMATS = ('gif', 'webm', 'mp4')\n quality = qualities(FORMATS)\n\n formats = []\n for format_id in FORMATS:\n video_url = gfy.get('%sUrl' % format_id)\n if not video_url:\n continue\n filesize = int_or_none(gfy.get('%sSize' % format_id))\n formats.append({\n 'url': video_url,\n 'format_id': format_id,\n 'width': width,\n 'height': height,\n 'fps': fps,\n 'filesize': filesize,\n 'quality': quality(format_id),\n })\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'timestamp': timestamp,\n 'uploader': uploader,\n 'duration': duration,\n 'view_count': view_count,\n 'like_count': like_count,\n 'dislike_count': dislike_count,\n 'categories': categories,\n 'age_limit': age_limit,\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/gfycat.py"}]} | 1,807 | 289 |
gh_patches_debug_22409 | rasdani/github-patches | git_diff | xonsh__xonsh-1551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`xonsh.completers.pip` explodes if `pip` is not on PATH
On my Windows installation, Python is not on PATH (because multiple Python madness), and therefore neither is pip. However, the pip completer [expects pip to be on the path](https://github.com/xonsh/xonsh/blob/master/xonsh/completers/pip.py#L14).
This causes the completer to blow up with a `FileNotFoundError` when it tries to complete.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/completers/pip.py`
Content:
```
1 import re
2 import subprocess
3
4 import xonsh.lazyasd as xl
5
6 PIP_RE = xl.LazyObject(lambda: re.compile("pip(?:\d|\.)*"),
7 globals(), 'PIP_RE')
8 PIP_LIST_RE = xl.LazyObject(lambda: re.compile("pip(?:\d|\.)* (?:uninstall|show)"),
9 globals(), 'PIP_LIST_RE')
10
11
12 @xl.lazyobject
13 def ALL_COMMANDS():
14 help_text = str(subprocess.check_output(['pip', '--help'],
15 stderr=subprocess.DEVNULL))
16 commands = re.findall(" (\w+) ", help_text)
17 return [c for c in commands if c not in ['completion', 'help']]
18
19
20 def complete_pip(prefix, line, begidx, endidx, ctx):
21 """Completes python's package manager pip"""
22 line_len = len(line.split())
23 if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \
24 (not PIP_RE.search(line)):
25 return
26 if PIP_LIST_RE.search(line):
27 items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)
28 items = items.decode('utf-8').splitlines()
29 return set(i.split()[0] for i in items)
30
31 if (line_len > 1 and line.endswith(' ')) or line_len > 2:
32 # "pip show " -> no complete (note space)
33 return
34 if prefix not in ALL_COMMANDS:
35 suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]
36 if suggestions:
37 return suggestions, len(prefix)
38 return ALL_COMMANDS, len(prefix)
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py
--- a/xonsh/completers/pip.py
+++ b/xonsh/completers/pip.py
@@ -11,8 +11,11 @@
@xl.lazyobject
def ALL_COMMANDS():
- help_text = str(subprocess.check_output(['pip', '--help'],
- stderr=subprocess.DEVNULL))
+ try:
+ help_text = str(subprocess.check_output(['pip', '--help'],
+ stderr=subprocess.DEVNULL))
+ except FileNotFoundError:
+ return []
commands = re.findall(" (\w+) ", help_text)
return [c for c in commands if c not in ['completion', 'help']]
@@ -24,7 +27,11 @@
(not PIP_RE.search(line)):
return
if PIP_LIST_RE.search(line):
- items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)
+ try:
+ items = subprocess.check_output(['pip', 'list'],
+ stderr=subprocess.DEVNULL)
+ except FileNotFoundError:
+ return set()
items = items.decode('utf-8').splitlines()
return set(i.split()[0] for i in items)
| {"golden_diff": "diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py\n--- a/xonsh/completers/pip.py\n+++ b/xonsh/completers/pip.py\n@@ -11,8 +11,11 @@\n \n @xl.lazyobject\n def ALL_COMMANDS():\n- help_text = str(subprocess.check_output(['pip', '--help'],\n- stderr=subprocess.DEVNULL))\n+ try:\n+ help_text = str(subprocess.check_output(['pip', '--help'],\n+ stderr=subprocess.DEVNULL))\n+ except FileNotFoundError:\n+ return []\n commands = re.findall(\" (\\w+) \", help_text)\n return [c for c in commands if c not in ['completion', 'help']]\n \n@@ -24,7 +27,11 @@\n (not PIP_RE.search(line)):\n return\n if PIP_LIST_RE.search(line):\n- items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)\n+ try:\n+ items = subprocess.check_output(['pip', 'list'],\n+ stderr=subprocess.DEVNULL)\n+ except FileNotFoundError:\n+ return set()\n items = items.decode('utf-8').splitlines()\n return set(i.split()[0] for i in items)\n", "issue": "`xonsh.completers.pip` explodes if `pip` is not on PATH\nOn my Windows installation, Python is not on PATH (because multiple Python madness), and therefore neither is pip. However, the pip completer [expects pip to be on the path](https://github.com/xonsh/xonsh/blob/master/xonsh/completers/pip.py#L14).\n\nThis causes the completer to blow up with a `FileNotFoundError` when it tries to complete.\n\n", "before_files": [{"content": "import re\nimport subprocess\n\nimport xonsh.lazyasd as xl\n\nPIP_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)*\"),\n globals(), 'PIP_RE')\nPIP_LIST_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)* (?:uninstall|show)\"),\n globals(), 'PIP_LIST_RE')\n\n\[email protected]\ndef ALL_COMMANDS():\n help_text = str(subprocess.check_output(['pip', '--help'],\n stderr=subprocess.DEVNULL))\n commands = re.findall(\" (\\w+) \", help_text)\n return [c for c in commands if c not in ['completion', 'help']]\n\n\ndef complete_pip(prefix, line, begidx, endidx, ctx):\n \"\"\"Completes python's package manager pip\"\"\"\n line_len = len(line.split())\n if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \\\n (not PIP_RE.search(line)):\n return\n if PIP_LIST_RE.search(line):\n items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)\n items = items.decode('utf-8').splitlines()\n return set(i.split()[0] for i in items)\n\n if (line_len > 1 and line.endswith(' ')) or line_len > 2:\n # \"pip show \" -> no complete (note space)\n return\n if prefix not in ALL_COMMANDS:\n suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]\n if suggestions:\n return suggestions, len(prefix)\n return ALL_COMMANDS, len(prefix)\n", "path": "xonsh/completers/pip.py"}], "after_files": [{"content": "import re\nimport subprocess\n\nimport xonsh.lazyasd as xl\n\nPIP_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)*\"),\n globals(), 'PIP_RE')\nPIP_LIST_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)* (?:uninstall|show)\"),\n globals(), 'PIP_LIST_RE')\n\n\[email protected]\ndef ALL_COMMANDS():\n try:\n help_text = str(subprocess.check_output(['pip', '--help'],\n stderr=subprocess.DEVNULL))\n except FileNotFoundError:\n return []\n commands = re.findall(\" (\\w+) \", help_text)\n return [c for c in commands if c not in ['completion', 'help']]\n\n\ndef complete_pip(prefix, line, begidx, endidx, ctx):\n \"\"\"Completes python's package manager pip\"\"\"\n line_len = len(line.split())\n if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \\\n (not PIP_RE.search(line)):\n return\n if 
PIP_LIST_RE.search(line):\n try:\n items = subprocess.check_output(['pip', 'list'],\n stderr=subprocess.DEVNULL)\n except FileNotFoundError:\n return set()\n items = items.decode('utf-8').splitlines()\n return set(i.split()[0] for i in items)\n\n if (line_len > 1 and line.endswith(' ')) or line_len > 2:\n # \"pip show \" -> no complete (note space)\n return\n if prefix not in ALL_COMMANDS:\n suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]\n if suggestions:\n return suggestions, len(prefix)\n return ALL_COMMANDS, len(prefix)\n", "path": "xonsh/completers/pip.py"}]} | 790 | 281 |
gh_patches_debug_11511 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-307 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor differences in what's displayed after user response 45.state-management bot
## Version
v4.50b4
## Describe the bug
There's a minor difference in what's displayed after the user responds to the bot. The javascript_nodejs bot exhibits the same behavior (see [issue 1718](https://github.com/microsoft/BotBuilder-Samples/issues/1718) for more information).
## To Reproduce
Run bot per README.md instructions
1. go to bot's folder
2. run `python install -r requirement.txt`, then run `python app.py`
3. open in Emulator
The csharp_dotnet and javascript_nodejs bots were also run via CLI.
## Expected behavior
Bot should look and function just like bots in other languages (specifically csharp_dotnet bot since there are currently issues with javascript_nodejs sample).
## Screenshots
**charp_dotnetcore bot**: Bot responds with, "Thanks <string_user_responded_with. To see conversation data, type anything." after user's second response. Also welcomes users. This is IMHO the best version/gold standard for the sample currently.

**Python bot**: Bot responds with, "Thanks <string_user_responded_with." after user's second response. Also welcomes user.

**javascript_nodejs bot**: Bot responds with, "Thanks <string_user_responded_with." after user's second response. Does not welcome user (addressed in [issue 1718](https://github.com/microsoft/BotBuilder-Samples/issues/1718)).

## Additional context
To fix: Add **"To see conversation data, type anything."** to the string in **line 62** in 45.state-management/bots/state_management_bot.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samples/45.state-management/bots/state_management_bot.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import time
5 import pytz
6 from datetime import datetime
7
8 from botbuilder.core import ActivityHandler, ConversationState, TurnContext, UserState
9 from botbuilder.schema import ChannelAccount
10
11 from data_models import ConversationData, UserProfile
12
13
14 class StateManagementBot(ActivityHandler):
15 def __init__(self, conversation_state: ConversationState, user_state: UserState):
16 if conversation_state is None:
17 raise TypeError(
18 "[StateManagementBot]: Missing parameter. conversation_state is required but None was given"
19 )
20 if user_state is None:
21 raise TypeError(
22 "[StateManagementBot]: Missing parameter. user_state is required but None was given"
23 )
24
25 self.conversation_state = conversation_state
26 self.user_state = user_state
27
28 self.conversation_data = self.conversation_state.create_property(
29 "ConversationData"
30 )
31 self.user_profile = self.conversation_state.create_property("UserProfile")
32
33 async def on_turn(self, turn_context: TurnContext):
34 await super().on_turn(turn_context)
35
36 await self.conversation_state.save_changes(turn_context)
37 await self.user_state.save_changes(turn_context)
38
39 async def on_members_added_activity(
40 self, members_added: [ChannelAccount], turn_context: TurnContext
41 ):
42 for member in members_added:
43 if member.id != turn_context.activity.recipient.id:
44 await turn_context.send_activity(
45 "Welcome to State Bot Sample. Type anything to get started."
46 )
47
48 async def on_message_activity(self, turn_context: TurnContext):
49 # Get the state properties from the turn context.
50 user_profile = await self.user_profile.get(turn_context, UserProfile)
51 conversation_data = await self.conversation_data.get(
52 turn_context, ConversationData
53 )
54
55 if user_profile.name is None:
56 # First time around this is undefined, so we will prompt user for name.
57 if conversation_data.prompted_for_user_name:
58 # Set the name to what the user provided.
59 user_profile.name = turn_context.activity.text
60
61 # Acknowledge that we got their name.
62 await turn_context.send_activity(f"Thanks { user_profile.name }.")
63
64 # Reset the flag to allow the bot to go though the cycle again.
65 conversation_data.prompted_for_user_name = False
66 else:
67 # Prompt the user for their name.
68 await turn_context.send_activity("What is your name?")
69
70 # Set the flag to true, so we don't prompt in the next turn.
71 conversation_data.prompted_for_user_name = True
72 else:
73 # Add message details to the conversation data.
74 conversation_data.timestamp = self.__datetime_from_utc_to_local(
75 turn_context.activity.timestamp
76 )
77 conversation_data.channel_id = turn_context.activity.channel_id
78
79 # Display state data.
80 await turn_context.send_activity(
81 f"{ user_profile.name } sent: { turn_context.activity.text }"
82 )
83 await turn_context.send_activity(
84 f"Message received at: { conversation_data.timestamp }"
85 )
86 await turn_context.send_activity(
87 f"Message received from: { conversation_data.channel_id }"
88 )
89
90 def __datetime_from_utc_to_local(self, utc_datetime):
91 now_timestamp = time.time()
92 offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(
93 now_timestamp
94 )
95 result = utc_datetime + offset
96 return result.strftime("%I:%M:%S %p, %A, %B %d of %Y")
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samples/45.state-management/bots/state_management_bot.py b/samples/45.state-management/bots/state_management_bot.py
--- a/samples/45.state-management/bots/state_management_bot.py
+++ b/samples/45.state-management/bots/state_management_bot.py
@@ -59,7 +59,9 @@
user_profile.name = turn_context.activity.text
# Acknowledge that we got their name.
- await turn_context.send_activity(f"Thanks { user_profile.name }.")
+ await turn_context.send_activity(
+ f"Thanks { user_profile.name }. To see conversation data, type anything."
+ )
# Reset the flag to allow the bot to go though the cycle again.
conversation_data.prompted_for_user_name = False
| {"golden_diff": "diff --git a/samples/45.state-management/bots/state_management_bot.py b/samples/45.state-management/bots/state_management_bot.py\n--- a/samples/45.state-management/bots/state_management_bot.py\n+++ b/samples/45.state-management/bots/state_management_bot.py\n@@ -59,7 +59,9 @@\n user_profile.name = turn_context.activity.text\n \n # Acknowledge that we got their name.\n- await turn_context.send_activity(f\"Thanks { user_profile.name }.\")\n+ await turn_context.send_activity(\n+ f\"Thanks { user_profile.name }. To see conversation data, type anything.\"\n+ )\n \n # Reset the flag to allow the bot to go though the cycle again.\n conversation_data.prompted_for_user_name = False\n", "issue": "Minor differences in what's displayed after user response 45.state-management bot\n## Version\r\nv4.50b4\r\n\r\n## Describe the bug\r\nThere's a minor difference in what's displayed after the user responds to the bot. The javascript_nodejs bot exhibits the same behavior (see [issue 1718](https://github.com/microsoft/BotBuilder-Samples/issues/1718) for more information).\r\n\r\n## To Reproduce\r\nRun bot per README.md instructions\r\n1. go to bot's folder\r\n2. run `python install -r requirement.txt`, then run `python app.py`\r\n3. open in Emulator\r\n\r\nThe csharp_dotnet and javascript_nodejs bots were also run via CLI. \r\n\r\n## Expected behavior\r\nBot should look and function just like bots in other languages (specifically csharp_dotnet bot since there are currently issues with javascript_nodejs sample). \r\n\r\n## Screenshots\r\n**charp_dotnetcore bot**: Bot responds with, \"Thanks <string_user_responded_with. To see conversation data, type anything.\" after user's second response. Also welcomes users. This is IMHO the best version/gold standard for the sample currently. \r\n\r\n\r\n**Python bot**: Bot responds with, \"Thanks <string_user_responded_with.\" after user's second response. Also welcomes user.\r\n\r\n\r\n**javascript_nodejs bot**: Bot responds with, \"Thanks <string_user_responded_with.\" after user's second response. Does not welcome user (addressed in [issue 1718](https://github.com/microsoft/BotBuilder-Samples/issues/1718)).\r\n\r\n\r\n\r\n## Additional context\r\nTo fix: Add **\"To see conversation data, type anything.\"** to the string in **line 62** in 45.state-management/bots/state_management_bot.py\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport time\nimport pytz\nfrom datetime import datetime\n\nfrom botbuilder.core import ActivityHandler, ConversationState, TurnContext, UserState\nfrom botbuilder.schema import ChannelAccount\n\nfrom data_models import ConversationData, UserProfile\n\n\nclass StateManagementBot(ActivityHandler):\n def __init__(self, conversation_state: ConversationState, user_state: UserState):\n if conversation_state is None:\n raise TypeError(\n \"[StateManagementBot]: Missing parameter. conversation_state is required but None was given\"\n )\n if user_state is None:\n raise TypeError(\n \"[StateManagementBot]: Missing parameter. 
user_state is required but None was given\"\n )\n\n self.conversation_state = conversation_state\n self.user_state = user_state\n\n self.conversation_data = self.conversation_state.create_property(\n \"ConversationData\"\n )\n self.user_profile = self.conversation_state.create_property(\"UserProfile\")\n\n async def on_turn(self, turn_context: TurnContext):\n await super().on_turn(turn_context)\n\n await self.conversation_state.save_changes(turn_context)\n await self.user_state.save_changes(turn_context)\n\n async def on_members_added_activity(\n self, members_added: [ChannelAccount], turn_context: TurnContext\n ):\n for member in members_added:\n if member.id != turn_context.activity.recipient.id:\n await turn_context.send_activity(\n \"Welcome to State Bot Sample. Type anything to get started.\"\n )\n\n async def on_message_activity(self, turn_context: TurnContext):\n # Get the state properties from the turn context.\n user_profile = await self.user_profile.get(turn_context, UserProfile)\n conversation_data = await self.conversation_data.get(\n turn_context, ConversationData\n )\n\n if user_profile.name is None:\n # First time around this is undefined, so we will prompt user for name.\n if conversation_data.prompted_for_user_name:\n # Set the name to what the user provided.\n user_profile.name = turn_context.activity.text\n\n # Acknowledge that we got their name.\n await turn_context.send_activity(f\"Thanks { user_profile.name }.\")\n\n # Reset the flag to allow the bot to go though the cycle again.\n conversation_data.prompted_for_user_name = False\n else:\n # Prompt the user for their name.\n await turn_context.send_activity(\"What is your name?\")\n\n # Set the flag to true, so we don't prompt in the next turn.\n conversation_data.prompted_for_user_name = True\n else:\n # Add message details to the conversation data.\n conversation_data.timestamp = self.__datetime_from_utc_to_local(\n turn_context.activity.timestamp\n )\n conversation_data.channel_id = turn_context.activity.channel_id\n\n # Display state data.\n await turn_context.send_activity(\n f\"{ user_profile.name } sent: { turn_context.activity.text }\"\n )\n await turn_context.send_activity(\n f\"Message received at: { conversation_data.timestamp }\"\n )\n await turn_context.send_activity(\n f\"Message received from: { conversation_data.channel_id }\"\n )\n\n def __datetime_from_utc_to_local(self, utc_datetime):\n now_timestamp = time.time()\n offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(\n now_timestamp\n )\n result = utc_datetime + offset\n return result.strftime(\"%I:%M:%S %p, %A, %B %d of %Y\")\n", "path": "samples/45.state-management/bots/state_management_bot.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport time\nimport pytz\nfrom datetime import datetime\n\nfrom botbuilder.core import ActivityHandler, ConversationState, TurnContext, UserState\nfrom botbuilder.schema import ChannelAccount\n\nfrom data_models import ConversationData, UserProfile\n\n\nclass StateManagementBot(ActivityHandler):\n def __init__(self, conversation_state: ConversationState, user_state: UserState):\n if conversation_state is None:\n raise TypeError(\n \"[StateManagementBot]: Missing parameter. conversation_state is required but None was given\"\n )\n if user_state is None:\n raise TypeError(\n \"[StateManagementBot]: Missing parameter. 
user_state is required but None was given\"\n )\n\n self.conversation_state = conversation_state\n self.user_state = user_state\n\n self.conversation_data = self.conversation_state.create_property(\n \"ConversationData\"\n )\n self.user_profile = self.conversation_state.create_property(\"UserProfile\")\n\n async def on_turn(self, turn_context: TurnContext):\n await super().on_turn(turn_context)\n\n await self.conversation_state.save_changes(turn_context)\n await self.user_state.save_changes(turn_context)\n\n async def on_members_added_activity(\n self, members_added: [ChannelAccount], turn_context: TurnContext\n ):\n for member in members_added:\n if member.id != turn_context.activity.recipient.id:\n await turn_context.send_activity(\n \"Welcome to State Bot Sample. Type anything to get started.\"\n )\n\n async def on_message_activity(self, turn_context: TurnContext):\n # Get the state properties from the turn context.\n user_profile = await self.user_profile.get(turn_context, UserProfile)\n conversation_data = await self.conversation_data.get(\n turn_context, ConversationData\n )\n\n if user_profile.name is None:\n # First time around this is undefined, so we will prompt user for name.\n if conversation_data.prompted_for_user_name:\n # Set the name to what the user provided.\n user_profile.name = turn_context.activity.text\n\n # Acknowledge that we got their name.\n await turn_context.send_activity(\n f\"Thanks { user_profile.name }. To see conversation data, type anything.\"\n )\n\n # Reset the flag to allow the bot to go though the cycle again.\n conversation_data.prompted_for_user_name = False\n else:\n # Prompt the user for their name.\n await turn_context.send_activity(\"What is your name?\")\n\n # Set the flag to true, so we don't prompt in the next turn.\n conversation_data.prompted_for_user_name = True\n else:\n # Add message details to the conversation data.\n conversation_data.timestamp = self.__datetime_from_utc_to_local(\n turn_context.activity.timestamp\n )\n conversation_data.channel_id = turn_context.activity.channel_id\n\n # Display state data.\n await turn_context.send_activity(\n f\"{ user_profile.name } sent: { turn_context.activity.text }\"\n )\n await turn_context.send_activity(\n f\"Message received at: { conversation_data.timestamp }\"\n )\n await turn_context.send_activity(\n f\"Message received from: { conversation_data.channel_id }\"\n )\n\n def __datetime_from_utc_to_local(self, utc_datetime):\n now_timestamp = time.time()\n offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(\n now_timestamp\n )\n result = utc_datetime + offset\n return result.strftime(\"%I:%M:%S %p, %A, %B %d of %Y\")\n", "path": "samples/45.state-management/bots/state_management_bot.py"}]} | 1,773 | 169 |
gh_patches_debug_28331 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing 5024: missing location label
**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list
**user:** any
**expected behaviour:**
**behaviour:** location label (Bezeichnung des markierten Ortes) is missing
**important screensize:**
**device & browser:**
**Comment/Question:** maybe we need a smaller char restriction here? it's at 255 now, I wonder if something like 50 should be enough for something displayed as a tag? or continue with ... for longer words?
old list
<img width="446" alt="Bildschirmfoto 2021-12-21 um 16 35 27" src="https://user-images.githubusercontent.com/35491681/146956690-789f6d02-372c-4877-a4c9-c539b5fc90c3.png">
new list
<img width="446" alt="Bildschirmfoto 2021-12-21 um 16 34 09" src="https://user-images.githubusercontent.com/35491681/146956491-2472f9f2-e90d-4975-88a8-fbe1a7012657.png">
old list with long label
<img width="656" alt="Bildschirmfoto 2021-12-21 um 16 36 09" src="https://user-images.githubusercontent.com/35491681/146956804-ced5b4b8-0da8-42fc-a17c-901fc86efe9b.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/budgeting/serializers.py`
Content:
```
1 from django.contrib.contenttypes.models import ContentType
2 from rest_framework import serializers
3
4 from adhocracy4.categories.models import Category
5 from meinberlin.apps.votes.models import TokenVote
6
7 from .models import Proposal
8
9
10 class CategoryField(serializers.Field):
11
12 def to_internal_value(self, category):
13 if category:
14 return Category.objects.get(pk=category)
15 else:
16 return None
17
18 def to_representation(self, category):
19 return {'id': category.pk, 'name': category.name}
20
21
22 class ProposalSerializer(serializers.ModelSerializer):
23
24 creator = serializers.SerializerMethodField()
25 comment_count = serializers.SerializerMethodField()
26 positive_rating_count = serializers.SerializerMethodField()
27 negative_rating_count = serializers.SerializerMethodField()
28 category = CategoryField()
29 url = serializers.SerializerMethodField()
30 moderator_feedback = serializers.SerializerMethodField()
31 session_token_voted = serializers.SerializerMethodField()
32
33 class Meta:
34 model = Proposal
35 fields = ('budget', 'category', 'comment_count', 'created', 'modified',
36 'creator', 'is_archived', 'name', 'negative_rating_count',
37 'positive_rating_count', 'url', 'pk', 'moderator_feedback',
38 'session_token_voted')
39 read_only_fields = ('budget', 'category', 'comment_count', 'created',
40 'modified', 'creator', 'is_archived', 'name',
41 'negative_rating_count', 'positive_rating_count',
42 'url', 'pk', 'moderator_feedback',
43 'session_token_voted')
44
45 def get_creator(self, proposal):
46 return proposal.creator.username
47
48 def get_comment_count(self, proposal):
49 if hasattr(proposal, 'comment_count'):
50 return proposal.comment_count
51 else:
52 return 0
53
54 def get_positive_rating_count(self, proposal):
55 if hasattr(proposal, 'positive_rating_count'):
56 return proposal.positive_rating_count
57 else:
58 return 0
59
60 def get_negative_rating_count(self, proposal):
61 if hasattr(proposal, 'negative_rating_count'):
62 return proposal.negative_rating_count
63 else:
64 return 0
65
66 def get_url(self, proposal):
67 return proposal.get_absolute_url()
68
69 def get_moderator_feedback(self, proposal):
70 if hasattr(proposal, 'moderator_feedback'):
71 return (proposal.moderator_feedback,
72 proposal.get_moderator_feedback_display())
73 else:
74 return None
75
76 def get_session_token_voted(self, proposal):
77 """Serialize if proposal has been voted.
78
79 Returns bool that indicates whether the proposal has
80 been voted with the token in the current session
81 """
82 if 'request' in self.context:
83 if 'voting_token' in self.context['request'].session:
84 vote = TokenVote.objects.filter(
85 token__pk=self.context['request'].session['voting_token'],
86 content_type=ContentType.objects.get_for_model(
87 proposal.__class__),
88 object_pk=proposal.pk
89 )
90 if vote.exists():
91 return True
92
93 return False
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py
--- a/meinberlin/apps/budgeting/serializers.py
+++ b/meinberlin/apps/budgeting/serializers.py
@@ -35,12 +35,12 @@
fields = ('budget', 'category', 'comment_count', 'created', 'modified',
'creator', 'is_archived', 'name', 'negative_rating_count',
'positive_rating_count', 'url', 'pk', 'moderator_feedback',
- 'session_token_voted')
+ 'point_label', 'session_token_voted')
read_only_fields = ('budget', 'category', 'comment_count', 'created',
'modified', 'creator', 'is_archived', 'name',
'negative_rating_count', 'positive_rating_count',
'url', 'pk', 'moderator_feedback',
- 'session_token_voted')
+ 'point_label', 'session_token_voted')
def get_creator(self, proposal):
return proposal.creator.username
@@ -73,6 +73,12 @@
else:
return None
+ def get_point_label(self, proposal):
+ if hasattr(proposal, 'point_label'):
+ return (proposal.point_label)
+ else:
+ return None
+
def get_session_token_voted(self, proposal):
"""Serialize if proposal has been voted.
| {"golden_diff": "diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py\n--- a/meinberlin/apps/budgeting/serializers.py\n+++ b/meinberlin/apps/budgeting/serializers.py\n@@ -35,12 +35,12 @@\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n- 'session_token_voted')\n+ 'point_label', 'session_token_voted')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n- 'session_token_voted')\n+ 'point_label', 'session_token_voted')\n \n def get_creator(self, proposal):\n return proposal.creator.username\n@@ -73,6 +73,12 @@\n else:\n return None\n \n+ def get_point_label(self, proposal):\n+ if hasattr(proposal, 'point_label'):\n+ return (proposal.point_label)\n+ else:\n+ return None\n+\n def get_session_token_voted(self, proposal):\n \"\"\"Serialize if proposal has been voted.\n", "issue": "testing 5024: missing location label\n**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list\r\n**user:** any\r\n**expected behaviour:** \r\n**behaviour:** location label (Bezeichnung des markierten Ortes) is missing\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** maybe we need a smaller char restriction here? it's at 255 now, I wonder if something like 50 should be enough for something displayed as a tag? or continue with ... for longer words?\r\n\r\nold list\r\n<img width=\"446\" alt=\"Bildschirmfoto 2021-12-21 um 16 35 27\" src=\"https://user-images.githubusercontent.com/35491681/146956690-789f6d02-372c-4877-a4c9-c539b5fc90c3.png\">\r\n\r\n\r\nnew list\r\n<img width=\"446\" alt=\"Bildschirmfoto 2021-12-21 um 16 34 09\" src=\"https://user-images.githubusercontent.com/35491681/146956491-2472f9f2-e90d-4975-88a8-fbe1a7012657.png\">\r\n\r\nold list with long label\r\n<img width=\"656\" alt=\"Bildschirmfoto 2021-12-21 um 16 36 09\" src=\"https://user-images.githubusercontent.com/35491681/146956804-ced5b4b8-0da8-42fc-a17c-901fc86efe9b.png\">\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\nfrom meinberlin.apps.votes.models import TokenVote\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n moderator_feedback = serializers.SerializerMethodField()\n session_token_voted = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'session_token_voted')\n 
read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'session_token_voted')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 'moderator_feedback'):\n return (proposal.moderator_feedback,\n proposal.get_moderator_feedback_display())\n else:\n return None\n\n def get_session_token_voted(self, proposal):\n \"\"\"Serialize if proposal has been voted.\n\n Returns bool that indicates whether the proposal has\n been voted with the token in the current session\n \"\"\"\n if 'request' in self.context:\n if 'voting_token' in self.context['request'].session:\n vote = TokenVote.objects.filter(\n token__pk=self.context['request'].session['voting_token'],\n content_type=ContentType.objects.get_for_model(\n proposal.__class__),\n object_pk=proposal.pk\n )\n if vote.exists():\n return True\n\n return False\n", "path": "meinberlin/apps/budgeting/serializers.py"}], "after_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\nfrom meinberlin.apps.votes.models import TokenVote\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n moderator_feedback = serializers.SerializerMethodField()\n session_token_voted = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'point_label', 'session_token_voted')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'point_label', 'session_token_voted')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def 
get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 'moderator_feedback'):\n return (proposal.moderator_feedback,\n proposal.get_moderator_feedback_display())\n else:\n return None\n\n def get_point_label(self, proposal):\n if hasattr(proposal, 'point_label'):\n return (proposal.point_label)\n else:\n return None\n\n def get_session_token_voted(self, proposal):\n \"\"\"Serialize if proposal has been voted.\n\n Returns bool that indicates whether the proposal has\n been voted with the token in the current session\n \"\"\"\n if 'request' in self.context:\n if 'voting_token' in self.context['request'].session:\n vote = TokenVote.objects.filter(\n token__pk=self.context['request'].session['voting_token'],\n content_type=ContentType.objects.get_for_model(\n proposal.__class__),\n object_pk=proposal.pk\n )\n if vote.exists():\n return True\n\n return False\n", "path": "meinberlin/apps/budgeting/serializers.py"}]} | 1,522 | 317 |
gh_patches_debug_56812 | rasdani/github-patches | git_diff | microsoft__knossos-ksc-1027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Segmentation fault in sqrl_pytorch-PyTorch CUDA
Just saw this while working on something else. I haven't done a lot to debug it, but note that it's in copydown, on a fairly innocuous operation (aten::sum(Tensor 2) -> Float), so might be something to do with KS_ALLOCATOR not being defined?
Or could just be out of memory not caught?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/dl-capsule/sqrl.py`
Content:
```
1 import torch
2 import ksc.torch_frontend as knossos
3
4 # run-bench: Knossos source, and "nice" PyTorch implementation
5 # BEGINDOC
6 @knossos.register
7 def sqrl(x: torch.Tensor):
8 """
9 sqrl: Squared Leaky Relu
10 Like a capsule from /Stuck in a Rut/
11 Typically x is a 4x4 tensor, possibly
12 packed in a 4n x 4m array
13 """
14 y = torch.sum(x)
15 if y < 0.0:
16 t = -0.125 * x
17 else:
18 t = 1 / 2 * x ** 2
19 return torch.mean(torch.sin(t) * t)
20
21
22 # ENDDOC
23
24 # run-bench: PyTorch "fast" implementation
25 def sqrl_pytorch(x: torch.Tensor):
26 return sqrl(x)
27
28
29 # run-bench: PyTorch "nice" implementation
30 def sqrl_pytorch_nice(x: torch.Tensor):
31 return sqrl(x)
32
33
34 # run-bench: Define a range of values at which to call the methods
35 def sqrl_bench_configs():
36 yield torch.randn((4, 4))
37 yield torch.randn((16, 16))
38
39
40 #################################
41 #
42 # vsqrl - vectorized sqrl
43 #
44
45 vsqrl = knossos.vmap(sqrl)
46
47
48 # run-bench: Define a range of values at which to call the methods
49 def vsqrl_bench_configs():
50 yield torch.randn((10, 4, 4))
51 yield torch.randn((1000, 4, 4))
52 yield torch.randn((1000, 16, 16))
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/dl-capsule/sqrl.py b/examples/dl-capsule/sqrl.py
--- a/examples/dl-capsule/sqrl.py
+++ b/examples/dl-capsule/sqrl.py
@@ -23,12 +23,12 @@
# run-bench: PyTorch "fast" implementation
def sqrl_pytorch(x: torch.Tensor):
- return sqrl(x)
+ return sqrl.raw_f(x)
# run-bench: PyTorch "nice" implementation
def sqrl_pytorch_nice(x: torch.Tensor):
- return sqrl(x)
+ return sqrl.raw_f(x)
# run-bench: Define a range of values at which to call the methods
| {"golden_diff": "diff --git a/examples/dl-capsule/sqrl.py b/examples/dl-capsule/sqrl.py\n--- a/examples/dl-capsule/sqrl.py\n+++ b/examples/dl-capsule/sqrl.py\n@@ -23,12 +23,12 @@\n \n # run-bench: PyTorch \"fast\" implementation\n def sqrl_pytorch(x: torch.Tensor):\n- return sqrl(x)\n+ return sqrl.raw_f(x)\n \n \n # run-bench: PyTorch \"nice\" implementation\n def sqrl_pytorch_nice(x: torch.Tensor):\n- return sqrl(x)\n+ return sqrl.raw_f(x)\n \n \n # run-bench: Define a range of values at which to call the methods\n", "issue": "Bug: Segmentation fault in sqrl_pytorch-PyTorch CUDA\nJust saw this while working on something else. I haven't done a lot to debug it, but note that it's in copydown, on a fairly innocuous operation (aten::sum(Tensor 2) -> Float), so might be something to do with KS_ALLOCATOR not being defined?\r\nOr could just be out of memory not caught?\r\n\r\n\n", "before_files": [{"content": "import torch\nimport ksc.torch_frontend as knossos\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# BEGINDOC\[email protected]\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.vmap(sqrl)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n", "path": "examples/dl-capsule/sqrl.py"}], "after_files": [{"content": "import torch\nimport ksc.torch_frontend as knossos\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# BEGINDOC\[email protected]\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl.raw_f(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.vmap(sqrl)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n", "path": "examples/dl-capsule/sqrl.py"}]} | 896 | 166 |
gh_patches_debug_31908 | rasdani/github-patches | git_diff | rucio__rucio-5322 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add add-exception command in the CLI
Motivation
----------
A CLI command to add a new exception is missing and need to be added
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/client/lifetimeclient.py`
Content:
```
1 # Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Cedric Serfon <[email protected]>, 2017
17 # - Vincent Garonne <[email protected]>, 2018
18 # - Martin Barisits <[email protected]>, 2018
19 # - Andrew Lister <[email protected]>, 2019
20
21 from __future__ import print_function
22
23 from json import loads
24 from requests.status_codes import codes
25
26 from rucio.client.baseclient import BaseClient
27 from rucio.client.baseclient import choice
28 from rucio.common.utils import build_url, render_json
29
30
31 class LifetimeClient(BaseClient):
32
33 """Lifetime client class for working with Lifetime Model exceptions"""
34
35 LIFETIME_BASEURL = 'lifetime_exceptions'
36
37 def list_exceptions(self, exception_id=None, states=None):
38 """
39 List exceptions to Lifetime Model.
40
41 :param id: The id of the exception
42 :param states: The states to filter
43 """
44
45 path = self.LIFETIME_BASEURL + '/'
46 params = {}
47 if exception_id:
48 params['exception_id'] = exception_id
49 if states:
50 params['states'] = exception_id
51 url = build_url(choice(self.list_hosts), path=path, params=params)
52
53 result = self._send_request(url)
54 if result.status_code == codes.ok:
55 lifetime_exceptions = self._load_json_data(result)
56 return lifetime_exceptions
57 else:
58 exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code)
59 raise exc_cls(exc_msg)
60
61 def add_exception(self, dids, account, pattern, comments, expires_at):
62 """
63 Add exceptions to Lifetime Model.
64
65 :param dids: The list of dids
66 :param account: The account of the requester.
67 :param pattern: The account.
68 :param comments: The comments associated to the exception.
69 :param expires_at: The expiration date of the exception.
70
71 returns: The id of the exception.
72 """
73 path = self.LIFETIME_BASEURL + '/'
74 url = build_url(choice(self.list_hosts), path=path)
75 data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}
76 print(render_json(**data))
77 result = self._send_request(url, type_='POST', data=render_json(**data))
78 print(result.text)
79 if result.status_code == codes.created:
80 return loads(result.text)
81 exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
82 raise exc_cls(exc_msg)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/client/lifetimeclient.py b/lib/rucio/client/lifetimeclient.py
--- a/lib/rucio/client/lifetimeclient.py
+++ b/lib/rucio/client/lifetimeclient.py
@@ -1,4 +1,5 @@
-# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.
+# -*- coding: utf-8 -*-
+# Copyright 2017-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +14,13 @@
# limitations under the License.
#
# Authors:
-# - Cedric Serfon <[email protected]>, 2017
-# - Vincent Garonne <[email protected]>, 2018
+# - Cedric Serfon <[email protected]>, 2017-2022
+# - Vincent Garonne <[email protected]>, 2018
+# - Joaquín Bogado <[email protected]>, 2018
# - Martin Barisits <[email protected]>, 2018
# - Andrew Lister <[email protected]>, 2019
+# - David Población Criado <[email protected]>, 2021
+# - Igor Mandrichenko <[email protected]>, 2021
from __future__ import print_function
@@ -73,9 +77,7 @@
path = self.LIFETIME_BASEURL + '/'
url = build_url(choice(self.list_hosts), path=path)
data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}
- print(render_json(**data))
result = self._send_request(url, type_='POST', data=render_json(**data))
- print(result.text)
if result.status_code == codes.created:
return loads(result.text)
exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
| {"golden_diff": "diff --git a/lib/rucio/client/lifetimeclient.py b/lib/rucio/client/lifetimeclient.py\n--- a/lib/rucio/client/lifetimeclient.py\n+++ b/lib/rucio/client/lifetimeclient.py\n@@ -1,4 +1,5 @@\n-# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.\n+# -*- coding: utf-8 -*-\n+# Copyright 2017-2022 CERN\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -13,10 +14,13 @@\n # limitations under the License.\n #\n # Authors:\n-# - Cedric Serfon <[email protected]>, 2017\n-# - Vincent Garonne <[email protected]>, 2018\n+# - Cedric Serfon <[email protected]>, 2017-2022\n+# - Vincent Garonne <[email protected]>, 2018\n+# - Joaqu\u00edn Bogado <[email protected]>, 2018\n # - Martin Barisits <[email protected]>, 2018\n # - Andrew Lister <[email protected]>, 2019\n+# - David Poblaci\u00f3n Criado <[email protected]>, 2021\n+# - Igor Mandrichenko <[email protected]>, 2021\n \n from __future__ import print_function\n \n@@ -73,9 +77,7 @@\n path = self.LIFETIME_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}\n- print(render_json(**data))\n result = self._send_request(url, type_='POST', data=render_json(**data))\n- print(result.text)\n if result.status_code == codes.created:\n return loads(result.text)\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)\n", "issue": "Add add-exception command in the CLI\nMotivation\r\n----------\r\nA CLI command to add a new exception is missing and need to be added\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Cedric Serfon <[email protected]>, 2017\n# - Vincent Garonne <[email protected]>, 2018\n# - Martin Barisits <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n\nfrom __future__ import print_function\n\nfrom json import loads\nfrom requests.status_codes import codes\n\nfrom rucio.client.baseclient import BaseClient\nfrom rucio.client.baseclient import choice\nfrom rucio.common.utils import build_url, render_json\n\n\nclass LifetimeClient(BaseClient):\n\n \"\"\"Lifetime client class for working with Lifetime Model exceptions\"\"\"\n\n LIFETIME_BASEURL = 'lifetime_exceptions'\n\n def list_exceptions(self, exception_id=None, states=None):\n \"\"\"\n List exceptions to Lifetime Model.\n\n :param id: The id of the exception\n :param states: The states to filter\n \"\"\"\n\n path = self.LIFETIME_BASEURL + '/'\n params = {}\n if exception_id:\n params['exception_id'] = exception_id\n if states:\n params['states'] = exception_id\n url = build_url(choice(self.list_hosts), path=path, params=params)\n\n result = self._send_request(url)\n if result.status_code == codes.ok:\n lifetime_exceptions = 
self._load_json_data(result)\n return lifetime_exceptions\n else:\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code)\n raise exc_cls(exc_msg)\n\n def add_exception(self, dids, account, pattern, comments, expires_at):\n \"\"\"\n Add exceptions to Lifetime Model.\n\n :param dids: The list of dids\n :param account: The account of the requester.\n :param pattern: The account.\n :param comments: The comments associated to the exception.\n :param expires_at: The expiration date of the exception.\n\n returns: The id of the exception.\n \"\"\"\n path = self.LIFETIME_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}\n print(render_json(**data))\n result = self._send_request(url, type_='POST', data=render_json(**data))\n print(result.text)\n if result.status_code == codes.created:\n return loads(result.text)\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)\n raise exc_cls(exc_msg)\n", "path": "lib/rucio/client/lifetimeclient.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017-2022 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Cedric Serfon <[email protected]>, 2017-2022\n# - Vincent Garonne <[email protected]>, 2018\n# - Joaqu\u00edn Bogado <[email protected]>, 2018\n# - Martin Barisits <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n# - David Poblaci\u00f3n Criado <[email protected]>, 2021\n# - Igor Mandrichenko <[email protected]>, 2021\n\nfrom __future__ import print_function\n\nfrom json import loads\nfrom requests.status_codes import codes\n\nfrom rucio.client.baseclient import BaseClient\nfrom rucio.client.baseclient import choice\nfrom rucio.common.utils import build_url, render_json\n\n\nclass LifetimeClient(BaseClient):\n\n \"\"\"Lifetime client class for working with Lifetime Model exceptions\"\"\"\n\n LIFETIME_BASEURL = 'lifetime_exceptions'\n\n def list_exceptions(self, exception_id=None, states=None):\n \"\"\"\n List exceptions to Lifetime Model.\n\n :param id: The id of the exception\n :param states: The states to filter\n \"\"\"\n\n path = self.LIFETIME_BASEURL + '/'\n params = {}\n if exception_id:\n params['exception_id'] = exception_id\n if states:\n params['states'] = exception_id\n url = build_url(choice(self.list_hosts), path=path, params=params)\n\n result = self._send_request(url)\n if result.status_code == codes.ok:\n lifetime_exceptions = self._load_json_data(result)\n return lifetime_exceptions\n else:\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code)\n raise exc_cls(exc_msg)\n\n def add_exception(self, dids, account, pattern, comments, expires_at):\n \"\"\"\n Add exceptions to Lifetime Model.\n\n :param dids: The list of dids\n :param account: The account of the requester.\n :param pattern: The account.\n 
:param comments: The comments associated to the exception.\n :param expires_at: The expiration date of the exception.\n\n returns: The id of the exception.\n \"\"\"\n path = self.LIFETIME_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}\n result = self._send_request(url, type_='POST', data=render_json(**data))\n if result.status_code == codes.created:\n return loads(result.text)\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)\n raise exc_cls(exc_msg)\n", "path": "lib/rucio/client/lifetimeclient.py"}]} | 1,184 | 513 |
gh_patches_debug_856 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1451 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dependecy conflict between botframework 4.11.0 and azure-identity 1.5.0
## Version
4.11 (also happening with 4.10)
## Describe the bug
`botframework-connector == 4.11.0` (current) requires `msal == 1.2.0`
`azure-identity == 1.5.0` (current) requires `msal >=1.6.0,<2.0.0`
This created a dependency conflict where bot libraries can't coexist in the same program. This used to work a couple of months ago (I bumped into this issue after revisiting some code I had worked on before).
## To Reproduce
This is my `requirements.txt` file, just add it and run `pipenv install -r requirements.txt` (versions pinned to :
```
botbuilder-core == 4.11
azure-keyvault-secrets
azure-identity == 1.5
botbuilder-ai == 4.11
```
## Expected behavior
Packages should install without conflict
## Screenshots
Extract from the error message `pipenv install` shows:
```
[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.
First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.
Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.
Hint: try $ pipenv lock --pre if it is a pre-release dependency.
ERROR: ERROR: Could not find a version that matches msal<2.0.0,==1.2.0,>=0.4.1,>=1.6.0
Tried: 0.1.0, 0.1.0, 0.2.0, 0.2.0, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.0, 0.9.0, 0.9.0, 1.0.0, 1.0.0, 1.1.0, 1.1.0, 1.2.0, 1.2.0, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.6.0, 1.6.0, 1.7.0, 1.7.0, 1.8.0, 1.8.0
There are incompatible versions in the resolved dependencies.
```
Relevant extract from the output of `pipenv graph` as per the suggestion above:
```
azure-identity==1.5.0
- msal [required: >=1.6.0,<2.0.0, installed: 1.2.0]
- msal-extensions [required: ~=0.3.0, installed: 0.3.0]
- msal [required: >=0.4.1,<2.0.0, installed: 1.2.0]
azure-keyvault-secrets==4.2.0
botbuilder-ai==4.11.0
- botbuilder-core [required: ==4.11.0, installed: 4.11.0]
- botframework-connector [required: ==4.11.0, installed: 4.11.0]
- msal [required: ==1.2.0, installed: 1.2.0]
```
## Additional context
This issue was also reported in [botbuilder-samples repo's issue 2978](https://github.com/microsoft/BotBuilder-Samples/issues/2978)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-connector/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import os
4 from setuptools import setup
5
6 NAME = "botframework-connector"
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0"
8 REQUIRES = [
9 "msrest==0.6.10",
10 "requests==2.23.0",
11 "cryptography==3.2",
12 "PyJWT==1.5.3",
13 "botbuilder-schema==4.12.0",
14 "adal==1.2.1",
15 "msal==1.2.0",
16 ]
17
18 root = os.path.abspath(os.path.dirname(__file__))
19
20 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
21 long_description = f.read()
22
23 setup(
24 name=NAME,
25 version=VERSION,
26 description="Microsoft Bot Framework Bot Builder SDK for Python.",
27 author="Microsoft",
28 url="https://www.github.com/Microsoft/botbuilder-python",
29 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
30 install_requires=REQUIRES,
31 packages=[
32 "botframework.connector",
33 "botframework.connector.auth",
34 "botframework.connector.async_mixin",
35 "botframework.connector.operations",
36 "botframework.connector.models",
37 "botframework.connector.aio",
38 "botframework.connector.aio.operations_async",
39 "botframework.connector.teams",
40 "botframework.connector.teams.operations",
41 "botframework.connector.token_api",
42 "botframework.connector.token_api.aio",
43 "botframework.connector.token_api.models",
44 "botframework.connector.token_api.operations",
45 ],
46 include_package_data=True,
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license="MIT",
50 classifiers=[
51 "Programming Language :: Python :: 3.7",
52 "Intended Audience :: Developers",
53 "License :: OSI Approved :: MIT License",
54 "Operating System :: OS Independent",
55 "Development Status :: 5 - Production/Stable",
56 "Topic :: Scientific/Engineering :: Artificial Intelligence",
57 ],
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -12,7 +12,7 @@
"PyJWT==1.5.3",
"botbuilder-schema==4.12.0",
"adal==1.2.1",
- "msal==1.2.0",
+ "msal==1.6.0",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -12,7 +12,7 @@\n \"PyJWT==1.5.3\",\n \"botbuilder-schema==4.12.0\",\n \"adal==1.2.1\",\n- \"msal==1.2.0\",\n+ \"msal==1.6.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "dependecy conflict between botframework 4.11.0 and azure-identity 1.5.0\n## Version\r\n4.11 (also happening with 4.10)\r\n\r\n## Describe the bug\r\n`botframework-connector == 4.11.0` (current) requires `msal == 1.2.0`\r\n`azure-identity == 1.5.0` (current) requires `msal >=1.6.0,<2.0.0`\r\n\r\nThis created a dependency conflict where bot libraries can't coexist in the same program. This used to work a couple of months ago (I bumped into this issue after revisiting some code I had worked on before).\r\n\r\n## To Reproduce\r\nThis is my `requirements.txt` file, just add it and run `pipenv install -r requirements.txt` (versions pinned to :\r\n```\r\nbotbuilder-core == 4.11\r\nazure-keyvault-secrets\r\nazure-identity == 1.5\r\nbotbuilder-ai == 4.11\r\n```\r\n\r\n## Expected behavior\r\nPackages should install without conflict\r\n\r\n## Screenshots\r\nExtract from the error message `pipenv install` shows:\r\n```\r\n[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\r\n First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.\r\n Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.\r\n Hint: try $ pipenv lock --pre if it is a pre-release dependency.\r\nERROR: ERROR: Could not find a version that matches msal<2.0.0,==1.2.0,>=0.4.1,>=1.6.0\r\nTried: 0.1.0, 0.1.0, 0.2.0, 0.2.0, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.0, 0.9.0, 0.9.0, 1.0.0, 1.0.0, 1.1.0, 1.1.0, 1.2.0, 1.2.0, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.6.0, 1.6.0, 1.7.0, 1.7.0, 1.8.0, 1.8.0\r\nThere are incompatible versions in the resolved dependencies.\r\n```\r\nRelevant extract from the output of `pipenv graph` as per the suggestion above:\r\n```\r\nazure-identity==1.5.0\r\n - msal [required: >=1.6.0,<2.0.0, installed: 1.2.0]\r\n - msal-extensions [required: ~=0.3.0, installed: 0.3.0]\r\n - msal [required: >=0.4.1,<2.0.0, installed: 1.2.0]\r\nazure-keyvault-secrets==4.2.0\r\nbotbuilder-ai==4.11.0\r\n - botbuilder-core [required: ==4.11.0, installed: 4.11.0]\r\n - botframework-connector [required: ==4.11.0, installed: 4.11.0]\r\n - msal [required: ==1.2.0, installed: 1.2.0]\r\n```\r\n\r\n## Additional context\r\nThis issue was also reported in [botbuilder-samples repo's issue 2978](https://github.com/microsoft/BotBuilder-Samples/issues/2978)\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"msrest==0.6.10\",\n \"requests==2.23.0\",\n \"cryptography==3.2\",\n \"PyJWT==1.5.3\",\n \"botbuilder-schema==4.12.0\",\n \"adal==1.2.1\",\n \"msal==1.2.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"msrest==0.6.10\",\n \"requests==2.23.0\",\n \"cryptography==3.2\",\n \"PyJWT==1.5.3\",\n \"botbuilder-schema==4.12.0\",\n \"adal==1.2.1\",\n \"msal==1.6.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}]} | 1,871 | 131 |
gh_patches_debug_4120 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/cli/launcher/__init__.py`
Content:
```
1 import click
2 from .run import launch_multi_processes
3 from colossalai.context import Config
4
5
6 @click.command(help="Launch distributed training on a single node or multiple nodes",
7 context_settings=dict(ignore_unknown_options=True))
8 @click.option("-H",
9 "-host",
10 "--host",
11 type=str,
12 default=None,
13 help="the list of hostnames to launch in the format <host1>,<host2>")
14 @click.option(
15 "--hostfile",
16 type=str,
17 default=None,
18 help="Hostfile path that defines the device pool available to the job, each line in the file is a hostname")
19 @click.option("--include",
20 type=str,
21 default=None,
22 help="Specify computing devices to use during execution. String format is <host1>,<host2>,"
23 " only effective when used with --hostfile.")
24 @click.option(
25 "--exclude",
26 type=str,
27 default=None,
28 help=
29 "Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --includ,"
30 " only effective when used with --hostfile.")
31 @click.option("--num_nodes",
32 type=int,
33 default=-1,
34 help="Total number of worker nodes to use, only effective when used with --hostfile.")
35 @click.option("--nproc_per_node", type=int, default=None, help="Number of GPUs to use on each node.")
36 @click.option("--master_port",
37 type=int,
38 default=29500,
39 help="(optional) Port used by PyTorch distributed for communication during distributed training.")
40 @click.option("--master_addr",
41 type=str,
42 default="127.0.0.1",
43 help="(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.")
44 @click.option(
45 "--extra_launch_args",
46 type=str,
47 default=None,
48 help=
49 "Set additional torch distributed launcher arguments such as --standalone. The format is --extra_launch_args arg1=1,arg2=2. "
50 "This will be converted to --arg1=1 --arg2=2 during execution")
51 @click.option("--ssh-port", type=int, default=None, help="(optional) the port used for ssh connection")
52 @click.argument("user_script", type=str)
53 @click.argument('user_args', nargs=-1)
54 def run(host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str,
55 master_port: int, extra_launch_args: str, ssh_port: int, user_script: str, user_args: str) -> None:
56 """
57 To launch multiple processes on a single node or multiple nodes via command line.
58
59 Usage::
60 # run with 4 GPUs on the current node use default port 29500
61 colossalai run --nprocs_per_node 4 train.py
62
63 # run with 2 GPUs on the current node at port 29550
64 colossalai run --nprocs_per_node 4 --master_port 29550 train.py
65
66 # run on two nodes
67 colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py
68
69 # run with hostfile
70 colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py
71
72 # run with hostfile with only included hosts
73 colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py
74
75 # run with hostfile excluding the hosts selected
76 colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py
77 """
78 if not user_script.endswith('.py'):
79 click.echo(f'Error: invalid Python file {user_script}. Did you use a wrong option? Try colossalai run --help')
80 exit()
81
82 args_dict = locals()
83 args = Config(args_dict)
84 args.user_args = list(args.user_args)
85 launch_multi_processes(args)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/cli/launcher/__init__.py b/colossalai/cli/launcher/__init__.py
--- a/colossalai/cli/launcher/__init__.py
+++ b/colossalai/cli/launcher/__init__.py
@@ -1,7 +1,9 @@
import click
-from .run import launch_multi_processes
+
from colossalai.context import Config
+from .run import launch_multi_processes
+
@click.command(help="Launch distributed training on a single node or multiple nodes",
context_settings=dict(ignore_unknown_options=True))
| {"golden_diff": "diff --git a/colossalai/cli/launcher/__init__.py b/colossalai/cli/launcher/__init__.py\n--- a/colossalai/cli/launcher/__init__.py\n+++ b/colossalai/cli/launcher/__init__.py\n@@ -1,7 +1,9 @@\n import click\n-from .run import launch_multi_processes\n+\n from colossalai.context import Config\n \n+from .run import launch_multi_processes\n+\n \n @click.command(help=\"Launch distributed training on a single node or multiple nodes\",\n context_settings=dict(ignore_unknown_options=True))\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import click\nfrom .run import launch_multi_processes\nfrom colossalai.context import Config\n\n\[email protected](help=\"Launch distributed training on a single node or multiple nodes\",\n context_settings=dict(ignore_unknown_options=True))\[email protected](\"-H\",\n \"-host\",\n \"--host\",\n type=str,\n default=None,\n help=\"the list of hostnames to launch in the format <host1>,<host2>\")\[email protected](\n \"--hostfile\",\n type=str,\n default=None,\n help=\"Hostfile path that defines the device pool available to the job, each line in the file is a hostname\")\[email protected](\"--include\",\n type=str,\n default=None,\n help=\"Specify computing devices to use during execution. String format is <host1>,<host2>,\"\n \" only effective when used with --hostfile.\")\[email protected](\n \"--exclude\",\n type=str,\n default=None,\n help=\n \"Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --includ,\"\n \" only effective when used with --hostfile.\")\[email protected](\"--num_nodes\",\n type=int,\n default=-1,\n help=\"Total number of worker nodes to use, only effective when used with --hostfile.\")\[email protected](\"--nproc_per_node\", type=int, default=None, help=\"Number of GPUs to use on each node.\")\[email protected](\"--master_port\",\n type=int,\n default=29500,\n help=\"(optional) Port used by PyTorch distributed for communication during distributed training.\")\[email protected](\"--master_addr\",\n type=str,\n default=\"127.0.0.1\",\n help=\"(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.\")\[email protected](\n \"--extra_launch_args\",\n type=str,\n default=None,\n help=\n \"Set additional torch distributed launcher arguments such as --standalone. The format is --extra_launch_args arg1=1,arg2=2. 
\"\n \"This will be converted to --arg1=1 --arg2=2 during execution\")\[email protected](\"--ssh-port\", type=int, default=None, help=\"(optional) the port used for ssh connection\")\[email protected](\"user_script\", type=str)\[email protected]('user_args', nargs=-1)\ndef run(host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str,\n master_port: int, extra_launch_args: str, ssh_port: int, user_script: str, user_args: str) -> None:\n \"\"\"\n To launch multiple processes on a single node or multiple nodes via command line.\n\n Usage::\n # run with 4 GPUs on the current node use default port 29500\n colossalai run --nprocs_per_node 4 train.py\n\n # run with 2 GPUs on the current node at port 29550\n colossalai run --nprocs_per_node 4 --master_port 29550 train.py\n\n # run on two nodes\n colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py\n\n # run with hostfile\n colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py\n\n # run with hostfile with only included hosts\n colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py\n\n # run with hostfile excluding the hosts selected\n colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py\n \"\"\"\n if not user_script.endswith('.py'):\n click.echo(f'Error: invalid Python file {user_script}. Did you use a wrong option? Try colossalai run --help')\n exit()\n\n args_dict = locals()\n args = Config(args_dict)\n args.user_args = list(args.user_args)\n launch_multi_processes(args)\n", "path": "colossalai/cli/launcher/__init__.py"}], "after_files": [{"content": "import click\n\nfrom colossalai.context import Config\n\nfrom .run import launch_multi_processes\n\n\[email protected](help=\"Launch distributed training on a single node or multiple nodes\",\n context_settings=dict(ignore_unknown_options=True))\[email protected](\"-H\",\n \"-host\",\n \"--host\",\n type=str,\n default=None,\n help=\"the list of hostnames to launch in the format <host1>,<host2>\")\[email protected](\n \"--hostfile\",\n type=str,\n default=None,\n help=\"Hostfile path that defines the device pool available to the job, each line in the file is a hostname\")\[email protected](\"--include\",\n type=str,\n default=None,\n help=\"Specify computing devices to use during execution. String format is <host1>,<host2>,\"\n \" only effective when used with --hostfile.\")\[email protected](\n \"--exclude\",\n type=str,\n default=None,\n help=\n \"Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --includ,\"\n \" only effective when used with --hostfile.\")\[email protected](\"--num_nodes\",\n type=int,\n default=-1,\n help=\"Total number of worker nodes to use, only effective when used with --hostfile.\")\[email protected](\"--nproc_per_node\", type=int, default=None, help=\"Number of GPUs to use on each node.\")\[email protected](\"--master_port\",\n type=int,\n default=29500,\n help=\"(optional) Port used by PyTorch distributed for communication during distributed training.\")\[email protected](\"--master_addr\",\n type=str,\n default=\"127.0.0.1\",\n help=\"(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.\")\[email protected](\n \"--extra_launch_args\",\n type=str,\n default=None,\n help=\n \"Set additional torch distributed launcher arguments such as --standalone. 
The format is --extra_launch_args arg1=1,arg2=2. \"\n \"This will be converted to --arg1=1 --arg2=2 during execution\")\[email protected](\"--ssh-port\", type=int, default=None, help=\"(optional) the port used for ssh connection\")\[email protected](\"user_script\", type=str)\[email protected]('user_args', nargs=-1)\ndef run(host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str,\n master_port: int, extra_launch_args: str, ssh_port: int, user_script: str, user_args: str) -> None:\n \"\"\"\n To launch multiple processes on a single node or multiple nodes via command line.\n\n Usage::\n # run with 4 GPUs on the current node use default port 29500\n colossalai run --nprocs_per_node 4 train.py\n\n # run with 2 GPUs on the current node at port 29550\n colossalai run --nprocs_per_node 4 --master_port 29550 train.py\n\n # run on two nodes\n colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py\n\n # run with hostfile\n colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py\n\n # run with hostfile with only included hosts\n colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py\n\n # run with hostfile excluding the hosts selected\n colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py\n \"\"\"\n if not user_script.endswith('.py'):\n click.echo(f'Error: invalid Python file {user_script}. Did you use a wrong option? Try colossalai run --help')\n exit()\n\n args_dict = locals()\n args = Config(args_dict)\n args.user_args = list(args.user_args)\n launch_multi_processes(args)\n", "path": "colossalai/cli/launcher/__init__.py"}]} | 1,354 | 121 |
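For the ColossalAI record above, the golden diff only regroups the launcher's imports: third-party (`click`), then the `colossalai` package, then the relative `.run` import, separated by blank lines. A small sketch of how that grouping could be checked with isort's Python API; this assumes isort >= 5 is installed and that `colossalai` is treated as a first-party package, neither of which comes from the record itself:

```python
import isort

# The import header produced by the golden diff, as a string.
snippet = (
    "import click\n"
    "\n"
    "from colossalai.context import Config\n"
    "\n"
    "from .run import launch_multi_processes\n"
)

# check_code returns True when the snippet already follows isort's
# third-party / first-party / local grouping, so this should print True.
print(isort.check_code(snippet, known_first_party=["colossalai"]))
```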
gh_patches_debug_12616 | rasdani/github-patches | git_diff | mne-tools__mne-python-9042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use bibtex in plot_evoked_whitening.py
convert references in `examples/visualization/plot_evoked_whitening.py` to use footcite / footbibliography
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/visualization/plot_evoked_whitening.py`
Content:
```
1 """
2 =============================================
3 Whitening evoked data with a noise covariance
4 =============================================
5
6 Evoked data are loaded and then whitened using a given noise covariance
7 matrix. It's an excellent quality check to see if baseline signals match
8 the assumption of Gaussian white noise during the baseline period.
9
10 Covariance estimation and diagnostic plots are based on [1]_.
11
12 References
13 ----------
14 .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
15 covariance estimation and spatial whitening of MEG and EEG signals, vol.
16 108, 328-342, NeuroImage.
17
18 """
19 # Authors: Alexandre Gramfort <[email protected]>
20 # Denis A. Engemann <[email protected]>
21 #
22 # License: BSD (3-clause)
23
24 import mne
25
26 from mne import io
27 from mne.datasets import sample
28 from mne.cov import compute_covariance
29
30 print(__doc__)
31
32 ###############################################################################
33 # Set parameters
34
35 data_path = sample.data_path()
36 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
37 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
38
39 raw = io.read_raw_fif(raw_fname, preload=True)
40 raw.filter(1, 40, n_jobs=1, fir_design='firwin')
41 raw.info['bads'] += ['MEG 2443'] # bads + 1 more
42 events = mne.read_events(event_fname)
43
44 # let's look at rare events, button presses
45 event_id, tmin, tmax = 2, -0.2, 0.5
46 reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)
47
48 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),
49 baseline=None, reject=reject, preload=True)
50
51 # Uncomment next line to use fewer samples and study regularization effects
52 # epochs = epochs[:20] # For your data, use as many samples as you can!
53
54 ###############################################################################
55 # Compute covariance using automated regularization
56 method_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))
57 noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
58 return_estimators=True, verbose=True, n_jobs=1,
59 projs=None, rank=None,
60 method_params=method_params)
61
62 # With "return_estimator=True" all estimated covariances sorted
63 # by log-likelihood are returned.
64
65 print('Covariance estimates sorted from best to worst')
66 for c in noise_covs:
67 print("%s : %s" % (c['method'], c['loglik']))
68
69 ###############################################################################
70 # Show the evoked data:
71
72 evoked = epochs.average()
73
74 evoked.plot(time_unit='s') # plot evoked response
75
76 ###############################################################################
77 # We can then show whitening for our various noise covariance estimates.
78 #
79 # Here we should look to see if baseline signals match the
80 # assumption of Gaussian white noise. we expect values centered at
81 # 0 within 2 standard deviations for 95% of the time points.
82 #
83 # For the Global field power we expect a value of 1.
84
85 evoked.plot_white(noise_covs, time_unit='s')
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/plot_evoked_whitening.py
--- a/examples/visualization/plot_evoked_whitening.py
+++ b/examples/visualization/plot_evoked_whitening.py
@@ -7,13 +7,12 @@
matrix. It's an excellent quality check to see if baseline signals match
the assumption of Gaussian white noise during the baseline period.
-Covariance estimation and diagnostic plots are based on [1]_.
+Covariance estimation and diagnostic plots are based on
+:footcite:`EngemannGramfort2015`.
References
----------
-.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
- covariance estimation and spatial whitening of MEG and EEG signals, vol.
- 108, 328-342, NeuroImage.
+.. footbibliography::
"""
# Authors: Alexandre Gramfort <[email protected]>
| {"golden_diff": "diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/plot_evoked_whitening.py\n--- a/examples/visualization/plot_evoked_whitening.py\n+++ b/examples/visualization/plot_evoked_whitening.py\n@@ -7,13 +7,12 @@\n matrix. It's an excellent quality check to see if baseline signals match\n the assumption of Gaussian white noise during the baseline period.\n \n-Covariance estimation and diagnostic plots are based on [1]_.\n+Covariance estimation and diagnostic plots are based on\n+:footcite:`EngemannGramfort2015`.\n \n References\n ----------\n-.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in\n- covariance estimation and spatial whitening of MEG and EEG signals, vol.\n- 108, 328-342, NeuroImage.\n+.. footbibliography::\n \n \"\"\"\n # Authors: Alexandre Gramfort <[email protected]>\n", "issue": "use bibtex in plot_evoked_whitening.py\nconvert references in `examples/visualization/plot_evoked_whitening.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "\"\"\"\n=============================================\nWhitening evoked data with a noise covariance\n=============================================\n\nEvoked data are loaded and then whitened using a given noise covariance\nmatrix. It's an excellent quality check to see if baseline signals match\nthe assumption of Gaussian white noise during the baseline period.\n\nCovariance estimation and diagnostic plots are based on [1]_.\n\nReferences\n----------\n.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in\n covariance estimation and spatial whitening of MEG and EEG signals, vol.\n 108, 328-342, NeuroImage.\n\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis A. Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\n\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.cov import compute_covariance\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(1, 40, n_jobs=1, fir_design='firwin')\nraw.info['bads'] += ['MEG 2443'] # bads + 1 more\nevents = mne.read_events(event_fname)\n\n# let's look at rare events, button presses\nevent_id, tmin, tmax = 2, -0.2, 0.5\nreject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),\n baseline=None, reject=reject, preload=True)\n\n# Uncomment next line to use fewer samples and study regularization effects\n# epochs = epochs[:20] # For your data, use as many samples as you can!\n\n###############################################################################\n# Compute covariance using automated regularization\nmethod_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))\nnoise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',\n return_estimators=True, verbose=True, n_jobs=1,\n projs=None, rank=None,\n method_params=method_params)\n\n# With \"return_estimator=True\" all estimated covariances sorted\n# by log-likelihood are returned.\n\nprint('Covariance estimates sorted from best to worst')\nfor c in noise_covs:\n print(\"%s : %s\" % (c['method'], c['loglik']))\n\n###############################################################################\n# Show the 
evoked data:\n\nevoked = epochs.average()\n\nevoked.plot(time_unit='s') # plot evoked response\n\n###############################################################################\n# We can then show whitening for our various noise covariance estimates.\n#\n# Here we should look to see if baseline signals match the\n# assumption of Gaussian white noise. we expect values centered at\n# 0 within 2 standard deviations for 95% of the time points.\n#\n# For the Global field power we expect a value of 1.\n\nevoked.plot_white(noise_covs, time_unit='s')\n", "path": "examples/visualization/plot_evoked_whitening.py"}], "after_files": [{"content": "\"\"\"\n=============================================\nWhitening evoked data with a noise covariance\n=============================================\n\nEvoked data are loaded and then whitened using a given noise covariance\nmatrix. It's an excellent quality check to see if baseline signals match\nthe assumption of Gaussian white noise during the baseline period.\n\nCovariance estimation and diagnostic plots are based on\n:footcite:`EngemannGramfort2015`.\n\nReferences\n----------\n.. footbibliography::\n\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis A. Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\n\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.cov import compute_covariance\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(1, 40, n_jobs=1, fir_design='firwin')\nraw.info['bads'] += ['MEG 2443'] # bads + 1 more\nevents = mne.read_events(event_fname)\n\n# let's look at rare events, button presses\nevent_id, tmin, tmax = 2, -0.2, 0.5\nreject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),\n baseline=None, reject=reject, preload=True)\n\n# Uncomment next line to use fewer samples and study regularization effects\n# epochs = epochs[:20] # For your data, use as many samples as you can!\n\n###############################################################################\n# Compute covariance using automated regularization\nmethod_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))\nnoise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',\n return_estimators=True, verbose=True, n_jobs=1,\n projs=None, rank=None,\n method_params=method_params)\n\n# With \"return_estimator=True\" all estimated covariances sorted\n# by log-likelihood are returned.\n\nprint('Covariance estimates sorted from best to worst')\nfor c in noise_covs:\n print(\"%s : %s\" % (c['method'], c['loglik']))\n\n###############################################################################\n# Show the evoked data:\n\nevoked = epochs.average()\n\nevoked.plot(time_unit='s') # plot evoked response\n\n###############################################################################\n# We can then show whitening for our various noise covariance estimates.\n#\n# Here we should look to see if baseline signals match the\n# assumption of Gaussian white noise. 
we expect values centered at\n# 0 within 2 standard deviations for 95% of the time points.\n#\n# For the Global field power we expect a value of 1.\n\nevoked.plot_white(noise_covs, time_unit='s')\n", "path": "examples/visualization/plot_evoked_whitening.py"}]} | 1,235 | 225 |
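The mne-python record above converts a hand-written reference into Sphinx footnote citations. A minimal sketch of the docstring pattern the golden diff introduces; the citation key is taken from the diff, while the surrounding example docstring is purely illustrative and assumes the project's sphinxcontrib-bibtex setup:

```python
"""Example module docstring using footnote citations.

Covariance estimation and diagnostic plots are based on
:footcite:`EngemannGramfort2015`.

References
----------
.. footbibliography::
"""
```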
gh_patches_debug_7944 | rasdani/github-patches | git_diff | hylang__hy-1710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"\n" isn't mangled appropriately
=> (mangle "\n")
'hyx_XUnX'
=> (unmangle (mangle "\n"))
Traceback (most recent call last):
…
ValueError: invalid literal for int() with base 16: 'n'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/lex/__init__.py`
Content:
```
1 # Copyright 2018 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 from __future__ import unicode_literals
6
7 import re
8 import sys
9 import unicodedata
10
11 from hy._compat import str_type, isidentifier, UCS4
12 from hy.lex.exceptions import PrematureEndOfInput, LexException # NOQA
13 from hy.models import HyExpression, HySymbol
14
15 try:
16 from io import StringIO
17 except ImportError:
18 from StringIO import StringIO
19
20
21 def hy_parse(source):
22 """Parse a Hy source string.
23
24 Parameters
25 ----------
26 source: string
27 Source code to parse.
28
29 Returns
30 -------
31 out : instance of `types.CodeType`
32 """
33 source = re.sub(r'\A#!.*', '', source)
34 return HyExpression([HySymbol("do")] + tokenize(source + "\n"))
35
36
37 def tokenize(buf):
38 """
39 Tokenize a Lisp file or string buffer into internal Hy objects.
40 """
41 from hy.lex.lexer import lexer
42 from hy.lex.parser import parser
43 from rply.errors import LexingError
44 try:
45 return parser.parse(lexer.lex(buf))
46 except LexingError as e:
47 pos = e.getsourcepos()
48 raise LexException("Could not identify the next token.",
49 pos.lineno, pos.colno, buf)
50 except LexException as e:
51 if e.source is None:
52 e.source = buf
53 raise
54
55
56 mangle_delim = 'X'
57
58
59 def mangle(s):
60 """Stringify the argument and convert it to a valid Python identifier
61 according to Hy's mangling rules."""
62 def unicode_char_to_hex(uchr):
63 # Covert a unicode char to hex string, without prefix
64 return uchr.encode('unicode-escape').decode('utf-8').lstrip('\\U').lstrip('\\u').lstrip('0')
65
66 assert s
67
68 s = str_type(s)
69 s = s.replace("-", "_")
70 s2 = s.lstrip('_')
71 leading_underscores = '_' * (len(s) - len(s2))
72 s = s2
73
74 if s.endswith("?"):
75 s = 'is_' + s[:-1]
76 if not isidentifier(leading_underscores + s):
77 # Replace illegal characters with their Unicode character
78 # names, or hexadecimal if they don't have one.
79 s = 'hyx_' + ''.join(
80 c
81 if c != mangle_delim and isidentifier('S' + c)
82 # We prepend the "S" because some characters aren't
83 # allowed at the start of an identifier.
84 else '{0}{1}{0}'.format(mangle_delim,
85 unicodedata.name(c, '').lower().replace('-', 'H').replace(' ', '_')
86 or 'U{}'.format(unicode_char_to_hex(c)))
87 for c in unicode_to_ucs4iter(s))
88
89 s = leading_underscores + s
90 assert isidentifier(s)
91 return s
92
93
94 def unmangle(s):
95 """Stringify the argument and try to convert it to a pretty unmangled
96 form. This may not round-trip, because different Hy symbol names can
97 mangle to the same Python identifier."""
98
99 s = str_type(s)
100
101 s2 = s.lstrip('_')
102 leading_underscores = len(s) - len(s2)
103 s = s2
104
105 if s.startswith('hyx_'):
106 s = re.sub('{0}(U)?([_a-z0-9H]+?){0}'.format(mangle_delim),
107 lambda mo:
108 chr(int(mo.group(2), base=16))
109 if mo.group(1)
110 else unicodedata.lookup(
111 mo.group(2).replace('_', ' ').replace('H', '-').upper()),
112 s[len('hyx_'):])
113 if s.startswith('is_'):
114 s = s[len("is_"):] + "?"
115 s = s.replace('_', '-')
116
117 return '-' * leading_underscores + s
118
119
120 def unicode_to_ucs4iter(ustr):
121 # Covert a unicode string to an iterable object,
122 # elements in the object are single USC-4 unicode characters
123 if UCS4:
124 return ustr
125 ucs4_list = list(ustr)
126 for i, u in enumerate(ucs4_list):
127 if 0xD7FF < ord(u) < 0xDC00:
128 ucs4_list[i] += ucs4_list[i + 1]
129 del ucs4_list[i + 1]
130 return ucs4_list
131
132
133 def read(from_file=sys.stdin, eof=""):
134 """Read from input and returns a tokenized string.
135
136 Can take a given input buffer to read from, and a single byte as EOF
137 (defaults to an empty string).
138 """
139 buff = ""
140 while True:
141 inn = str(from_file.readline())
142 if inn == eof:
143 raise EOFError("Reached end of file")
144 buff += inn
145 try:
146 parsed = next(iter(tokenize(buff)), None)
147 except (PrematureEndOfInput, IndexError):
148 pass
149 else:
150 break
151 return parsed
152
153
154 def read_str(input):
155 return read(StringIO(str_type(input)))
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py
--- a/hy/lex/__init__.py
+++ b/hy/lex/__init__.py
@@ -61,7 +61,10 @@
according to Hy's mangling rules."""
def unicode_char_to_hex(uchr):
# Covert a unicode char to hex string, without prefix
- return uchr.encode('unicode-escape').decode('utf-8').lstrip('\\U').lstrip('\\u').lstrip('0')
+ if len(uchr) == 1 and ord(uchr) < 128:
+ return format(ord(uchr), 'x')
+ return (uchr.encode('unicode-escape').decode('utf-8')
+ .lstrip('\\U').lstrip('\\u').lstrip('\\x').lstrip('0'))
assert s
| {"golden_diff": "diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py\n--- a/hy/lex/__init__.py\n+++ b/hy/lex/__init__.py\n@@ -61,7 +61,10 @@\n according to Hy's mangling rules.\"\"\"\n def unicode_char_to_hex(uchr):\n # Covert a unicode char to hex string, without prefix\n- return uchr.encode('unicode-escape').decode('utf-8').lstrip('\\\\U').lstrip('\\\\u').lstrip('0')\n+ if len(uchr) == 1 and ord(uchr) < 128:\n+ return format(ord(uchr), 'x')\n+ return (uchr.encode('unicode-escape').decode('utf-8')\n+ .lstrip('\\\\U').lstrip('\\\\u').lstrip('\\\\x').lstrip('0'))\n \n assert s\n", "issue": "\"\\n\" isn't mangled appropriately\n => (mangle \"\\n\")\r\n 'hyx_XUnX'\r\n => (unmangle (mangle \"\\n\"))\r\n Traceback (most recent call last):\r\n \u2026\r\n ValueError: invalid literal for int() with base 16: 'n'\r\n\n", "before_files": [{"content": "# Copyright 2018 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nfrom __future__ import unicode_literals\n\nimport re\nimport sys\nimport unicodedata\n\nfrom hy._compat import str_type, isidentifier, UCS4\nfrom hy.lex.exceptions import PrematureEndOfInput, LexException # NOQA\nfrom hy.models import HyExpression, HySymbol\n\ntry:\n from io import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\n\ndef hy_parse(source):\n \"\"\"Parse a Hy source string.\n\n Parameters\n ----------\n source: string\n Source code to parse.\n\n Returns\n -------\n out : instance of `types.CodeType`\n \"\"\"\n source = re.sub(r'\\A#!.*', '', source)\n return HyExpression([HySymbol(\"do\")] + tokenize(source + \"\\n\"))\n\n\ndef tokenize(buf):\n \"\"\"\n Tokenize a Lisp file or string buffer into internal Hy objects.\n \"\"\"\n from hy.lex.lexer import lexer\n from hy.lex.parser import parser\n from rply.errors import LexingError\n try:\n return parser.parse(lexer.lex(buf))\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n pos.lineno, pos.colno, buf)\n except LexException as e:\n if e.source is None:\n e.source = buf\n raise\n\n\nmangle_delim = 'X'\n\n\ndef mangle(s):\n \"\"\"Stringify the argument and convert it to a valid Python identifier\n according to Hy's mangling rules.\"\"\"\n def unicode_char_to_hex(uchr):\n # Covert a unicode char to hex string, without prefix\n return uchr.encode('unicode-escape').decode('utf-8').lstrip('\\\\U').lstrip('\\\\u').lstrip('0')\n\n assert s\n\n s = str_type(s)\n s = s.replace(\"-\", \"_\")\n s2 = s.lstrip('_')\n leading_underscores = '_' * (len(s) - len(s2))\n s = s2\n\n if s.endswith(\"?\"):\n s = 'is_' + s[:-1]\n if not isidentifier(leading_underscores + s):\n # Replace illegal characters with their Unicode character\n # names, or hexadecimal if they don't have one.\n s = 'hyx_' + ''.join(\n c\n if c != mangle_delim and isidentifier('S' + c)\n # We prepend the \"S\" because some characters aren't\n # allowed at the start of an identifier.\n else '{0}{1}{0}'.format(mangle_delim,\n unicodedata.name(c, '').lower().replace('-', 'H').replace(' ', '_')\n or 'U{}'.format(unicode_char_to_hex(c)))\n for c in unicode_to_ucs4iter(s))\n\n s = leading_underscores + s\n assert isidentifier(s)\n return s\n\n\ndef unmangle(s):\n \"\"\"Stringify the argument and try to convert it to a pretty unmangled\n form. 
This may not round-trip, because different Hy symbol names can\n mangle to the same Python identifier.\"\"\"\n\n s = str_type(s)\n\n s2 = s.lstrip('_')\n leading_underscores = len(s) - len(s2)\n s = s2\n\n if s.startswith('hyx_'):\n s = re.sub('{0}(U)?([_a-z0-9H]+?){0}'.format(mangle_delim),\n lambda mo:\n chr(int(mo.group(2), base=16))\n if mo.group(1)\n else unicodedata.lookup(\n mo.group(2).replace('_', ' ').replace('H', '-').upper()),\n s[len('hyx_'):])\n if s.startswith('is_'):\n s = s[len(\"is_\"):] + \"?\"\n s = s.replace('_', '-')\n\n return '-' * leading_underscores + s\n\n\ndef unicode_to_ucs4iter(ustr):\n # Covert a unicode string to an iterable object,\n # elements in the object are single USC-4 unicode characters\n if UCS4:\n return ustr\n ucs4_list = list(ustr)\n for i, u in enumerate(ucs4_list):\n if 0xD7FF < ord(u) < 0xDC00:\n ucs4_list[i] += ucs4_list[i + 1]\n del ucs4_list[i + 1]\n return ucs4_list\n\n\ndef read(from_file=sys.stdin, eof=\"\"):\n \"\"\"Read from input and returns a tokenized string.\n\n Can take a given input buffer to read from, and a single byte as EOF\n (defaults to an empty string).\n \"\"\"\n buff = \"\"\n while True:\n inn = str(from_file.readline())\n if inn == eof:\n raise EOFError(\"Reached end of file\")\n buff += inn\n try:\n parsed = next(iter(tokenize(buff)), None)\n except (PrematureEndOfInput, IndexError):\n pass\n else:\n break\n return parsed\n\n\ndef read_str(input):\n return read(StringIO(str_type(input)))\n", "path": "hy/lex/__init__.py"}], "after_files": [{"content": "# Copyright 2018 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nfrom __future__ import unicode_literals\n\nimport re\nimport sys\nimport unicodedata\n\nfrom hy._compat import str_type, isidentifier, UCS4\nfrom hy.lex.exceptions import PrematureEndOfInput, LexException # NOQA\nfrom hy.models import HyExpression, HySymbol\n\ntry:\n from io import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\n\ndef hy_parse(source):\n \"\"\"Parse a Hy source string.\n\n Parameters\n ----------\n source: string\n Source code to parse.\n\n Returns\n -------\n out : instance of `types.CodeType`\n \"\"\"\n source = re.sub(r'\\A#!.*', '', source)\n return HyExpression([HySymbol(\"do\")] + tokenize(source + \"\\n\"))\n\n\ndef tokenize(buf):\n \"\"\"\n Tokenize a Lisp file or string buffer into internal Hy objects.\n \"\"\"\n from hy.lex.lexer import lexer\n from hy.lex.parser import parser\n from rply.errors import LexingError\n try:\n return parser.parse(lexer.lex(buf))\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n pos.lineno, pos.colno, buf)\n except LexException as e:\n if e.source is None:\n e.source = buf\n raise\n\n\nmangle_delim = 'X'\n\n\ndef mangle(s):\n \"\"\"Stringify the argument and convert it to a valid Python identifier\n according to Hy's mangling rules.\"\"\"\n def unicode_char_to_hex(uchr):\n # Covert a unicode char to hex string, without prefix\n if len(uchr) == 1 and ord(uchr) < 128:\n return format(ord(uchr), 'x')\n return (uchr.encode('unicode-escape').decode('utf-8')\n .lstrip('\\\\U').lstrip('\\\\u').lstrip('\\\\x').lstrip('0'))\n\n assert s\n\n s = str_type(s)\n s = s.replace(\"-\", \"_\")\n s2 = s.lstrip('_')\n leading_underscores = '_' * (len(s) - len(s2))\n s = s2\n\n if s.endswith(\"?\"):\n s = 'is_' + s[:-1]\n if not isidentifier(leading_underscores + s):\n # Replace illegal characters with their Unicode 
character\n # names, or hexadecimal if they don't have one.\n s = 'hyx_' + ''.join(\n c\n if c != mangle_delim and isidentifier('S' + c)\n # We prepend the \"S\" because some characters aren't\n # allowed at the start of an identifier.\n else '{0}{1}{0}'.format(mangle_delim,\n unicodedata.name(c, '').lower().replace('-', 'H').replace(' ', '_')\n or 'U{}'.format(unicode_char_to_hex(c)))\n for c in unicode_to_ucs4iter(s))\n\n s = leading_underscores + s\n assert isidentifier(s)\n return s\n\n\ndef unmangle(s):\n \"\"\"Stringify the argument and try to convert it to a pretty unmangled\n form. This may not round-trip, because different Hy symbol names can\n mangle to the same Python identifier.\"\"\"\n\n s = str_type(s)\n\n s2 = s.lstrip('_')\n leading_underscores = len(s) - len(s2)\n s = s2\n\n if s.startswith('hyx_'):\n s = re.sub('{0}(U)?([_a-z0-9H]+?){0}'.format(mangle_delim),\n lambda mo:\n chr(int(mo.group(2), base=16))\n if mo.group(1)\n else unicodedata.lookup(\n mo.group(2).replace('_', ' ').replace('H', '-').upper()),\n s[len('hyx_'):])\n if s.startswith('is_'):\n s = s[len(\"is_\"):] + \"?\"\n s = s.replace('_', '-')\n\n return '-' * leading_underscores + s\n\n\ndef unicode_to_ucs4iter(ustr):\n # Covert a unicode string to an iterable object,\n # elements in the object are single USC-4 unicode characters\n if UCS4:\n return ustr\n ucs4_list = list(ustr)\n for i, u in enumerate(ucs4_list):\n if 0xD7FF < ord(u) < 0xDC00:\n ucs4_list[i] += ucs4_list[i + 1]\n del ucs4_list[i + 1]\n return ucs4_list\n\n\ndef read(from_file=sys.stdin, eof=\"\"):\n \"\"\"Read from input and returns a tokenized string.\n\n Can take a given input buffer to read from, and a single byte as EOF\n (defaults to an empty string).\n \"\"\"\n buff = \"\"\n while True:\n inn = str(from_file.readline())\n if inn == eof:\n raise EOFError(\"Reached end of file\")\n buff += inn\n try:\n parsed = next(iter(tokenize(buff)), None)\n except (PrematureEndOfInput, IndexError):\n pass\n else:\n break\n return parsed\n\n\ndef read_str(input):\n return read(StringIO(str_type(input)))\n", "path": "hy/lex/__init__.py"}]} | 1,844 | 203 |
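For the Hy record above, the heart of the fix is `unicode_char_to_hex`: ASCII characters now take a direct hex-formatting path, and `\x`-style escapes are stripped alongside `\u`/`\U`. A standalone sketch of the patched helper (copied from the record's after-file) together with the round trip from the issue, using plain `ord`/`chr` instead of Hy's full mangle/unmangle machinery:

```python
def unicode_char_to_hex(uchr):
    # Convert a single character to a hex string without any prefix,
    # mirroring the patched helper in the record above.
    if len(uchr) == 1 and ord(uchr) < 128:
        return format(ord(uchr), "x")
    return (uchr.encode("unicode-escape").decode("utf-8")
            .lstrip("\\U").lstrip("\\u").lstrip("\\x").lstrip("0"))


# The failure from the issue: "\n" escapes to "\\n", stripping the backslash
# prefixes left a bare "n", and int("n", 16) raised ValueError. The ASCII
# fast path now yields "a" (0x0a), which round-trips cleanly.
assert unicode_char_to_hex("\n") == "a"
assert chr(int(unicode_char_to_hex("\n"), base=16)) == "\n"
print("round-trip ok")
```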
gh_patches_debug_15369 | rasdani/github-patches | git_diff | ibis-project__ibis-3044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: isolated dask backend tests fail due to removed imports
For some reason lines 6 and 8 here: https://github.com/ibis-project/ibis/commit/a1262410310bb4d638a73e1cdfbe93c2b4089905#diff-96d84d9b6e9e84a2be7a046dc9853df1ca5fc6e894307339b02cd61e666c0149L6-L8
were removed.
This causes dasks tests to fail when they are run in isolation from other tests that (transitively) import from the pandas backend.
This is both a ci bug and a bug in the code, since we're not testing backends independently. Perhaps unsurprisingly I discovered the bug in #2937, which fixes the CI part of this problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/dask/__init__.py`
Content:
```
1 from typing import Mapping
2
3 import dask
4 import dask.dataframe as dd
5 import pandas as pd
6 import toolz
7 from dask.base import DaskMethodsMixin
8
9 import ibis.common.exceptions as com
10 import ibis.config
11 import ibis.expr.schema as sch
12 import ibis.expr.types as ir
13 from ibis.backends.pandas import BasePandasBackend
14
15 from .client import DaskDatabase, DaskTable, ibis_schema_to_dask
16 from .core import execute_and_reset
17
18 # Make sure that the pandas backend is loaded, dispatching has been
19 # executed, and options have been loaded
20 ibis.pandas
21
22
23 class Backend(BasePandasBackend):
24 name = 'dask'
25 database_class = DaskDatabase
26 table_class = DaskTable
27
28 def connect(self, dictionary):
29 # register dispatchers
30 from . import udf # noqa: F401
31
32 return super().connect(dictionary)
33
34 @property
35 def version(self):
36 return dask.__version__
37
38 def execute(
39 self,
40 query: ir.Expr,
41 params: Mapping[ir.Expr, object] = None,
42 limit: str = 'default',
43 **kwargs,
44 ):
45 if limit != 'default':
46 raise ValueError(
47 'limit parameter to execute is not yet implemented in the '
48 'dask backend'
49 )
50
51 if not isinstance(query, ir.Expr):
52 raise TypeError(
53 "`query` has type {!r}, expected ibis.expr.types.Expr".format(
54 type(query).__name__
55 )
56 )
57
58 result = self.compile(query, params, **kwargs)
59 if isinstance(result, DaskMethodsMixin):
60 return result.compute()
61 else:
62 return result
63
64 def compile(
65 self, query: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs
66 ):
67 """Compile `expr`.
68
69 Notes
70 -----
71 For the dask backend returns a dask graph that you can run ``.compute``
72 on to get a pandas object.
73
74 """
75 return execute_and_reset(query, params=params, **kwargs)
76
77 def create_table(
78 self,
79 table_name: str,
80 obj: dd.DataFrame = None,
81 schema: sch.Schema = None,
82 ):
83 """Create a table."""
84 if obj is not None:
85 df = obj
86 elif schema is not None:
87 dtypes = ibis_schema_to_dask(schema)
88 df = schema.apply_to(
89 dd.from_pandas(
90 pd.DataFrame(columns=list(map(toolz.first, dtypes))),
91 npartitions=1,
92 )
93 )
94 else:
95 raise com.IbisError('Must pass expr or schema')
96
97 self.dictionary[table_name] = df
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/backends/dask/__init__.py b/ibis/backends/dask/__init__.py
--- a/ibis/backends/dask/__init__.py
+++ b/ibis/backends/dask/__init__.py
@@ -6,6 +6,9 @@
import toolz
from dask.base import DaskMethodsMixin
+# import the pandas execution module to register dispatched implementations of
+# execute_node that the dask backend will later override
+import ibis.backends.pandas.execution # noqa: F401
import ibis.common.exceptions as com
import ibis.config
import ibis.expr.schema as sch
@@ -15,8 +18,7 @@
from .client import DaskDatabase, DaskTable, ibis_schema_to_dask
from .core import execute_and_reset
-# Make sure that the pandas backend is loaded, dispatching has been
-# executed, and options have been loaded
+# Make sure that the pandas backend options have been loaded
ibis.pandas
| {"golden_diff": "diff --git a/ibis/backends/dask/__init__.py b/ibis/backends/dask/__init__.py\n--- a/ibis/backends/dask/__init__.py\n+++ b/ibis/backends/dask/__init__.py\n@@ -6,6 +6,9 @@\n import toolz\n from dask.base import DaskMethodsMixin\n \n+# import the pandas execution module to register dispatched implementations of\n+# execute_node that the dask backend will later override\n+import ibis.backends.pandas.execution # noqa: F401\n import ibis.common.exceptions as com\n import ibis.config\n import ibis.expr.schema as sch\n@@ -15,8 +18,7 @@\n from .client import DaskDatabase, DaskTable, ibis_schema_to_dask\n from .core import execute_and_reset\n \n-# Make sure that the pandas backend is loaded, dispatching has been\n-# executed, and options have been loaded\n+# Make sure that the pandas backend options have been loaded\n ibis.pandas\n", "issue": "bug: isolated dask backend tests fail due to removed imports\nFor some reason lines 6 and 8 here: https://github.com/ibis-project/ibis/commit/a1262410310bb4d638a73e1cdfbe93c2b4089905#diff-96d84d9b6e9e84a2be7a046dc9853df1ca5fc6e894307339b02cd61e666c0149L6-L8\r\n\r\nwere removed.\r\n\r\nThis causes dasks tests to fail when they are run in isolation from other tests that (transitively) import from the pandas backend.\r\n\r\nThis is both a ci bug and a bug in the code, since we're not testing backends independently. Perhaps unsurprisingly I discovered the bug in #2937, which fixes the CI part of this problem.\n", "before_files": [{"content": "from typing import Mapping\n\nimport dask\nimport dask.dataframe as dd\nimport pandas as pd\nimport toolz\nfrom dask.base import DaskMethodsMixin\n\nimport ibis.common.exceptions as com\nimport ibis.config\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.backends.pandas import BasePandasBackend\n\nfrom .client import DaskDatabase, DaskTable, ibis_schema_to_dask\nfrom .core import execute_and_reset\n\n# Make sure that the pandas backend is loaded, dispatching has been\n# executed, and options have been loaded\nibis.pandas\n\n\nclass Backend(BasePandasBackend):\n name = 'dask'\n database_class = DaskDatabase\n table_class = DaskTable\n\n def connect(self, dictionary):\n # register dispatchers\n from . 
import udf # noqa: F401\n\n return super().connect(dictionary)\n\n @property\n def version(self):\n return dask.__version__\n\n def execute(\n self,\n query: ir.Expr,\n params: Mapping[ir.Expr, object] = None,\n limit: str = 'default',\n **kwargs,\n ):\n if limit != 'default':\n raise ValueError(\n 'limit parameter to execute is not yet implemented in the '\n 'dask backend'\n )\n\n if not isinstance(query, ir.Expr):\n raise TypeError(\n \"`query` has type {!r}, expected ibis.expr.types.Expr\".format(\n type(query).__name__\n )\n )\n\n result = self.compile(query, params, **kwargs)\n if isinstance(result, DaskMethodsMixin):\n return result.compute()\n else:\n return result\n\n def compile(\n self, query: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs\n ):\n \"\"\"Compile `expr`.\n\n Notes\n -----\n For the dask backend returns a dask graph that you can run ``.compute``\n on to get a pandas object.\n\n \"\"\"\n return execute_and_reset(query, params=params, **kwargs)\n\n def create_table(\n self,\n table_name: str,\n obj: dd.DataFrame = None,\n schema: sch.Schema = None,\n ):\n \"\"\"Create a table.\"\"\"\n if obj is not None:\n df = obj\n elif schema is not None:\n dtypes = ibis_schema_to_dask(schema)\n df = schema.apply_to(\n dd.from_pandas(\n pd.DataFrame(columns=list(map(toolz.first, dtypes))),\n npartitions=1,\n )\n )\n else:\n raise com.IbisError('Must pass expr or schema')\n\n self.dictionary[table_name] = df\n", "path": "ibis/backends/dask/__init__.py"}], "after_files": [{"content": "from typing import Mapping\n\nimport dask\nimport dask.dataframe as dd\nimport pandas as pd\nimport toolz\nfrom dask.base import DaskMethodsMixin\n\n# import the pandas execution module to register dispatched implementations of\n# execute_node that the dask backend will later override\nimport ibis.backends.pandas.execution # noqa: F401\nimport ibis.common.exceptions as com\nimport ibis.config\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.backends.pandas import BasePandasBackend\n\nfrom .client import DaskDatabase, DaskTable, ibis_schema_to_dask\nfrom .core import execute_and_reset\n\n# Make sure that the pandas backend options have been loaded\nibis.pandas\n\n\nclass Backend(BasePandasBackend):\n name = 'dask'\n database_class = DaskDatabase\n table_class = DaskTable\n\n def connect(self, dictionary):\n # register dispatchers\n from . 
import udf # noqa: F401\n\n return super().connect(dictionary)\n\n @property\n def version(self):\n return dask.__version__\n\n def execute(\n self,\n query: ir.Expr,\n params: Mapping[ir.Expr, object] = None,\n limit: str = 'default',\n **kwargs,\n ):\n if limit != 'default':\n raise ValueError(\n 'limit parameter to execute is not yet implemented in the '\n 'dask backend'\n )\n\n if not isinstance(query, ir.Expr):\n raise TypeError(\n \"`query` has type {!r}, expected ibis.expr.types.Expr\".format(\n type(query).__name__\n )\n )\n\n result = self.compile(query, params, **kwargs)\n if isinstance(result, DaskMethodsMixin):\n return result.compute()\n else:\n return result\n\n def compile(\n self, query: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs\n ):\n \"\"\"Compile `expr`.\n\n Notes\n -----\n For the dask backend returns a dask graph that you can run ``.compute``\n on to get a pandas object.\n\n \"\"\"\n return execute_and_reset(query, params=params, **kwargs)\n\n def create_table(\n self,\n table_name: str,\n obj: dd.DataFrame = None,\n schema: sch.Schema = None,\n ):\n \"\"\"Create a table.\"\"\"\n if obj is not None:\n df = obj\n elif schema is not None:\n dtypes = ibis_schema_to_dask(schema)\n df = schema.apply_to(\n dd.from_pandas(\n pd.DataFrame(columns=list(map(toolz.first, dtypes))),\n npartitions=1,\n )\n )\n else:\n raise com.IbisError('Must pass expr or schema')\n\n self.dictionary[table_name] = df\n", "path": "ibis/backends/dask/__init__.py"}]} | 1,257 | 224 |
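The ibis record above restores an import whose only purpose is its side effect: importing `ibis.backends.pandas.execution` registers the pandas `execute_node` implementations that the dask backend later overrides, so dask tests pass even when run in isolation. A toy, self-contained sketch of that register-on-import idea using `functools.singledispatch`; the class and module names here are illustrative stand-ins, not ibis's real layout:

```python
from functools import singledispatch


@singledispatch
def execute_node(op):
    raise NotImplementedError(f"no implementation registered for {type(op).__name__}")


class Sum:
    """Toy operation type standing in for an ibis expression node."""


# In ibis this registration lives in ibis.backends.pandas.execution, which is
# why the golden diff adds "import ibis.backends.pandas.execution  # noqa: F401"
# at module import time: without the import, the decorator below never runs.
@execute_node.register(Sum)
def _execute_sum(op):
    return "pandas implementation"


print(execute_node(Sum()))  # succeeds only because the registration ran
```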
gh_patches_debug_5574 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2384 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Viewing of "file"-kind of CIV in archive items leads to 403
I created an archive item and added a file type CIV but when trying to view the file it leads to a permission denied error. It seems that the permission check when serving a CIV file is missing a check for archive item viewing. It only checks for algorithm jobs and evaluations:
https://github.com/comic/grand-challenge.org/blob/9322d09c0859998a77accb5c13d6db675504a9c1/app/grandchallenge/serving/views.py#L94-L117
Permissions for archives are only done on archive level (vs. archive item level) so we need to add a check here to see if the CIV belongs to an archive item and if the user has the `archives.view_archive` permission for that archive.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/serving/views.py`
Content:
```
1 import posixpath
2
3 from django.conf import settings
4 from django.core.exceptions import MultipleObjectsReturned, PermissionDenied
5 from django.db.models import F, Q
6 from django.http import Http404, HttpResponseRedirect
7 from django.utils._os import safe_join
8 from guardian.shortcuts import get_objects_for_user
9 from knox.auth import TokenAuthentication
10 from rest_framework.exceptions import AuthenticationFailed
11
12 from grandchallenge.cases.models import Image
13 from grandchallenge.challenges.models import ChallengeRequest
14 from grandchallenge.components.models import ComponentInterfaceValue
15 from grandchallenge.core.storage import internal_protected_s3_storage
16 from grandchallenge.evaluation.models import Submission
17 from grandchallenge.serving.models import Download
18
19
20 def protected_storage_redirect(*, name):
21 # Get the storage with the internal redirect and auth. This will prepend
22 # settings.AWS_S3_ENDPOINT_URL to the url
23 if not internal_protected_s3_storage.exists(name=name):
24 raise Http404("File not found.")
25
26 if settings.PROTECTED_S3_STORAGE_USE_CLOUDFRONT:
27 response = HttpResponseRedirect(
28 internal_protected_s3_storage.cloudfront_signed_url(name=name)
29 )
30 else:
31 url = internal_protected_s3_storage.url(name=name)
32 response = HttpResponseRedirect(url)
33
34 return response
35
36
37 def serve_images(request, *, pk, path, pa="", pb=""):
38 document_root = safe_join(
39 f"/{settings.IMAGE_FILES_SUBDIRECTORY}", pa, pb, str(pk)
40 )
41 path = posixpath.normpath(path).lstrip("/")
42 name = safe_join(document_root, path)
43
44 try:
45 image = Image.objects.get(pk=pk)
46 except Image.DoesNotExist:
47 raise Http404("Image not found.")
48
49 try:
50 user, _ = TokenAuthentication().authenticate(request)
51 except (AuthenticationFailed, TypeError):
52 user = request.user
53
54 if user.has_perm("view_image", image):
55 _create_download(creator_id=user.pk, image_id=image.pk)
56 return protected_storage_redirect(name=name)
57
58 raise PermissionDenied
59
60
61 def serve_submissions(request, *, submission_pk, **_):
62 try:
63 submission = Submission.objects.get(pk=submission_pk)
64 except Submission.DoesNotExist:
65 raise Http404("Submission not found.")
66
67 if request.user.has_perm("view_submission", submission):
68 _create_download(
69 creator_id=request.user.pk, submission_id=submission.pk
70 )
71 return protected_storage_redirect(
72 name=submission.predictions_file.name
73 )
74
75 raise PermissionDenied
76
77
78 def serve_component_interface_value(
79 request, *, component_interface_value_pk, **_
80 ):
81 try:
82 user, _ = TokenAuthentication().authenticate(request)
83 except (AuthenticationFailed, TypeError):
84 user = request.user
85
86 try:
87 # output should only be connected to a single job; throw error if not?
88 civ = ComponentInterfaceValue.objects.get(
89 pk=component_interface_value_pk
90 )
91 except (MultipleObjectsReturned, ComponentInterfaceValue.DoesNotExist):
92 raise Http404("No ComponentInterfaceValue found.")
93
94 if (
95 get_objects_for_user(
96 user=user, perms="algorithms.view_job", accept_global_perms=False
97 )
98 .filter(
99 Q(outputs__pk=component_interface_value_pk)
100 | Q(inputs__pk=component_interface_value_pk)
101 )
102 .exists()
103 ):
104 return protected_storage_redirect(name=civ.file.name)
105 elif (
106 get_objects_for_user(
107 user=user,
108 perms="evaluation.view_evaluation",
109 accept_global_perms=False,
110 )
111 .filter(
112 Q(outputs__pk=component_interface_value_pk)
113 | Q(inputs__pk=component_interface_value_pk)
114 )
115 .exists()
116 ):
117 return protected_storage_redirect(name=civ.file.name)
118
119 raise PermissionDenied
120
121
122 def _create_download(*, creator_id, image_id=None, submission_id=None):
123 kwargs = {"creator_id": creator_id}
124
125 if image_id is not None:
126 kwargs["image_id"] = image_id
127
128 if submission_id is not None:
129 kwargs["submission_id"] = submission_id
130
131 n_updated = Download.objects.filter(**kwargs).update(count=F("count") + 1)
132
133 if n_updated == 0:
134 Download.objects.create(**kwargs)
135
136
137 def serve_structured_challenge_submission_form(
138 request, *, challenge_request_pk, **_
139 ):
140 try:
141 challenge_request = ChallengeRequest.objects.get(
142 pk=challenge_request_pk
143 )
144 except ChallengeRequest.DoesNotExist:
145 raise Http404("Challenge request not found.")
146
147 if request.user.has_perm("challenges.view_challengerequest"):
148 return protected_storage_redirect(
149 name=challenge_request.structured_challenge_submission_form.name
150 )
151 else:
152 raise PermissionDenied
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/serving/views.py b/app/grandchallenge/serving/views.py
--- a/app/grandchallenge/serving/views.py
+++ b/app/grandchallenge/serving/views.py
@@ -115,6 +115,16 @@
.exists()
):
return protected_storage_redirect(name=civ.file.name)
+ elif (
+ get_objects_for_user(
+ user=user,
+ perms="archives.view_archive",
+ accept_global_perms=False,
+ )
+ .filter(items__values__pk=component_interface_value_pk)
+ .exists()
+ ):
+ return protected_storage_redirect(name=civ.file.name)
raise PermissionDenied
| {"golden_diff": "diff --git a/app/grandchallenge/serving/views.py b/app/grandchallenge/serving/views.py\n--- a/app/grandchallenge/serving/views.py\n+++ b/app/grandchallenge/serving/views.py\n@@ -115,6 +115,16 @@\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n+ elif (\n+ get_objects_for_user(\n+ user=user,\n+ perms=\"archives.view_archive\",\n+ accept_global_perms=False,\n+ )\n+ .filter(items__values__pk=component_interface_value_pk)\n+ .exists()\n+ ):\n+ return protected_storage_redirect(name=civ.file.name)\n \n raise PermissionDenied\n", "issue": "Viewing of \"file\"-kind of CIV in archive items leads to 403\nI created an archive item and added a file type CIV but when trying to view the file it leads to a permission denied error. It seems that the permission check when serving a CIV file is missing a check for archive item viewing. It only checks for algorithm jobs and evaluations:\r\n\r\nhttps://github.com/comic/grand-challenge.org/blob/9322d09c0859998a77accb5c13d6db675504a9c1/app/grandchallenge/serving/views.py#L94-L117\r\n\r\nPermissions for archives are only done on archive level (vs. archive item level) so we need to add a check here to see if the CIV belongs to an archive item and if the user has the `archives.view_archive` permission for that archive.\n", "before_files": [{"content": "import posixpath\n\nfrom django.conf import settings\nfrom django.core.exceptions import MultipleObjectsReturned, PermissionDenied\nfrom django.db.models import F, Q\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.utils._os import safe_join\nfrom guardian.shortcuts import get_objects_for_user\nfrom knox.auth import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import ChallengeRequest\nfrom grandchallenge.components.models import ComponentInterfaceValue\nfrom grandchallenge.core.storage import internal_protected_s3_storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.models import Download\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. 
This will prepend\n # settings.AWS_S3_ENDPOINT_URL to the url\n if not internal_protected_s3_storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n if settings.PROTECTED_S3_STORAGE_USE_CLOUDFRONT:\n response = HttpResponseRedirect(\n internal_protected_s3_storage.cloudfront_signed_url(name=name)\n )\n else:\n url = internal_protected_s3_storage.url(name=name)\n response = HttpResponseRedirect(url)\n\n return response\n\n\ndef serve_images(request, *, pk, path, pa=\"\", pb=\"\"):\n document_root = safe_join(\n f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", pa, pb, str(pk)\n )\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"Image not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user.has_perm(\"view_image\", image):\n _create_download(creator_id=user.pk, image_id=image.pk)\n return protected_storage_redirect(name=name)\n\n raise PermissionDenied\n\n\ndef serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"Submission not found.\")\n\n if request.user.has_perm(\"view_submission\", submission):\n _create_download(\n creator_id=request.user.pk, submission_id=submission.pk\n )\n return protected_storage_redirect(\n name=submission.predictions_file.name\n )\n\n raise PermissionDenied\n\n\ndef serve_component_interface_value(\n request, *, component_interface_value_pk, **_\n):\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n try:\n # output should only be connected to a single job; throw error if not?\n civ = ComponentInterfaceValue.objects.get(\n pk=component_interface_value_pk\n )\n except (MultipleObjectsReturned, ComponentInterfaceValue.DoesNotExist):\n raise Http404(\"No ComponentInterfaceValue found.\")\n\n if (\n get_objects_for_user(\n user=user, perms=\"algorithms.view_job\", accept_global_perms=False\n )\n .filter(\n Q(outputs__pk=component_interface_value_pk)\n | Q(inputs__pk=component_interface_value_pk)\n )\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n elif (\n get_objects_for_user(\n user=user,\n perms=\"evaluation.view_evaluation\",\n accept_global_perms=False,\n )\n .filter(\n Q(outputs__pk=component_interface_value_pk)\n | Q(inputs__pk=component_interface_value_pk)\n )\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n\n raise PermissionDenied\n\n\ndef _create_download(*, creator_id, image_id=None, submission_id=None):\n kwargs = {\"creator_id\": creator_id}\n\n if image_id is not None:\n kwargs[\"image_id\"] = image_id\n\n if submission_id is not None:\n kwargs[\"submission_id\"] = submission_id\n\n n_updated = Download.objects.filter(**kwargs).update(count=F(\"count\") + 1)\n\n if n_updated == 0:\n Download.objects.create(**kwargs)\n\n\ndef serve_structured_challenge_submission_form(\n request, *, challenge_request_pk, **_\n):\n try:\n challenge_request = ChallengeRequest.objects.get(\n pk=challenge_request_pk\n )\n except ChallengeRequest.DoesNotExist:\n raise Http404(\"Challenge request not found.\")\n\n if request.user.has_perm(\"challenges.view_challengerequest\"):\n return protected_storage_redirect(\n name=challenge_request.structured_challenge_submission_form.name\n )\n else:\n raise 
PermissionDenied\n", "path": "app/grandchallenge/serving/views.py"}], "after_files": [{"content": "import posixpath\n\nfrom django.conf import settings\nfrom django.core.exceptions import MultipleObjectsReturned, PermissionDenied\nfrom django.db.models import F, Q\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.utils._os import safe_join\nfrom guardian.shortcuts import get_objects_for_user\nfrom knox.auth import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import ChallengeRequest\nfrom grandchallenge.components.models import ComponentInterfaceValue\nfrom grandchallenge.core.storage import internal_protected_s3_storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.models import Download\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. This will prepend\n # settings.AWS_S3_ENDPOINT_URL to the url\n if not internal_protected_s3_storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n if settings.PROTECTED_S3_STORAGE_USE_CLOUDFRONT:\n response = HttpResponseRedirect(\n internal_protected_s3_storage.cloudfront_signed_url(name=name)\n )\n else:\n url = internal_protected_s3_storage.url(name=name)\n response = HttpResponseRedirect(url)\n\n return response\n\n\ndef serve_images(request, *, pk, path, pa=\"\", pb=\"\"):\n document_root = safe_join(\n f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", pa, pb, str(pk)\n )\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"Image not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user.has_perm(\"view_image\", image):\n _create_download(creator_id=user.pk, image_id=image.pk)\n return protected_storage_redirect(name=name)\n\n raise PermissionDenied\n\n\ndef serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"Submission not found.\")\n\n if request.user.has_perm(\"view_submission\", submission):\n _create_download(\n creator_id=request.user.pk, submission_id=submission.pk\n )\n return protected_storage_redirect(\n name=submission.predictions_file.name\n )\n\n raise PermissionDenied\n\n\ndef serve_component_interface_value(\n request, *, component_interface_value_pk, **_\n):\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n try:\n # output should only be connected to a single job; throw error if not?\n civ = ComponentInterfaceValue.objects.get(\n pk=component_interface_value_pk\n )\n except (MultipleObjectsReturned, ComponentInterfaceValue.DoesNotExist):\n raise Http404(\"No ComponentInterfaceValue found.\")\n\n if (\n get_objects_for_user(\n user=user, perms=\"algorithms.view_job\", accept_global_perms=False\n )\n .filter(\n Q(outputs__pk=component_interface_value_pk)\n | Q(inputs__pk=component_interface_value_pk)\n )\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n elif (\n get_objects_for_user(\n user=user,\n perms=\"evaluation.view_evaluation\",\n accept_global_perms=False,\n )\n .filter(\n Q(outputs__pk=component_interface_value_pk)\n | 
Q(inputs__pk=component_interface_value_pk)\n )\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n elif (\n get_objects_for_user(\n user=user,\n perms=\"archives.view_archive\",\n accept_global_perms=False,\n )\n .filter(items__values__pk=component_interface_value_pk)\n .exists()\n ):\n return protected_storage_redirect(name=civ.file.name)\n\n raise PermissionDenied\n\n\ndef _create_download(*, creator_id, image_id=None, submission_id=None):\n kwargs = {\"creator_id\": creator_id}\n\n if image_id is not None:\n kwargs[\"image_id\"] = image_id\n\n if submission_id is not None:\n kwargs[\"submission_id\"] = submission_id\n\n n_updated = Download.objects.filter(**kwargs).update(count=F(\"count\") + 1)\n\n if n_updated == 0:\n Download.objects.create(**kwargs)\n\n\ndef serve_structured_challenge_submission_form(\n request, *, challenge_request_pk, **_\n):\n try:\n challenge_request = ChallengeRequest.objects.get(\n pk=challenge_request_pk\n )\n except ChallengeRequest.DoesNotExist:\n raise Http404(\"Challenge request not found.\")\n\n if request.user.has_perm(\"challenges.view_challengerequest\"):\n return protected_storage_redirect(\n name=challenge_request.structured_challenge_submission_form.name\n )\n else:\n raise PermissionDenied\n", "path": "app/grandchallenge/serving/views.py"}]} | 1,811 | 151 |
gh_patches_debug_6667 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-633 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version on GitHub different from version on PyPI
# Brief Description of Fix
<!-- Please describe the fix in terms of a "before" and "after". In other words, what's not so good about the current docs
page, and what you would like to see it become.
Example starter wording is provided. -->
Currently, the version in the repo is "0.19.0", whereas it's "0.20.0" on PyPI.
I would like to propose a change, such that the version is updated here.
# Relevant Context
<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available
to get you started. -->
- [Link to PyPI](https://pypi.org/project/pyjanitor/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 extra_spark = ["pyspark"]
42 extra_biology = ["biopython"]
43 extra_chemistry = ["rdkit"]
44 extra_engineering = ["unyt"]
45 extra_all = extra_biology + extra_engineering + extra_spark
46
47 setup(
48 name="pyjanitor",
49 version="0.19.0",
50 description="Tools for cleaning pandas DataFrames",
51 author="Eric J. Ma",
52 author_email="[email protected]",
53 url="https://github.com/ericmjl/pyjanitor",
54 packages=["janitor"],
55 install_requires=requirements(),
56 extras_require={
57 "all": extra_all,
58 "biology": extra_biology,
59 # "chemistry": extra_chemistry, should be inserted once rdkit
60 # fixes https://github.com/rdkit/rdkit/issues/1812
61 "engineering": extra_engineering,
62 "spark": extra_spark,
63 },
64 python_requires=">=3.6",
65 long_description=generate_long_description(),
66 long_description_content_type="text/x-rst",
67 )
68
```
Path: `janitor/__init__.py`
Content:
```
1 try:
2 import janitor.xarray
3 except ImportError:
4 pass
5
6 from .functions import * # noqa: F403, F401
7 from .math import *
8 from .ml import get_features_targets as _get_features_targets
9 from .utils import refactored_function
10
11 # from .dataframe import JanitorDataFrame as DataFrame # noqa: F401
12 # from .dataframe import JanitorSeries as Series # noqa: F401
13
14
15 @refactored_function(
16 "get_features_targets() has moved. Please use ml.get_features_targets()."
17 )
18 def get_features_targets(*args, **kwargs):
19 return _get_features_targets(*args, **kwargs)
20
21
22 __version__ = "0.19.0"
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/__init__.py b/janitor/__init__.py
--- a/janitor/__init__.py
+++ b/janitor/__init__.py
@@ -19,4 +19,4 @@
return _get_features_targets(*args, **kwargs)
-__version__ = "0.19.0"
+__version__ = "0.20.0"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
setup(
name="pyjanitor",
- version="0.19.0",
+ version="0.20.0",
description="Tools for cleaning pandas DataFrames",
author="Eric J. Ma",
author_email="[email protected]",
| {"golden_diff": "diff --git a/janitor/__init__.py b/janitor/__init__.py\n--- a/janitor/__init__.py\n+++ b/janitor/__init__.py\n@@ -19,4 +19,4 @@\n return _get_features_targets(*args, **kwargs)\n \n \n-__version__ = \"0.19.0\"\n+__version__ = \"0.20.0\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n \n setup(\n name=\"pyjanitor\",\n- version=\"0.19.0\",\n+ version=\"0.20.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n", "issue": "Version on GitHub different from version on PyPI\n# Brief Description of Fix\r\n\r\n<!-- Please describe the fix in terms of a \"before\" and \"after\". In other words, what's not so good about the current docs\r\npage, and what you would like to see it become.\r\n\r\nExample starter wording is provided. -->\r\n\r\nCurrently, the version in the repo is \"0.19.0\", whereas it's \"0.20.0\" on PyPI.\r\n\r\nI would like to propose a change, such that the version is updated here.\r\n\r\n# Relevant Context\r\n\r\n<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available\r\nto get you started. -->\r\n\r\n- [Link to PyPI](https://pypi.org/project/pyjanitor/)\r\n\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.19.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}, {"content": "try:\n import janitor.xarray\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import *\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\n\n# from .dataframe import JanitorDataFrame as DataFrame # noqa: F401\n# from .dataframe import JanitorSeries as Series # noqa: F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.19.0\"\n", "path": "janitor/__init__.py"}], "after_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}, {"content": "try:\n import janitor.xarray\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import *\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\n\n# from .dataframe import JanitorDataFrame as DataFrame # noqa: F401\n# from .dataframe import JanitorSeries as Series # noqa: F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.20.0\"\n", "path": "janitor/__init__.py"}]} | 1,275 | 184 |
gh_patches_debug_33758 | rasdani/github-patches | git_diff | kedro-org__kedro-2587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update default suggestions in `settings.py` to ones that work
## Description
Update docs and default suggestions in `settings.py`, because currently some of those suggestions don't actually work.
Currently, the `BaseSessionStore` is the default session store. The other possible stores a user can use are the `ShelveStore` and the `SQLiteStore` (currently part of viz).
The `ShelveStore` is the default suggestion to override the default in `settings.py`, but when users are using some sort of multiprocessing this store type will not work. See: https://github.com/kedro-org/kedro/issues/1442
Also look at the other default suggestions and verify that they make sense.
(Later consideration, but not part of this work)
If we move the `SQLiteStore` from viz to kedro core, we could add that as the default suggestion instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py`
Content:
```
1 """Project settings. There is no need to edit this file unless you want to change values
2 from the Kedro defaults. For further information, including these default values, see
3 https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
4
5 # Instantiated project hooks.
6 # from {{cookiecutter.python_package}}.hooks import ProjectHooks
7 # HOOKS = (ProjectHooks(),)
8
9 # Installed plugins for which to disable hook auto-registration.
10 # DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
11
12 # Class that manages storing KedroSession data.
13 # from kedro.framework.session.shelvestore import ShelveStore
14 # SESSION_STORE_CLASS = ShelveStore
15 # Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
16 # SESSION_STORE_ARGS = {
17 # "path": "./sessions"
18 # }
19
20 # Class that manages Kedro's library components.
21 # from kedro.framework.context import KedroContext
22 # CONTEXT_CLASS = KedroContext
23
24 # Directory that holds configuration.
25 # CONF_SOURCE = "conf"
26
27 # Class that manages how configuration is loaded.
28 # CONFIG_LOADER_CLASS = ConfigLoader
29 # Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.
30 # CONFIG_LOADER_ARGS = {
31 # "config_patterns": {
32 # "spark" : ["spark*/"],
33 # "parameters": ["parameters*", "parameters*/**", "**/parameters*"],
34 # }
35 # }
36
37 # Class that manages the Data Catalog.
38 # from kedro.io import DataCatalog
39 # DATA_CATALOG_CLASS = DataCatalog
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
@@ -3,6 +3,7 @@
https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
# Instantiated project hooks.
+# For example, after creating a hooks.py and defining a ProjectHooks class there, do
# from {{cookiecutter.python_package}}.hooks import ProjectHooks
# HOOKS = (ProjectHooks(),)
@@ -10,22 +11,19 @@
# DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
# Class that manages storing KedroSession data.
-# from kedro.framework.session.shelvestore import ShelveStore
-# SESSION_STORE_CLASS = ShelveStore
+# from kedro.framework.session.store import BaseSessionStore
+# SESSION_STORE_CLASS = BaseSessionStore
# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
# SESSION_STORE_ARGS = {
# "path": "./sessions"
# }
-# Class that manages Kedro's library components.
-# from kedro.framework.context import KedroContext
-# CONTEXT_CLASS = KedroContext
-
# Directory that holds configuration.
# CONF_SOURCE = "conf"
# Class that manages how configuration is loaded.
-# CONFIG_LOADER_CLASS = ConfigLoader
+# from kedro.config import OmegaConfigLoader
+# CONFIG_LOADER_CLASS = OmegaConfigLoader
# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.
# CONFIG_LOADER_ARGS = {
# "config_patterns": {
@@ -34,6 +32,10 @@
# }
# }
+# Class that manages Kedro's library components.
+# from kedro.framework.context import KedroContext
+# CONTEXT_CLASS = KedroContext
+
# Class that manages the Data Catalog.
# from kedro.io import DataCatalog
# DATA_CATALOG_CLASS = DataCatalog
| {"golden_diff": "diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\n--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n@@ -3,6 +3,7 @@\n https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n \n # Instantiated project hooks.\n+# For example, after creating a hooks.py and defining a ProjectHooks class there, do\n # from {{cookiecutter.python_package}}.hooks import ProjectHooks\n # HOOKS = (ProjectHooks(),)\n \n@@ -10,22 +11,19 @@\n # DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n \n # Class that manages storing KedroSession data.\n-# from kedro.framework.session.shelvestore import ShelveStore\n-# SESSION_STORE_CLASS = ShelveStore\n+# from kedro.framework.session.store import BaseSessionStore\n+# SESSION_STORE_CLASS = BaseSessionStore\n # Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n # SESSION_STORE_ARGS = {\n # \"path\": \"./sessions\"\n # }\n \n-# Class that manages Kedro's library components.\n-# from kedro.framework.context import KedroContext\n-# CONTEXT_CLASS = KedroContext\n-\n # Directory that holds configuration.\n # CONF_SOURCE = \"conf\"\n \n # Class that manages how configuration is loaded.\n-# CONFIG_LOADER_CLASS = ConfigLoader\n+# from kedro.config import OmegaConfigLoader\n+# CONFIG_LOADER_CLASS = OmegaConfigLoader\n # Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n # CONFIG_LOADER_ARGS = {\n # \"config_patterns\": {\n@@ -34,6 +32,10 @@\n # }\n # }\n \n+# Class that manages Kedro's library components.\n+# from kedro.framework.context import KedroContext\n+# CONTEXT_CLASS = KedroContext\n+\n # Class that manages the Data Catalog.\n # from kedro.io import DataCatalog\n # DATA_CATALOG_CLASS = DataCatalog\n", "issue": "Update default suggestions in `settings.py` to ones that work\n## Description\r\nUpdate docs and default suggestions in `settings.py`, because currently some of those suggestions don't actually work. \r\n\r\nCurrently, the `BaseSessionStore` is the default session store. The other possible stores a user can use are the `ShelveStore` and the `SQLiteStore` (currently part of viz).\r\n\r\nThe `ShelveStore` is the default suggestion to override the default in `settings.py`, but when users are using some sort of multiprocessing this store type will not work. See: https://github.com/kedro-org/kedro/issues/1442\r\n\r\nAlso look at the other default suggestions and verify that they make sense. \r\n\r\n(Later consideration, but not part of this work)\r\nIf we move the `SQLiteStore` from viz to kedro core, we could add that as the default suggestion instead. \r\n\n", "before_files": [{"content": "\"\"\"Project settings. There is no need to edit this file unless you want to change values\nfrom the Kedro defaults. 
For further information, including these default values, see\nhttps://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n\n# Instantiated project hooks.\n# from {{cookiecutter.python_package}}.hooks import ProjectHooks\n# HOOKS = (ProjectHooks(),)\n\n# Installed plugins for which to disable hook auto-registration.\n# DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n\n# Class that manages storing KedroSession data.\n# from kedro.framework.session.shelvestore import ShelveStore\n# SESSION_STORE_CLASS = ShelveStore\n# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n# SESSION_STORE_ARGS = {\n# \"path\": \"./sessions\"\n# }\n\n# Class that manages Kedro's library components.\n# from kedro.framework.context import KedroContext\n# CONTEXT_CLASS = KedroContext\n\n# Directory that holds configuration.\n# CONF_SOURCE = \"conf\"\n\n# Class that manages how configuration is loaded.\n# CONFIG_LOADER_CLASS = ConfigLoader\n# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n# CONFIG_LOADER_ARGS = {\n# \"config_patterns\": {\n# \"spark\" : [\"spark*/\"],\n# \"parameters\": [\"parameters*\", \"parameters*/**\", \"**/parameters*\"],\n# }\n# }\n\n# Class that manages the Data Catalog.\n# from kedro.io import DataCatalog\n# DATA_CATALOG_CLASS = DataCatalog\n", "path": "kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py"}], "after_files": [{"content": "\"\"\"Project settings. There is no need to edit this file unless you want to change values\nfrom the Kedro defaults. For further information, including these default values, see\nhttps://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n\n# Instantiated project hooks.\n# For example, after creating a hooks.py and defining a ProjectHooks class there, do\n# from {{cookiecutter.python_package}}.hooks import ProjectHooks\n# HOOKS = (ProjectHooks(),)\n\n# Installed plugins for which to disable hook auto-registration.\n# DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n\n# Class that manages storing KedroSession data.\n# from kedro.framework.session.store import BaseSessionStore\n# SESSION_STORE_CLASS = BaseSessionStore\n# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n# SESSION_STORE_ARGS = {\n# \"path\": \"./sessions\"\n# }\n\n# Directory that holds configuration.\n# CONF_SOURCE = \"conf\"\n\n# Class that manages how configuration is loaded.\n# from kedro.config import OmegaConfigLoader\n# CONFIG_LOADER_CLASS = OmegaConfigLoader\n# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n# CONFIG_LOADER_ARGS = {\n# \"config_patterns\": {\n# \"spark\" : [\"spark*/\"],\n# \"parameters\": [\"parameters*\", \"parameters*/**\", \"**/parameters*\"],\n# }\n# }\n\n# Class that manages Kedro's library components.\n# from kedro.framework.context import KedroContext\n# CONTEXT_CLASS = KedroContext\n\n# Class that manages the Data Catalog.\n# from kedro.io import DataCatalog\n# DATA_CATALOG_CLASS = DataCatalog\n", "path": "kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py"}]} | 871 | 496 |
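The multiprocessing caveat cited in this record is the key reason `ShelveStore` makes a poor default suggestion: `shelve` sits on top of a dbm file that is not safe for concurrent writers, so parallel runners sharing one session store can fail or corrupt it. A minimal sketch of the failure mode, assuming a shared store path (hypothetical, not Kedro code):

```python
import shelve
from multiprocessing import Pool


def write_entry(i: int) -> None:
    # Several processes opening the same dbm-backed file for writing is unsafe;
    # depending on the dbm backend this raises an error or corrupts the store.
    with shelve.open("/tmp/session_store") as db:
        db[f"run-{i}"] = {"status": "ok"}


if __name__ == "__main__":
    with Pool(4) as pool:
        pool.map(write_entry, range(8))
```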
gh_patches_debug_51452 | rasdani/github-patches | git_diff | lutris__lutris-389 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create desktop/application menu shortcut writes a bad .desktop file
File contents:
```
[Desktop Entry]
Type=Application
Name=%s
Icon=%s
Exec=lutris lutris:%s
Categories=Game
```
**How to reproduce**
Right click a game and select Create desktop shortcut.
Navigate to ~/Desktop
You see a file with name `gameslug-id.desktop` but it contains what's above. If you're in a file manager you see the game title instead of the filename, so it appears as `%s` there.
**Lutris debug output**
```
[system]:Executing which xdg-user-dir
```
Operating system: Arch Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/shortcuts.py`
Content:
```
1 """Desktop file creator."""
2 import os
3 import stat
4 import shutil
5 import subprocess
6
7 from textwrap import dedent
8 from xdg import BaseDirectory
9 from gi.repository import GLib
10
11 from lutris.util import system
12 from lutris.util.log import logger
13 from lutris.settings import CACHE_DIR
14
15
16 def get_xdg_basename(game_slug, game_id, legacy=False):
17 if legacy:
18 filename = "{}.desktop".format(game_slug)
19 else:
20 filename = "{}-{}.desktop".format(game_slug, game_id)
21 return filename
22
23
24 def create_launcher(game_slug, game_id, game_name, desktop=False, menu=False):
25 """Create a .desktop file."""
26 desktop_dir = (
27 GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)
28 )
29 launcher_content = dedent(
30 """
31 [Desktop Entry]
32 Type=Application
33 Name=%s
34 Icon=%s
35 Exec=lutris lutris:%s
36 Categories=Game
37 """.format(game_name, 'lutris_{}'.format(game_slug), game_id)
38 )
39
40 launcher_filename = get_xdg_basename(game_slug, game_id, legacy=False)
41 tmp_launcher_path = os.path.join(CACHE_DIR, launcher_filename)
42 tmp_launcher = open(tmp_launcher_path, "w")
43 tmp_launcher.write(launcher_content)
44 tmp_launcher.close()
45 os.chmod(tmp_launcher_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC |
46 stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
47
48 if desktop:
49 shutil.copy(tmp_launcher_path,
50 os.path.join(desktop_dir, launcher_filename))
51 if menu:
52 menu_path = os.path.join(GLib.get_user_data_dir(), 'applications')
53 shutil.copy(tmp_launcher_path,
54 os.path.join(menu_path, launcher_filename))
55 os.remove(tmp_launcher_path)
56
57
58 def get_launcher_path(game_slug, game_id):
59 """Return the path of a XDG game launcher.
60 When legacy is set, it will return the old path with only the slug,
61 otherwise it will return the path with slug + id
62 """
63 xdg_executable = 'xdg-user-dir'
64 if not system.find_executable(xdg_executable):
65 logger.error("%s not found", xdg_executable)
66 return
67 desktop_dir = subprocess.Popen([xdg_executable, 'DESKTOP'],
68 stdout=subprocess.PIPE).communicate()[0]
69 desktop_dir = str(desktop_dir).strip()
70
71 legacy_launcher_path = os.path.join(
72 desktop_dir, get_xdg_basename(game_slug, game_id, legacy=True)
73 )
74 # First check if legacy path exists, for backward compatibility
75 if system.path_exists(legacy_launcher_path):
76 return legacy_launcher_path
77 # Otherwise return new path, whether it exists or not
78 return os.path.join(
79 desktop_dir, get_xdg_basename(game_slug, game_id, legacy=False)
80 )
81
82
83 def get_menu_launcher_path(game_slug, game_id):
84 """Return the path to a XDG menu launcher, prioritizing legacy paths if
85 they exist
86 """
87 menu_dir = os.path.join(BaseDirectory.xdg_data_home, 'applications')
88 menu_path = os.path.join(
89 menu_dir, get_xdg_basename(game_slug, game_id, legacy=True)
90 )
91 if system.path_exists(menu_path):
92 return menu_path
93 return os.path.join(
94 menu_dir, get_xdg_basename(game_slug, game_id, legacy=False)
95 )
96
97
98 def desktop_launcher_exists(game_slug, game_id):
99 return system.path_exists(get_launcher_path(game_slug, game_id))
100
101
102 def menu_launcher_exists(game_slug, game_id):
103 return system.path_exists(get_menu_launcher_path(game_slug, game_id))
104
105
106 def remove_launcher(game_slug, game_id, desktop=False, menu=False):
107 """Remove existing .desktop file."""
108 if desktop:
109 launcher_path = get_launcher_path(game_slug, game_id)
110 if system.path_exists(launcher_path):
111 os.remove(launcher_path)
112
113 if menu:
114 menu_path = get_menu_launcher_path(game_slug, game_id)
115 if system.path_exists(menu_path):
116 os.remove(menu_path)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/shortcuts.py b/lutris/shortcuts.py
--- a/lutris/shortcuts.py
+++ b/lutris/shortcuts.py
@@ -30,9 +30,9 @@
"""
[Desktop Entry]
Type=Application
- Name=%s
- Icon=%s
- Exec=lutris lutris:%s
+ Name={}
+ Icon={}
+ Exec=lutris lutris:{}
Categories=Game
""".format(game_name, 'lutris_{}'.format(game_slug), game_id)
)
| {"golden_diff": "diff --git a/lutris/shortcuts.py b/lutris/shortcuts.py\n--- a/lutris/shortcuts.py\n+++ b/lutris/shortcuts.py\n@@ -30,9 +30,9 @@\n \"\"\"\n [Desktop Entry]\n Type=Application\n- Name=%s\n- Icon=%s\n- Exec=lutris lutris:%s\n+ Name={}\n+ Icon={}\n+ Exec=lutris lutris:{}\n Categories=Game\n \"\"\".format(game_name, 'lutris_{}'.format(game_slug), game_id)\n )\n", "issue": "Create desktop/application menu shortcut writes a bad .desktop file\nFile contents:\n\n```\n[Desktop Entry]\nType=Application\nName=%s\nIcon=%s\nExec=lutris lutris:%s\nCategories=Game\n```\n\n**How to reproduce**\nRight click a game and select Create desktop shortcut.\nNavigate to ~/Desktop\nYou see a file with name `gameslug-id.desktop` but it contains what's above. If you're in a file manager you see the game title instead of the filename, so it appears as `%s` there.\n\n**Lutris debug output**\n\n```\n[system]:Executing which xdg-user-dir\n```\n\nOperating system: Arch Linux\n\n", "before_files": [{"content": "\"\"\"Desktop file creator.\"\"\"\nimport os\nimport stat\nimport shutil\nimport subprocess\n\nfrom textwrap import dedent\nfrom xdg import BaseDirectory\nfrom gi.repository import GLib\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.settings import CACHE_DIR\n\n\ndef get_xdg_basename(game_slug, game_id, legacy=False):\n if legacy:\n filename = \"{}.desktop\".format(game_slug)\n else:\n filename = \"{}-{}.desktop\".format(game_slug, game_id)\n return filename\n\n\ndef create_launcher(game_slug, game_id, game_name, desktop=False, menu=False):\n \"\"\"Create a .desktop file.\"\"\"\n desktop_dir = (\n GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)\n )\n launcher_content = dedent(\n \"\"\"\n [Desktop Entry]\n Type=Application\n Name=%s\n Icon=%s\n Exec=lutris lutris:%s\n Categories=Game\n \"\"\".format(game_name, 'lutris_{}'.format(game_slug), game_id)\n )\n\n launcher_filename = get_xdg_basename(game_slug, game_id, legacy=False)\n tmp_launcher_path = os.path.join(CACHE_DIR, launcher_filename)\n tmp_launcher = open(tmp_launcher_path, \"w\")\n tmp_launcher.write(launcher_content)\n tmp_launcher.close()\n os.chmod(tmp_launcher_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC |\n stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)\n\n if desktop:\n shutil.copy(tmp_launcher_path,\n os.path.join(desktop_dir, launcher_filename))\n if menu:\n menu_path = os.path.join(GLib.get_user_data_dir(), 'applications')\n shutil.copy(tmp_launcher_path,\n os.path.join(menu_path, launcher_filename))\n os.remove(tmp_launcher_path)\n\n\ndef get_launcher_path(game_slug, game_id):\n \"\"\"Return the path of a XDG game launcher.\n When legacy is set, it will return the old path with only the slug,\n otherwise it will return the path with slug + id\n \"\"\"\n xdg_executable = 'xdg-user-dir'\n if not system.find_executable(xdg_executable):\n logger.error(\"%s not found\", xdg_executable)\n return\n desktop_dir = subprocess.Popen([xdg_executable, 'DESKTOP'],\n stdout=subprocess.PIPE).communicate()[0]\n desktop_dir = str(desktop_dir).strip()\n\n legacy_launcher_path = os.path.join(\n desktop_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n # First check if legacy path exists, for backward compatibility\n if system.path_exists(legacy_launcher_path):\n return legacy_launcher_path\n # Otherwise return new path, whether it exists or not\n return os.path.join(\n desktop_dir, get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef get_menu_launcher_path(game_slug, 
game_id):\n \"\"\"Return the path to a XDG menu launcher, prioritizing legacy paths if\n they exist\n \"\"\"\n menu_dir = os.path.join(BaseDirectory.xdg_data_home, 'applications')\n menu_path = os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n if system.path_exists(menu_path):\n return menu_path\n return os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef desktop_launcher_exists(game_slug, game_id):\n return system.path_exists(get_launcher_path(game_slug, game_id))\n\n\ndef menu_launcher_exists(game_slug, game_id):\n return system.path_exists(get_menu_launcher_path(game_slug, game_id))\n\n\ndef remove_launcher(game_slug, game_id, desktop=False, menu=False):\n \"\"\"Remove existing .desktop file.\"\"\"\n if desktop:\n launcher_path = get_launcher_path(game_slug, game_id)\n if system.path_exists(launcher_path):\n os.remove(launcher_path)\n\n if menu:\n menu_path = get_menu_launcher_path(game_slug, game_id)\n if system.path_exists(menu_path):\n os.remove(menu_path)\n", "path": "lutris/shortcuts.py"}], "after_files": [{"content": "\"\"\"Desktop file creator.\"\"\"\nimport os\nimport stat\nimport shutil\nimport subprocess\n\nfrom textwrap import dedent\nfrom xdg import BaseDirectory\nfrom gi.repository import GLib\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.settings import CACHE_DIR\n\n\ndef get_xdg_basename(game_slug, game_id, legacy=False):\n if legacy:\n filename = \"{}.desktop\".format(game_slug)\n else:\n filename = \"{}-{}.desktop\".format(game_slug, game_id)\n return filename\n\n\ndef create_launcher(game_slug, game_id, game_name, desktop=False, menu=False):\n \"\"\"Create a .desktop file.\"\"\"\n desktop_dir = (\n GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)\n )\n launcher_content = dedent(\n \"\"\"\n [Desktop Entry]\n Type=Application\n Name={}\n Icon={}\n Exec=lutris lutris:{}\n Categories=Game\n \"\"\".format(game_name, 'lutris_{}'.format(game_slug), game_id)\n )\n\n launcher_filename = get_xdg_basename(game_slug, game_id, legacy=False)\n tmp_launcher_path = os.path.join(CACHE_DIR, launcher_filename)\n tmp_launcher = open(tmp_launcher_path, \"w\")\n tmp_launcher.write(launcher_content)\n tmp_launcher.close()\n os.chmod(tmp_launcher_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC |\n stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)\n\n if desktop:\n shutil.copy(tmp_launcher_path,\n os.path.join(desktop_dir, launcher_filename))\n if menu:\n menu_path = os.path.join(GLib.get_user_data_dir(), 'applications')\n shutil.copy(tmp_launcher_path,\n os.path.join(menu_path, launcher_filename))\n os.remove(tmp_launcher_path)\n\n\ndef get_launcher_path(game_slug, game_id):\n \"\"\"Return the path of a XDG game launcher.\n When legacy is set, it will return the old path with only the slug,\n otherwise it will return the path with slug + id\n \"\"\"\n xdg_executable = 'xdg-user-dir'\n if not system.find_executable(xdg_executable):\n logger.error(\"%s not found\", xdg_executable)\n return\n desktop_dir = subprocess.Popen([xdg_executable, 'DESKTOP'],\n stdout=subprocess.PIPE).communicate()[0]\n desktop_dir = str(desktop_dir).strip()\n\n legacy_launcher_path = os.path.join(\n desktop_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n # First check if legacy path exists, for backward compatibility\n if system.path_exists(legacy_launcher_path):\n return legacy_launcher_path\n # Otherwise return new path, whether it exists or not\n return os.path.join(\n desktop_dir, 
get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef get_menu_launcher_path(game_slug, game_id):\n \"\"\"Return the path to a XDG menu launcher, prioritizing legacy paths if\n they exist\n \"\"\"\n menu_dir = os.path.join(BaseDirectory.xdg_data_home, 'applications')\n menu_path = os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n if system.path_exists(menu_path):\n return menu_path\n return os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef desktop_launcher_exists(game_slug, game_id):\n return system.path_exists(get_launcher_path(game_slug, game_id))\n\n\ndef menu_launcher_exists(game_slug, game_id):\n return system.path_exists(get_menu_launcher_path(game_slug, game_id))\n\n\ndef remove_launcher(game_slug, game_id, desktop=False, menu=False):\n \"\"\"Remove existing .desktop file.\"\"\"\n if desktop:\n launcher_path = get_launcher_path(game_slug, game_id)\n if system.path_exists(launcher_path):\n os.remove(launcher_path)\n\n if menu:\n menu_path = get_menu_launcher_path(game_slug, game_id)\n if system.path_exists(menu_path):\n os.remove(menu_path)\n", "path": "lutris/shortcuts.py"}]} | 1,537 | 130 |
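The root cause in `create_launcher()` above is mixing printf-style `%s` placeholders with `str.format()`: `.format()` only fills `{}` fields and leaves `%s` untouched, which is exactly what ends up in the written `.desktop` file. A minimal illustration with made-up values:

```python
broken = "Name=%s\nExec=lutris lutris:%s"
print(broken.format("Quake", 42))
# prints:
# Name=%s
# Exec=lutris lutris:%s        <- placeholders survive unchanged

fixed = "Name={}\nExec=lutris lutris:{}"
print(fixed.format("Quake", 42))
# prints:
# Name=Quake
# Exec=lutris lutris:42
```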
gh_patches_debug_4426 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error while running the exported model
Hi,
I was trying to run the example given [https://github.com/OpenNMT/OpenNMT-tf/tree/master/examples/serving/python](url).
I am getting the following error.
> Source: I am going.
Traceback (most recent call last):
File "ende_client.py", line 66, in <module>
main()
File "ende_client.py", line 60, in main
output = translator.translate([text])
File "ende_client.py", line 22, in translate
return self._postprocess(outputs)
File "ende_client.py", line 47, in _postprocess
texts.append(self._tokenizer.detokenize(tokens))
TypeError: detokenize(): incompatible function arguments. The following argument types are supported:
1. (self: pyonmttok.Tokenizer, tokens: list, features: object = None) -> str
> Invoked with: <pyonmttok.Tokenizer object at 0x147d10d0d538>, array([b'\xe2\x96\x81Ich', b'\xe2\x96\x81gehe', b'.'], dtype=object)
> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.features_inputter.ids_to_tokens._initializer
> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.labels_inputter.ids_to_tokens._initializer
> WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/alpha/guide/checkpoints#loading_mechanics for details.
>
I have the updated version of pyonmttok.
Thanks,
Sriram
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/serving/python/ende_client.py`
Content:
```
1 import argparse
2 import os
3
4 import tensorflow as tf
5 import tensorflow_addons as tfa # Register TensorFlow Addons kernels.
6
7 import pyonmttok
8
9
10 class EnDeTranslator(object):
11
12 def __init__(self, export_dir):
13 imported = tf.saved_model.load(export_dir)
14 self._translate_fn = imported.signatures["serving_default"]
15 sp_model_path = os.path.join(export_dir, "assets.extra", "wmtende.model")
16 self._tokenizer = pyonmttok.Tokenizer("none", sp_model_path=sp_model_path)
17
18 def translate(self, texts):
19 """Translates a batch of texts."""
20 inputs = self._preprocess(texts)
21 outputs = self._translate_fn(**inputs)
22 return self._postprocess(outputs)
23
24 def _preprocess(self, texts):
25 all_tokens = []
26 lengths = []
27 max_length = 0
28 for text in texts:
29 tokens, _ = self._tokenizer.tokenize(text)
30 length = len(tokens)
31 all_tokens.append(tokens)
32 lengths.append(length)
33 max_length = max(max_length, length)
34 for tokens, length in zip(all_tokens, lengths):
35 if length < max_length:
36 tokens += [""] * (max_length - length)
37
38 inputs = {
39 "tokens": tf.constant(all_tokens, dtype=tf.string),
40 "length": tf.constant(lengths, dtype=tf.int32)}
41 return inputs
42
43 def _postprocess(self, outputs):
44 texts = []
45 for tokens, length in zip(outputs["tokens"].numpy(), outputs["length"].numpy()):
46 tokens = tokens[0][:length[0]]
47 texts.append(self._tokenizer.detokenize(tokens))
48 return texts
49
50
51 def main():
52 parser = argparse.ArgumentParser(description="Translation client example")
53 parser.add_argument("export_dir", help="Saved model directory")
54 args = parser.parse_args()
55
56 translator = EnDeTranslator(args.export_dir)
57
58 while True:
59 text = input("Source: ")
60 output = translator.translate([text])
61 print("Target: %s" % output[0])
62 print("")
63
64
65 if __name__ == "__main__":
66 main()
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/serving/python/ende_client.py b/examples/serving/python/ende_client.py
--- a/examples/serving/python/ende_client.py
+++ b/examples/serving/python/ende_client.py
@@ -43,7 +43,7 @@
def _postprocess(self, outputs):
texts = []
for tokens, length in zip(outputs["tokens"].numpy(), outputs["length"].numpy()):
- tokens = tokens[0][:length[0]]
+ tokens = tokens[0][:length[0]].tolist()
texts.append(self._tokenizer.detokenize(tokens))
return texts
| {"golden_diff": "diff --git a/examples/serving/python/ende_client.py b/examples/serving/python/ende_client.py\n--- a/examples/serving/python/ende_client.py\n+++ b/examples/serving/python/ende_client.py\n@@ -43,7 +43,7 @@\n def _postprocess(self, outputs):\n texts = []\n for tokens, length in zip(outputs[\"tokens\"].numpy(), outputs[\"length\"].numpy()):\n- tokens = tokens[0][:length[0]]\n+ tokens = tokens[0][:length[0]].tolist()\n texts.append(self._tokenizer.detokenize(tokens))\n return texts\n", "issue": "Error while running the exported model \nHi,\r\n\r\nI was trying to run the example given [https://github.com/OpenNMT/OpenNMT-tf/tree/master/examples/serving/python](url).\r\n\r\nI am getting the following error.\r\n\r\n> Source: I am going.\r\nTraceback (most recent call last):\r\n File \"ende_client.py\", line 66, in <module>\r\n main()\r\n File \"ende_client.py\", line 60, in main\r\n output = translator.translate([text])\r\n File \"ende_client.py\", line 22, in translate\r\n return self._postprocess(outputs)\r\n File \"ende_client.py\", line 47, in _postprocess\r\n texts.append(self._tokenizer.detokenize(tokens))\r\nTypeError: detokenize(): incompatible function arguments. The following argument types are supported:\r\n 1. (self: pyonmttok.Tokenizer, tokens: list, features: object = None) -> str\r\n\r\n> Invoked with: <pyonmttok.Tokenizer object at 0x147d10d0d538>, array([b'\\xe2\\x96\\x81Ich', b'\\xe2\\x96\\x81gehe', b'.'], dtype=object)\r\n> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.features_inputter.ids_to_tokens._initializer\r\n> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.labels_inputter.ids_to_tokens._initializer\r\n> WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. 
See https://www.tensorflow.org/alpha/guide/checkpoints#loading_mechanics for details.\r\n> \r\n\r\nI have the updated version of pyonmttok.\r\n\r\nThanks,\r\nSriram\n", "before_files": [{"content": "import argparse\nimport os\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa # Register TensorFlow Addons kernels.\n\nimport pyonmttok\n\n\nclass EnDeTranslator(object):\n\n def __init__(self, export_dir):\n imported = tf.saved_model.load(export_dir)\n self._translate_fn = imported.signatures[\"serving_default\"]\n sp_model_path = os.path.join(export_dir, \"assets.extra\", \"wmtende.model\")\n self._tokenizer = pyonmttok.Tokenizer(\"none\", sp_model_path=sp_model_path)\n\n def translate(self, texts):\n \"\"\"Translates a batch of texts.\"\"\"\n inputs = self._preprocess(texts)\n outputs = self._translate_fn(**inputs)\n return self._postprocess(outputs)\n\n def _preprocess(self, texts):\n all_tokens = []\n lengths = []\n max_length = 0\n for text in texts:\n tokens, _ = self._tokenizer.tokenize(text)\n length = len(tokens)\n all_tokens.append(tokens)\n lengths.append(length)\n max_length = max(max_length, length)\n for tokens, length in zip(all_tokens, lengths):\n if length < max_length:\n tokens += [\"\"] * (max_length - length)\n\n inputs = {\n \"tokens\": tf.constant(all_tokens, dtype=tf.string),\n \"length\": tf.constant(lengths, dtype=tf.int32)}\n return inputs\n\n def _postprocess(self, outputs):\n texts = []\n for tokens, length in zip(outputs[\"tokens\"].numpy(), outputs[\"length\"].numpy()):\n tokens = tokens[0][:length[0]]\n texts.append(self._tokenizer.detokenize(tokens))\n return texts\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Translation client example\")\n parser.add_argument(\"export_dir\", help=\"Saved model directory\")\n args = parser.parse_args()\n\n translator = EnDeTranslator(args.export_dir)\n\n while True:\n text = input(\"Source: \")\n output = translator.translate([text])\n print(\"Target: %s\" % output[0])\n print(\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/serving/python/ende_client.py"}], "after_files": [{"content": "import argparse\nimport os\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa # Register TensorFlow Addons kernels.\n\nimport pyonmttok\n\n\nclass EnDeTranslator(object):\n\n def __init__(self, export_dir):\n imported = tf.saved_model.load(export_dir)\n self._translate_fn = imported.signatures[\"serving_default\"]\n sp_model_path = os.path.join(export_dir, \"assets.extra\", \"wmtende.model\")\n self._tokenizer = pyonmttok.Tokenizer(\"none\", sp_model_path=sp_model_path)\n\n def translate(self, texts):\n \"\"\"Translates a batch of texts.\"\"\"\n inputs = self._preprocess(texts)\n outputs = self._translate_fn(**inputs)\n return self._postprocess(outputs)\n\n def _preprocess(self, texts):\n all_tokens = []\n lengths = []\n max_length = 0\n for text in texts:\n tokens, _ = self._tokenizer.tokenize(text)\n length = len(tokens)\n all_tokens.append(tokens)\n lengths.append(length)\n max_length = max(max_length, length)\n for tokens, length in zip(all_tokens, lengths):\n if length < max_length:\n tokens += [\"\"] * (max_length - length)\n\n inputs = {\n \"tokens\": tf.constant(all_tokens, dtype=tf.string),\n \"length\": tf.constant(lengths, dtype=tf.int32)}\n return inputs\n\n def _postprocess(self, outputs):\n texts = []\n for tokens, length in zip(outputs[\"tokens\"].numpy(), outputs[\"length\"].numpy()):\n tokens = tokens[0][:length[0]].tolist()\n 
texts.append(self._tokenizer.detokenize(tokens))\n return texts\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Translation client example\")\n parser.add_argument(\"export_dir\", help=\"Saved model directory\")\n args = parser.parse_args()\n\n translator = EnDeTranslator(args.export_dir)\n\n while True:\n text = input(\"Source: \")\n output = translator.translate([text])\n print(\"Target: %s\" % output[0])\n print(\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/serving/python/ende_client.py"}]} | 1,284 | 129 |
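The `TypeError` in this record comes from handing `pyonmttok.Tokenizer.detokenize()` a NumPy array (the sliced result of `outputs["tokens"].numpy()`) when the binding only accepts a plain Python `list`; the patch's `.tolist()` call performs that conversion. A minimal sketch of the distinction (token values taken from the traceback, tokenizer assumed to exist):

```python
import numpy as np

tokens = np.array([b"\xe2\x96\x81Ich", b"\xe2\x96\x81gehe", b"."], dtype=object)
print(type(tokens))           # <class 'numpy.ndarray'> -- rejected by detokenize()
print(type(tokens.tolist()))  # <class 'list'>           -- accepted
# tokenizer.detokenize(tokens.tolist()) then succeeds.
```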
gh_patches_debug_22338 | rasdani/github-patches | git_diff | Kinto__kinto-554 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
id and last_modified should be stripped before validating the JSON schema
Otherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties : true`.
- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties
- See #256
- See #548
``` diff
try:
- jsonschema.validate(new, schema)
+ stripped = copy.deepcopy(new)
+ stripped.pop(self.model.id_field, None)
+ stripped.pop(self.model.modified_field, None)
+ jsonschema.validate(stripped, schema)
```
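
For illustration, a minimal standalone sketch of the same idea (the hard-coded field names here are assumptions for the example; in Kinto they come from `self.model.id_field` and `self.model.modified_field`):

```python
import copy

import jsonschema


def validate_ignoring_server_fields(record, schema, server_fields=("id", "last_modified")):
    """Validate a record against a JSON schema while ignoring server-managed fields."""
    stripped = copy.deepcopy(record)
    for field in server_fields:
        stripped.pop(field, None)
    jsonschema.validate(stripped, schema)  # raises jsonschema.exceptions.ValidationError on failure
```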
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/records.py`
Content:
```
1 import jsonschema
2 from cliquet import resource
3 from cliquet.errors import raise_invalid
4 from jsonschema import exceptions as jsonschema_exceptions
5 from pyramid.security import Authenticated
6 from pyramid.settings import asbool
7
8 from kinto.views import object_exists_or_404
9
10
11 class RecordSchema(resource.ResourceSchema):
12 class Options:
13 preserve_unknown = True
14
15
16 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
17
18
19 @resource.register(name='record',
20 collection_path=_parent_path + '/records',
21 record_path=_parent_path + '/records/{{id}}')
22 class Record(resource.ShareableResource):
23
24 mapping = RecordSchema()
25 schema_field = 'schema'
26
27 def __init__(self, *args, **kwargs):
28 super(Record, self).__init__(*args, **kwargs)
29
30 # Check if already fetched before (in batch).
31 collections = self.request.bound_data.setdefault('collections', {})
32 collection_uri = self.get_parent_id(self.request)
33 if collection_uri not in collections:
34 # Unknown yet, fetch from storage.
35 collection_parent_id = '/buckets/%s' % self.bucket_id
36 collection = object_exists_or_404(self.request,
37 collection_id='collection',
38 parent_id=collection_parent_id,
39 object_id=self.collection_id)
40 collections[collection_uri] = collection
41
42 self._collection = collections[collection_uri]
43
44 def get_parent_id(self, request):
45 self.bucket_id = request.matchdict['bucket_id']
46 self.collection_id = request.matchdict['collection_id']
47 return '/buckets/%s/collections/%s' % (self.bucket_id,
48 self.collection_id)
49
50 def is_known_field(self, field_name):
51 """Without schema, any field is considered as known."""
52 return True
53
54 def process_record(self, new, old=None):
55 """Validate records against collection schema, if any."""
56 new = super(Record, self).process_record(new, old)
57
58 schema = self._collection.get('schema')
59 settings = self.request.registry.settings
60 schema_validation = 'experimental_collection_schema_validation'
61 if not schema or not asbool(settings.get(schema_validation)):
62 return new
63
64 collection_timestamp = self._collection[self.model.modified_field]
65
66 try:
67 jsonschema.validate(new, schema)
68 new[self.schema_field] = collection_timestamp
69 except jsonschema_exceptions.ValidationError as e:
70 field = e.path.pop() if e.path else e.validator_value.pop()
71 raise_invalid(self.request, name=field, description=e.message)
72
73 return new
74
75 def collection_get(self):
76 result = super(Record, self).collection_get()
77 self._handle_cache_expires(self.request.response)
78 return result
79
80 def get(self):
81 result = super(Record, self).get()
82 self._handle_cache_expires(self.request.response)
83 return result
84
85 def _handle_cache_expires(self, response):
86 """If the parent collection defines a ``cache_expires`` attribute,
87 then cache-control response headers are sent.
88
89 .. note::
90
91 Those headers are also sent if the
92 ``kinto.record_cache_expires_seconds`` setting is defined.
93 """
94 is_anonymous = Authenticated not in self.request.effective_principals
95 if not is_anonymous:
96 return
97
98 cache_expires = self._collection.get('cache_expires')
99 if cache_expires is None:
100 by_bucket = 'kinto.%s_record_cache_expires_seconds' % (
101 self.bucket_id)
102 by_collection = '%s_%s_record_cache_expires_seconds' % (
103 self.bucket_id, self.collection_id)
104 settings = self.request.registry.settings
105 cache_expires = settings.get(by_collection,
106 settings.get(by_bucket))
107
108 if cache_expires is not None:
109 response.cache_expires(seconds=cache_expires)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -1,3 +1,5 @@
+import copy
+
import jsonschema
from cliquet import resource
from cliquet.errors import raise_invalid
@@ -64,12 +66,17 @@
collection_timestamp = self._collection[self.model.modified_field]
try:
- jsonschema.validate(new, schema)
- new[self.schema_field] = collection_timestamp
+ stripped = copy.deepcopy(new)
+ stripped.pop(self.model.id_field, None)
+ stripped.pop(self.model.modified_field, None)
+ stripped.pop(self.model.permissions_field, None)
+ stripped.pop(self.schema_field, None)
+ jsonschema.validate(stripped, schema)
except jsonschema_exceptions.ValidationError as e:
field = e.path.pop() if e.path else e.validator_value.pop()
raise_invalid(self.request, name=field, description=e.message)
+ new[self.schema_field] = collection_timestamp
return new
def collection_get(self):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -1,3 +1,5 @@\n+import copy\n+\n import jsonschema\n from cliquet import resource\n from cliquet.errors import raise_invalid\n@@ -64,12 +66,17 @@\n collection_timestamp = self._collection[self.model.modified_field]\n \n try:\n- jsonschema.validate(new, schema)\n- new[self.schema_field] = collection_timestamp\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ stripped.pop(self.model.permissions_field, None)\n+ stripped.pop(self.schema_field, None)\n+ jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n \n+ new[self.schema_field] = collection_timestamp\n return new\n \n def collection_get(self):\n", "issue": "id and last_modified should be stripped before validating the JSON schema\nOtherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties : true`.\n- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties\n- See #256 \n- See #548 \n\n``` diff\n try:\n- jsonschema.validate(new, schema)\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ jsonschema.validate(stripped, schema)\n```\n\nid and last_modified should be stripped before validating the JSON schema\nOtherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties : true`.\n- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties\n- See #256 \n- See #548 \n\n``` diff\n try:\n- jsonschema.validate(new, schema)\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ jsonschema.validate(stripped, schema)\n```\n\n", "before_files": [{"content": "import jsonschema\nfrom cliquet import resource\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = 
request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = 'kinto.%s_record_cache_expires_seconds' % (\n self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=cache_expires)\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import copy\n\nimport jsonschema\nfrom cliquet import resource\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n 
self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = 'kinto.%s_record_cache_expires_seconds' % (\n self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=cache_expires)\n", "path": "kinto/views/records.py"}]} | 1,565 | 242 |
gh_patches_debug_31727 | rasdani/github-patches | git_diff | onnx__onnx-5555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use pillow to replace opencv in reference evaluator
Caveat: https://github.com/python-pillow/Pillow/issues/6047#issuecomment-1038150443
cc @jcwchen
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onnx/reference/ops/op_image_decoder.py`
Content:
```
1 # Copyright (c) ONNX Project Contributors
2
3 # SPDX-License-Identifier: Apache-2.0
4 # pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613
5
6 import numpy as np
7
8 from onnx.reference.op_run import OpRun
9
10
11 class ImageDecoder(OpRun):
12 def _run( # type: ignore
13 self,
14 encoded,
15 pixel_format="RGB",
16 ):
17 try:
18 # pylint: disable=import-outside-toplevel`
19 import cv2
20 except ImportError as e:
21 raise ImportError(
22 "opencv-python must be installed to use the reference implementation of the ImageDecoder operator"
23 ) from e
24 decoded = None
25 if pixel_format == "BGR":
26 decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
27 elif pixel_format == "RGB":
28 decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
29 decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
30 elif pixel_format == "Grayscale":
31 decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)
32 decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)
33 else:
34 raise RuntimeError(f"pixel_format={pixel_format!r} is not supported.")
35 return (decoded,)
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onnx/reference/ops/op_image_decoder.py b/onnx/reference/ops/op_image_decoder.py
--- a/onnx/reference/ops/op_image_decoder.py
+++ b/onnx/reference/ops/op_image_decoder.py
@@ -1,7 +1,10 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
-# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613
+
+from __future__ import annotations
+
+import io
import numpy as np
@@ -9,27 +12,22 @@
class ImageDecoder(OpRun):
- def _run( # type: ignore
- self,
- encoded,
- pixel_format="RGB",
- ):
+ def _run(self, encoded: np.ndarray, pixel_format="RGB") -> tuple[np.ndarray]: # type: ignore
try:
- # pylint: disable=import-outside-toplevel`
- import cv2
+ import PIL.Image # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError(
- "opencv-python must be installed to use the reference implementation of the ImageDecoder operator"
+ "Pillow must be installed to use the reference implementation of the ImageDecoder operator"
) from e
- decoded = None
+ img = PIL.Image.open(io.BytesIO(encoded.tobytes()))
if pixel_format == "BGR":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
+ decoded = np.array(img)[:, :, ::-1]
elif pixel_format == "RGB":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
- decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
+ decoded = np.array(img)
elif pixel_format == "Grayscale":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)
+ img = img.convert("L")
+ decoded = np.array(img)
decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)
else:
- raise RuntimeError(f"pixel_format={pixel_format!r} is not supported.")
+ raise ValueError(f"pixel_format={pixel_format!r} is not supported.")
return (decoded,)
| {"golden_diff": "diff --git a/onnx/reference/ops/op_image_decoder.py b/onnx/reference/ops/op_image_decoder.py\n--- a/onnx/reference/ops/op_image_decoder.py\n+++ b/onnx/reference/ops/op_image_decoder.py\n@@ -1,7 +1,10 @@\n # Copyright (c) ONNX Project Contributors\n \n # SPDX-License-Identifier: Apache-2.0\n-# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613\n+\n+from __future__ import annotations\n+\n+import io\n \n import numpy as np\n \n@@ -9,27 +12,22 @@\n \n \n class ImageDecoder(OpRun):\n- def _run( # type: ignore\n- self,\n- encoded,\n- pixel_format=\"RGB\",\n- ):\n+ def _run(self, encoded: np.ndarray, pixel_format=\"RGB\") -> tuple[np.ndarray]: # type: ignore\n try:\n- # pylint: disable=import-outside-toplevel`\n- import cv2\n+ import PIL.Image # pylint: disable=import-outside-toplevel\n except ImportError as e:\n raise ImportError(\n- \"opencv-python must be installed to use the reference implementation of the ImageDecoder operator\"\n+ \"Pillow must be installed to use the reference implementation of the ImageDecoder operator\"\n ) from e\n- decoded = None\n+ img = PIL.Image.open(io.BytesIO(encoded.tobytes()))\n if pixel_format == \"BGR\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n+ decoded = np.array(img)[:, :, ::-1]\n elif pixel_format == \"RGB\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n- decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)\n+ decoded = np.array(img)\n elif pixel_format == \"Grayscale\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)\n+ img = img.convert(\"L\")\n+ decoded = np.array(img)\n decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)\n else:\n- raise RuntimeError(f\"pixel_format={pixel_format!r} is not supported.\")\n+ raise ValueError(f\"pixel_format={pixel_format!r} is not supported.\")\n return (decoded,)\n", "issue": "Use pillow to replace opencv in reference evaluator\nCaveat: https://github.com/python-pillow/Pillow/issues/6047#issuecomment-1038150443\r\n\r\ncc @jcwchen \n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n\n# SPDX-License-Identifier: Apache-2.0\n# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613\n\nimport numpy as np\n\nfrom onnx.reference.op_run import OpRun\n\n\nclass ImageDecoder(OpRun):\n def _run( # type: ignore\n self,\n encoded,\n pixel_format=\"RGB\",\n ):\n try:\n # pylint: disable=import-outside-toplevel`\n import cv2\n except ImportError as e:\n raise ImportError(\n \"opencv-python must be installed to use the reference implementation of the ImageDecoder operator\"\n ) from e\n decoded = None\n if pixel_format == \"BGR\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n elif pixel_format == \"RGB\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)\n elif pixel_format == \"Grayscale\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)\n decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)\n else:\n raise RuntimeError(f\"pixel_format={pixel_format!r} is not supported.\")\n return (decoded,)\n", "path": "onnx/reference/ops/op_image_decoder.py"}], "after_files": [{"content": "# Copyright (c) ONNX Project Contributors\n\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import annotations\n\nimport io\n\nimport numpy as np\n\nfrom onnx.reference.op_run import OpRun\n\n\nclass ImageDecoder(OpRun):\n def _run(self, encoded: np.ndarray, pixel_format=\"RGB\") -> tuple[np.ndarray]: # type: ignore\n try:\n import PIL.Image # pylint: 
disable=import-outside-toplevel\n except ImportError as e:\n raise ImportError(\n \"Pillow must be installed to use the reference implementation of the ImageDecoder operator\"\n ) from e\n img = PIL.Image.open(io.BytesIO(encoded.tobytes()))\n if pixel_format == \"BGR\":\n decoded = np.array(img)[:, :, ::-1]\n elif pixel_format == \"RGB\":\n decoded = np.array(img)\n elif pixel_format == \"Grayscale\":\n img = img.convert(\"L\")\n decoded = np.array(img)\n decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)\n else:\n raise ValueError(f\"pixel_format={pixel_format!r} is not supported.\")\n return (decoded,)\n", "path": "onnx/reference/ops/op_image_decoder.py"}]} | 691 | 538 |
gh_patches_debug_30815 | rasdani/github-patches | git_diff | PrefectHQ__prefect-238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement `map` for `LocalExecutor`
For some reason we avoided doing this, but it's actually entirely possible to do! Would be great for local debugging.
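
A rough sketch of what a synchronous `map` on `LocalExecutor` could look like, assuming the existing `dict_to_list` helper from `prefect.utilities.executors` is used to expand mapped upstream states into per-element submissions:

```python
from typing import Any, Callable, Iterable

from prefect.engine.executors.base import Executor
from prefect.utilities.executors import dict_to_list


class LocalExecutor(Executor):
    """Runs everything synchronously in the local thread."""

    def submit(self, fn: Callable, *args: Any, **kwargs: Any) -> Any:
        return fn(*args, **kwargs)

    def wait(self, futures: Any, timeout: Any = None) -> Any:
        return futures

    def map(self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any) -> Iterable[Any]:
        # Expand the mapped upstream states and execute each element immediately.
        return [
            self.submit(fn, *args, upstream_states=states, **kwargs)
            for states in dict_to_list(upstream_states)
        ]
```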
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/engine/executors/local.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
2
3 from prefect.engine.executors.base import Executor
4
5
6 class LocalExecutor(Executor):
7 """
8 An executor that runs all functions synchronously and immediately in
9 the local thread. To be used mainly for debugging purposes.
10 """
11
12 def submit(self, fn, *args, **kwargs):
13 """
14 Submit a function to the executor for execution. Returns the result of the computation.
15
16 Args:
17 - fn (Callable): function which is being submitted for execution
18 - *args (Any): arguments to be passed to `fn`
19 - **kwargs (Any): keyword arguments to be passed to `fn`
20
21 Returns:
22 - Any: the result of `fn(*args, **kwargs)`
23 """
24 return fn(*args, **kwargs)
25
26 def wait(self, futures, timeout=None):
27 """
28 Returns:
29 - Any: whatever `futures` were provided
30 """
31 return futures
32
```
Path: `src/prefect/engine/executors/__init__.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
2
3 """
4 Prefect Executors implement the logic for how Tasks are run. The standard interface
5 for an Executor consists of the following methods:
6
7 - `submit(fn, *args, **kwargs)`: submit `fn(*args, **kwargs)` for execution;
8 note that this function is (in general) non-blocking, meaning that `executor.submit(...)`
9 will _immediately_ return a future-like object regardless of whether `fn(*args, **kwargs)`
10 has completed running
11 - `submit_with_context(fn, *args, context, **kwargs)`: submit `fn(*args,
12 **kwargs)` for execution with the provided `prefect.context`
13 - `wait(object)`: resolves any objects returned by `executor.submit` to
14 their values; this function _will_ block until execution of `object` is complete
15 - `map(fn, *args, upstream_states, **kwargs)`: submit function to be mapped
16 over based on the edge information contained in `upstream_states`. Any "mapped" Edge
17 will be converted into multiple function submissions, one for each value of the upstream mapped tasks.
18
19 Currently, the available executor options are:
20
21 - `LocalExecutor`: the no frills, straightforward executor - great for simple
22 debugging; tasks are executed immediately upon being called by `executor.submit()`.
23 Note that the `map` feature is currently _not_ supported with this executor.
24 - `SynchronousExecutor`: an executor that runs on `dask` primitives with the
25 synchronous dask scheduler; currently the default executor
26 - `DaskExecutor`: the most feature-rich of the executors, this executor runs
27 on `dask.distributed` and has support for multiprocessing, multithreading, and distributed execution.
28
29 Which executor you choose depends on whether you intend to use things like parallelism
30 of task execution.
31 """
32 import sys
33
34 from warnings import warn as _warn
35 from importlib import import_module as _import_module
36
37 import prefect as _prefect
38 from prefect.engine.executors.base import Executor
39 from prefect.engine.executors.local import LocalExecutor
40 from prefect.engine.executors.sync import SynchronousExecutor
41
42 if sys.version_info >= (3, 5):
43 from prefect.engine.executors.dask import DaskExecutor
44
45 try:
46 cfg_exec = _prefect.config.engine.executor
47 *module, cls_name = cfg_exec.split(".")
48 module = _import_module(".".join(module))
49 DEFAULT_EXECUTOR = getattr(module, cls_name)()
50 except:
51 _warn(
52 "Could not import {}, using prefect.engine.executors.LocalExecutor instead.".format(
53 _prefect.config.engine.executor
54 )
55 )
56 DEFAULT_EXECUTOR = LocalExecutor()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/engine/executors/__init__.py b/src/prefect/engine/executors/__init__.py
--- a/src/prefect/engine/executors/__init__.py
+++ b/src/prefect/engine/executors/__init__.py
@@ -20,7 +20,6 @@
- `LocalExecutor`: the no frills, straightforward executor - great for simple
debugging; tasks are executed immediately upon being called by `executor.submit()`.
- Note that the `map` feature is currently _not_ supported with this executor.
- `SynchronousExecutor`: an executor that runs on `dask` primitives with the
synchronous dask scheduler; currently the default executor
- `DaskExecutor`: the most feature-rich of the executors, this executor runs
diff --git a/src/prefect/engine/executors/local.py b/src/prefect/engine/executors/local.py
--- a/src/prefect/engine/executors/local.py
+++ b/src/prefect/engine/executors/local.py
@@ -1,6 +1,9 @@
# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
+from typing import Any, Callable, Iterable
+
from prefect.engine.executors.base import Executor
+from prefect.utilities.executors import dict_to_list
class LocalExecutor(Executor):
@@ -9,6 +12,17 @@
the local thread. To be used mainly for debugging purposes.
"""
+ def map(
+ self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any
+ ) -> Iterable[Any]:
+
+ states = dict_to_list(upstream_states)
+ results = []
+ for elem in states:
+ results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))
+
+ return results
+
def submit(self, fn, *args, **kwargs):
"""
Submit a function to the executor for execution. Returns the result of the computation.
| {"golden_diff": "diff --git a/src/prefect/engine/executors/__init__.py b/src/prefect/engine/executors/__init__.py\n--- a/src/prefect/engine/executors/__init__.py\n+++ b/src/prefect/engine/executors/__init__.py\n@@ -20,7 +20,6 @@\n \n - `LocalExecutor`: the no frills, straightforward executor - great for simple\n debugging; tasks are executed immediately upon being called by `executor.submit()`.\n- Note that the `map` feature is currently _not_ supported with this executor.\n - `SynchronousExecutor`: an executor that runs on `dask` primitives with the\n synchronous dask scheduler; currently the default executor\n - `DaskExecutor`: the most feature-rich of the executors, this executor runs\ndiff --git a/src/prefect/engine/executors/local.py b/src/prefect/engine/executors/local.py\n--- a/src/prefect/engine/executors/local.py\n+++ b/src/prefect/engine/executors/local.py\n@@ -1,6 +1,9 @@\n # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n \n+from typing import Any, Callable, Iterable\n+\n from prefect.engine.executors.base import Executor\n+from prefect.utilities.executors import dict_to_list\n \n \n class LocalExecutor(Executor):\n@@ -9,6 +12,17 @@\n the local thread. To be used mainly for debugging purposes.\n \"\"\"\n \n+ def map(\n+ self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any\n+ ) -> Iterable[Any]:\n+\n+ states = dict_to_list(upstream_states)\n+ results = []\n+ for elem in states:\n+ results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))\n+\n+ return results\n+\n def submit(self, fn, *args, **kwargs):\n \"\"\"\n Submit a function to the executor for execution. Returns the result of the computation.\n", "issue": "Implement `map` for `LocalExecutor`\nFor some reason we avoided doing this, but it's actually entirely possible to do! Would be great for local debugging.\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom prefect.engine.executors.base import Executor\n\n\nclass LocalExecutor(Executor):\n \"\"\"\n An executor that runs all functions synchronously and immediately in\n the local thread. To be used mainly for debugging purposes.\n \"\"\"\n\n def submit(self, fn, *args, **kwargs):\n \"\"\"\n Submit a function to the executor for execution. Returns the result of the computation.\n\n Args:\n - fn (Callable): function which is being submitted for execution\n - *args (Any): arguments to be passed to `fn`\n - **kwargs (Any): keyword arguments to be passed to `fn`\n\n Returns:\n - Any: the result of `fn(*args, **kwargs)`\n \"\"\"\n return fn(*args, **kwargs)\n\n def wait(self, futures, timeout=None):\n \"\"\"\n Returns:\n - Any: whatever `futures` were provided\n \"\"\"\n return futures\n", "path": "src/prefect/engine/executors/local.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\n\"\"\"\nPrefect Executors implement the logic for how Tasks are run. 
The standard interface\nfor an Executor consists of the following methods:\n\n- `submit(fn, *args, **kwargs)`: submit `fn(*args, **kwargs)` for execution;\n note that this function is (in general) non-blocking, meaning that `executor.submit(...)`\n will _immediately_ return a future-like object regardless of whether `fn(*args, **kwargs)`\n has completed running\n- `submit_with_context(fn, *args, context, **kwargs)`: submit `fn(*args,\n **kwargs)` for execution with the provided `prefect.context`\n- `wait(object)`: resolves any objects returned by `executor.submit` to\n their values; this function _will_ block until execution of `object` is complete\n- `map(fn, *args, upstream_states, **kwargs)`: submit function to be mapped\n over based on the edge information contained in `upstream_states`. Any \"mapped\" Edge\n will be converted into multiple function submissions, one for each value of the upstream mapped tasks.\n\nCurrently, the available executor options are:\n\n- `LocalExecutor`: the no frills, straightforward executor - great for simple\n debugging; tasks are executed immediately upon being called by `executor.submit()`.\n Note that the `map` feature is currently _not_ supported with this executor.\n- `SynchronousExecutor`: an executor that runs on `dask` primitives with the\n synchronous dask scheduler; currently the default executor\n- `DaskExecutor`: the most feature-rich of the executors, this executor runs\n on `dask.distributed` and has support for multiprocessing, multithreading, and distributed execution.\n\nWhich executor you choose depends on whether you intend to use things like parallelism\nof task execution.\n\"\"\"\nimport sys\n\nfrom warnings import warn as _warn\nfrom importlib import import_module as _import_module\n\nimport prefect as _prefect\nfrom prefect.engine.executors.base import Executor\nfrom prefect.engine.executors.local import LocalExecutor\nfrom prefect.engine.executors.sync import SynchronousExecutor\n\nif sys.version_info >= (3, 5):\n from prefect.engine.executors.dask import DaskExecutor\n\ntry:\n cfg_exec = _prefect.config.engine.executor\n *module, cls_name = cfg_exec.split(\".\")\n module = _import_module(\".\".join(module))\n DEFAULT_EXECUTOR = getattr(module, cls_name)()\nexcept:\n _warn(\n \"Could not import {}, using prefect.engine.executors.LocalExecutor instead.\".format(\n _prefect.config.engine.executor\n )\n )\n DEFAULT_EXECUTOR = LocalExecutor()\n", "path": "src/prefect/engine/executors/__init__.py"}], "after_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom typing import Any, Callable, Iterable\n\nfrom prefect.engine.executors.base import Executor\nfrom prefect.utilities.executors import dict_to_list\n\n\nclass LocalExecutor(Executor):\n \"\"\"\n An executor that runs all functions synchronously and immediately in\n the local thread. To be used mainly for debugging purposes.\n \"\"\"\n\n def map(\n self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any\n ) -> Iterable[Any]:\n\n states = dict_to_list(upstream_states)\n results = []\n for elem in states:\n results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))\n\n return results\n\n def submit(self, fn, *args, **kwargs):\n \"\"\"\n Submit a function to the executor for execution. 
Returns the result of the computation.\n\n Args:\n - fn (Callable): function which is being submitted for execution\n - *args (Any): arguments to be passed to `fn`\n - **kwargs (Any): keyword arguments to be passed to `fn`\n\n Returns:\n - Any: the result of `fn(*args, **kwargs)`\n \"\"\"\n return fn(*args, **kwargs)\n\n def wait(self, futures, timeout=None):\n \"\"\"\n Returns:\n - Any: whatever `futures` were provided\n \"\"\"\n return futures\n", "path": "src/prefect/engine/executors/local.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\n\"\"\"\nPrefect Executors implement the logic for how Tasks are run. The standard interface\nfor an Executor consists of the following methods:\n\n- `submit(fn, *args, **kwargs)`: submit `fn(*args, **kwargs)` for execution;\n note that this function is (in general) non-blocking, meaning that `executor.submit(...)`\n will _immediately_ return a future-like object regardless of whether `fn(*args, **kwargs)`\n has completed running\n- `submit_with_context(fn, *args, context, **kwargs)`: submit `fn(*args,\n **kwargs)` for execution with the provided `prefect.context`\n- `wait(object)`: resolves any objects returned by `executor.submit` to\n their values; this function _will_ block until execution of `object` is complete\n- `map(fn, *args, upstream_states, **kwargs)`: submit function to be mapped\n over based on the edge information contained in `upstream_states`. Any \"mapped\" Edge\n will be converted into multiple function submissions, one for each value of the upstream mapped tasks.\n\nCurrently, the available executor options are:\n\n- `LocalExecutor`: the no frills, straightforward executor - great for simple\n debugging; tasks are executed immediately upon being called by `executor.submit()`.\n- `SynchronousExecutor`: an executor that runs on `dask` primitives with the\n synchronous dask scheduler; currently the default executor\n- `DaskExecutor`: the most feature-rich of the executors, this executor runs\n on `dask.distributed` and has support for multiprocessing, multithreading, and distributed execution.\n\nWhich executor you choose depends on whether you intend to use things like parallelism\nof task execution.\n\"\"\"\nimport sys\n\nfrom warnings import warn as _warn\nfrom importlib import import_module as _import_module\n\nimport prefect as _prefect\nfrom prefect.engine.executors.base import Executor\nfrom prefect.engine.executors.local import LocalExecutor\nfrom prefect.engine.executors.sync import SynchronousExecutor\n\nif sys.version_info >= (3, 5):\n from prefect.engine.executors.dask import DaskExecutor\n\ntry:\n cfg_exec = _prefect.config.engine.executor\n *module, cls_name = cfg_exec.split(\".\")\n module = _import_module(\".\".join(module))\n DEFAULT_EXECUTOR = getattr(module, cls_name)()\nexcept:\n _warn(\n \"Could not import {}, using prefect.engine.executors.LocalExecutor instead.\".format(\n _prefect.config.engine.executor\n )\n )\n DEFAULT_EXECUTOR = LocalExecutor()\n", "path": "src/prefect/engine/executors/__init__.py"}]} | 1,280 | 432 |
gh_patches_debug_17691 | rasdani/github-patches | git_diff | docker__docker-py-867 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No documentation for network API
The following are missing documentation ([readthedocs](http://docker-py.readthedocs.org/)).
- [x] `Client.networks`
- [x] `Client.create_network`
- [x] `Client.remove_network`
- [x] `Client.inspect_network`
- [x] `Client.connect_container_to_network`
- [x] `Client.disconnect_container_from_network`
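
For reference, a short usage sketch of the methods listed above (the `docker.Client` entry point and the `Id` key in the create response are assumptions based on the 1.x API of that era, not taken from existing docs):

```python
import docker

client = docker.Client()  # assumed 1.x-style client

network = client.create_network("isolated_nw", driver="bridge")
client.connect_container_to_network(container="my-container", net_id=network["Id"])
print(client.inspect_network(network["Id"]))
print(client.networks(names=["isolated_nw"]))
client.disconnect_container_from_network(container="my-container", net_id=network["Id"])
client.remove_network(network["Id"])
```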
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/volume.py`
Content:
```
1 from .. import utils
2
3
4 class VolumeApiMixin(object):
5 @utils.minimum_version('1.21')
6 def volumes(self, filters=None):
7 params = {
8 'filters': utils.convert_filters(filters) if filters else None
9 }
10 url = self._url('/volumes')
11 return self._result(self._get(url, params=params), True)
12
13 @utils.minimum_version('1.21')
14 def create_volume(self, name, driver=None, driver_opts=None):
15 url = self._url('/volumes/create')
16 if driver_opts is not None and not isinstance(driver_opts, dict):
17 raise TypeError('driver_opts must be a dictionary')
18
19 data = {
20 'Name': name,
21 'Driver': driver,
22 'DriverOpts': driver_opts,
23 }
24 return self._result(self._post_json(url, data=data), True)
25
26 @utils.minimum_version('1.21')
27 def inspect_volume(self, name):
28 url = self._url('/volumes/{0}', name)
29 return self._result(self._get(url), True)
30
31 @utils.minimum_version('1.21')
32 def remove_volume(self, name):
33 url = self._url('/volumes/{0}', name)
34 resp = self._delete(url)
35 self._raise_for_status(resp)
36 return True
37
```
Path: `docker/api/network.py`
Content:
```
1 import json
2
3 from ..utils import check_resource, minimum_version
4
5
6 class NetworkApiMixin(object):
7 @minimum_version('1.21')
8 def networks(self, names=None, ids=None):
9 filters = {}
10 if names:
11 filters['name'] = names
12 if ids:
13 filters['id'] = ids
14
15 params = {'filters': json.dumps(filters)}
16
17 url = self._url("/networks")
18 res = self._get(url, params=params)
19 return self._result(res, json=True)
20
21 @minimum_version('1.21')
22 def create_network(self, name, driver=None):
23 data = {
24 'name': name,
25 'driver': driver,
26 }
27 url = self._url("/networks/create")
28 res = self._post_json(url, data=data)
29 return self._result(res, json=True)
30
31 @minimum_version('1.21')
32 def remove_network(self, net_id):
33 url = self._url("/networks/{0}", net_id)
34 res = self._delete(url)
35 self._raise_for_status(res)
36
37 @minimum_version('1.21')
38 def inspect_network(self, net_id):
39 url = self._url("/networks/{0}", net_id)
40 res = self._get(url)
41 return self._result(res, json=True)
42
43 @check_resource
44 @minimum_version('1.21')
45 def connect_container_to_network(self, container, net_id):
46 data = {"container": container}
47 url = self._url("/networks/{0}/connect", net_id)
48 self._post_json(url, data=data)
49
50 @check_resource
51 @minimum_version('1.21')
52 def disconnect_container_from_network(self, container, net_id):
53 data = {"container": container}
54 url = self._url("/networks/{0}/disconnect", net_id)
55 self._post_json(url, data=data)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -19,10 +19,14 @@
return self._result(res, json=True)
@minimum_version('1.21')
- def create_network(self, name, driver=None):
+ def create_network(self, name, driver=None, options=None):
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
data = {
'name': name,
'driver': driver,
+ 'options': options
}
url = self._url("/networks/create")
res = self._post_json(url, data=data)
diff --git a/docker/api/volume.py b/docker/api/volume.py
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -33,4 +33,3 @@
url = self._url('/volumes/{0}', name)
resp = self._delete(url)
self._raise_for_status(resp)
- return True
| {"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -19,10 +19,14 @@\n return self._result(res, json=True)\n \n @minimum_version('1.21')\n- def create_network(self, name, driver=None):\n+ def create_network(self, name, driver=None, options=None):\n+ if options is not None and not isinstance(options, dict):\n+ raise TypeError('options must be a dictionary')\n+\n data = {\n 'name': name,\n 'driver': driver,\n+ 'options': options\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\ndiff --git a/docker/api/volume.py b/docker/api/volume.py\n--- a/docker/api/volume.py\n+++ b/docker/api/volume.py\n@@ -33,4 +33,3 @@\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n- return True\n", "issue": "No documentation for network api\nThe following have missing documentation ([readthedocs](http://docker-py.readthedocs.org/)).\n- [x] `Client.networks`\n- [x] `Client.create_network`\n- [x] `Client.remove_network`\n- [x] `Client.inspect_network`\n- [x] `Client.connect_container_to_network`\n- [x] `Client.disconnect_container_from_network`\n\n", "before_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n return True\n", "path": "docker/api/volume.py"}, {"content": "import json\n\nfrom ..utils import check_resource, minimum_version\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None):\n data = {\n 'name': name,\n 'driver': driver,\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/connect\", net_id)\n self._post_json(url, data=data)\n\n 
@check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n self._post_json(url, data=data)\n", "path": "docker/api/network.py"}], "after_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n", "path": "docker/api/volume.py"}, {"content": "import json\n\nfrom ..utils import check_resource, minimum_version\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'name': name,\n 'driver': driver,\n 'options': options\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/connect\", net_id)\n self._post_json(url, data=data)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n self._post_json(url, data=data)\n", "path": "docker/api/network.py"}]} | 1,231 | 243 |
gh_patches_debug_4081 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8054 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pin click
resolves #8048
### Description
Pin main to `click>=8.1.1,<8.1.4`
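
Roughly, the requested pin in `core/setup.py`'s `install_requires` would look like this (surrounding entries shown for context; the merged change may use a different lower bound):

```python
install_requires = [
    "Jinja2==3.1.2",
    "agate>=1.6,<1.6.4",
    "click>=8.1.1,<8.1.4",  # upper bound kept below 8.1.4 per this issue
    "colorama>=0.3.9,<0.4.6",
    # ... remaining pins unchanged
]
```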
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.3.4"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.main:main"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.6.4",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.6",
53 "hologram>=0.0.14,<=0.0.15",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.0.4",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>=20.9,<22.0",
61 "sqlparse>=0.2.3,<0.4.4",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec~=0.9.0",
66 "pytz>=2015.7",
67 # the following are all to match snowflake-connector-python
68 "requests<3.0.0",
69 "idna>=2.5,<4",
70 "cffi>=1.9,<2.0.0",
71 "pyyaml>=6.0",
72 ],
73 zip_safe=False,
74 classifiers=[
75 "Development Status :: 5 - Production/Stable",
76 "License :: OSI Approved :: Apache Software License",
77 "Operating System :: Microsoft :: Windows",
78 "Operating System :: MacOS :: MacOS X",
79 "Operating System :: POSIX :: Linux",
80 "Programming Language :: Python :: 3.7",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 ],
85 python_requires=">=3.7.2",
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -48,7 +48,8 @@
install_requires=[
"Jinja2==3.1.2",
"agate>=1.6,<1.6.4",
- "click>=7.0,<9",
+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558
+ "click>=7.0,<8.1.4",
"colorama>=0.3.9,<0.4.6",
"hologram>=0.0.14,<=0.0.15",
"isodate>=0.6,<0.7",
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -48,7 +48,8 @@\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n- \"click>=7.0,<9\",\n+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n+ \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n", "issue": "pin click\nresolves #8048 \r\n\r\n### Description\r\n\r\nPin main to `click>=8.1.1,<8.1.4`\r\n\r\n### Checklist\r\n\r\n- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me\r\n- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)\r\n- [ ] I have run this code in development and it appears to resolve the stated issue\r\n- [ ] This PR includes tests, or tests are not required/relevant for this PR\r\n- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR\r\n- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.main:main\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.4\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec~=0.9.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n 
\"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.main:main\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.4\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec~=0.9.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]} | 1,438 | 171 |
gh_patches_debug_1005 | rasdani/github-patches | git_diff | Pycord-Development__pycord-1218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mypy can't type check pycord when namespace_packages are enabled
### Summary
Mypy errors when using pycord with namespace_packages flag enabled
### Reproduction Steps
Run mypy against a simple pycord setup.
An example set up is as follows:
```
my-repo/
├─ my_bot/
│ ├─ bot.py
.mypy.ini
```
Run mypy via: `mypy my_bot/`
Mypy config:
```ini
[mypy]
namespace_packages = True
ignore_missing_imports = True
```
### Minimal Reproducible Code
```python
`from discord import ApplicationCommand` in bot.py
```
### Expected Results
Type checking works as expected with `namespace_packages` enabled
### Actual Results
Type checking errors with:
```sh
virtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: "discord.commands.__init__" and "discord.commands"
Found 1 error in 1 file (errors prevented further checking)
```
### Intents
N/A
### System Information
```yaml
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b3
- aiohttp v3.8.1
- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64
```
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
Mypy won't error if `namespace_packages` is `False`, but then it cannot infer the types properly and will result in errors such as:
```sh
app/bot.py:1: error: Module "discord" has no attribute "ApplicationCommand"; maybe "ApplicationCommandMixin"?
```
This issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?.
Mypy can't type check pycord when namespace_packages are enabled
### Summary
Mypy errors when using pycord with namespace_packages flag enabled
### Reproduction Steps
Run mypy against a simple pycord setup.
An example set up is as follows:
```
my-repo/
├─ my_bot/
│ ├─ bot.py
.mypy.ini
```
Run mypy via: `mypy my_bot/`
Mypy config:
```ini
[mypy]
namespace_packages = True
ignore_missing_imports = True
```
### Minimal Reproducible Code
```python
`from discord import ApplicationCommand` in bot.py
```
### Expected Results
Type checking works as expected with `namespace_packages` enabled
### Actual Results
Type checking errors with:
```sh
virtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: "discord.commands.__init__" and "discord.commands"
Found 1 error in 1 file (errors prevented further checking)
```
### Intents
N/A
### System Information
```yaml
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b3
- aiohttp v3.8.1
- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64
```
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
Mypy won't error if `namespace_packages` is `False`, but then it cannot infer the types properly and will result in errors such as:
```sh
app/bot.py:1: error: Module "discord" has no attribute "ApplicationCommand"; maybe "ApplicationCommandMixin"?
```
This issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/__init__.py`
Content:
```
1 """
2 Discord API Wrapper
3 ~~~~~~~~~~~~~~~~~~~
4
5 A basic wrapper for the Discord API.
6
7 :copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development
8 :license: MIT, see LICENSE for more details.
9
10 """
11
12 __title__ = "pycord"
13 __author__ = "Pycord Development"
14 __license__ = "MIT"
15 __copyright__ = "Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development"
16 __version__ = "2.0.0b5"
17
18 __path__ = __import__("pkgutil").extend_path(__path__, __name__)
19
20 import logging
21 from typing import Literal, NamedTuple
22
23 from . import abc, opus, sinks, ui, utils
24 from .activity import *
25 from .appinfo import *
26 from .asset import *
27 from .audit_logs import *
28 from .bot import *
29 from .channel import *
30 from .client import *
31 from .cog import Cog
32 from .colour import *
33 from .commands.__init__ import *
34 from .components import *
35 from .embeds import *
36 from .emoji import *
37 from .enums import *
38 from .errors import *
39 from .file import *
40 from .flags import *
41 from .guild import *
42 from .http import *
43 from .integrations import *
44 from .interactions import *
45 from .invite import *
46 from .member import *
47 from .mentions import *
48 from .message import *
49 from .object import *
50 from .partial_emoji import *
51 from .permissions import *
52 from .player import *
53 from .raw_models import *
54 from .reaction import *
55 from .role import *
56 from .scheduled_events import *
57 from .shard import *
58 from .stage_instance import *
59 from .sticker import *
60 from .team import *
61 from .template import *
62 from .threads import *
63 from .user import *
64 from .voice_client import *
65 from .webhook import *
66 from .welcome_screen import *
67 from .widget import *
68
69
70 class VersionInfo(NamedTuple):
71 major: int
72 minor: int
73 micro: int
74 releaselevel: Literal["alpha", "beta", "candidate", "final"]
75 serial: int
76
77
78 version_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel="beta", serial=5)
79
80 logging.getLogger(__name__).addHandler(logging.NullHandler())
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/__init__.py b/discord/__init__.py
--- a/discord/__init__.py
+++ b/discord/__init__.py
@@ -30,7 +30,7 @@
from .client import *
from .cog import Cog
from .colour import *
-from .commands.__init__ import *
+from .commands import *
from .components import *
from .embeds import *
from .emoji import *
| {"golden_diff": "diff --git a/discord/__init__.py b/discord/__init__.py\n--- a/discord/__init__.py\n+++ b/discord/__init__.py\n@@ -30,7 +30,7 @@\n from .client import *\n from .cog import Cog\n from .colour import *\n-from .commands.__init__ import *\n+from .commands import *\n from .components import *\n from .embeds import *\n from .emoji import *\n", "issue": "Mypy can't type check pycord when namespace_packages are enabled\n### Summary\r\n\r\nMypy errors when using pycord with namespace_packages flag enabled\r\n\r\n### Reproduction Steps\r\n\r\nRun mypy against a simple pycord setup.\r\n\r\nAn example set up is as follows:\r\n\r\n```\r\nmy-repo/\r\n\u251c\u2500 my_bot/\r\n\u2502 \u251c\u2500 bot.py\r\n.mypy.ini\r\n```\r\n\r\nRun mypy via: `mypy my_bot/`\r\n\r\nMypy config:\r\n```ini\r\n[mypy]\r\nnamespace_packages = True\r\nignore_missing_imports = True\r\n```\r\n\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n`from discord import ApplicationCommand` in bot.py\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nType checking works as expected with `namespace_packages` enabled\r\n\r\n### Actual Results\r\n\r\nType checking errors with:\r\n```sh\r\nvirtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: \"discord.commands.__init__\" and \"discord.commands\"\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### Intents\r\n\r\nN/A\r\n\r\n### System Information\r\n\r\n```yaml\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b3\r\n- aiohttp v3.8.1\r\n- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64\r\n```\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nMypy won't error is `namespace_packages` is `False` but then it cannot infer the types properly and will result in errors such as:\r\n```sh\r\napp/bot.py:1: error: Module \"discord\" has no attribute \"ApplicationCommand\"; maybe \"ApplicationCommandMixin\"?\r\n```\r\n\r\nThis issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. 
Any reason for this?.\nMypy can't type check pycord when namespace_packages are enabled\n### Summary\r\n\r\nMypy errors when using pycord with namespace_packages flag enabled\r\n\r\n### Reproduction Steps\r\n\r\nRun mypy against a simple pycord setup.\r\n\r\nAn example set up is as follows:\r\n\r\n```\r\nmy-repo/\r\n\u251c\u2500 my_bot/\r\n\u2502 \u251c\u2500 bot.py\r\n.mypy.ini\r\n```\r\n\r\nRun mypy via: `mypy my_bot/`\r\n\r\nMypy config:\r\n```ini\r\n[mypy]\r\nnamespace_packages = True\r\nignore_missing_imports = True\r\n```\r\n\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n`from discord import ApplicationCommand` in bot.py\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nType checking works as expected with `namespace_packages` enabled\r\n\r\n### Actual Results\r\n\r\nType checking errors with:\r\n```sh\r\nvirtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: \"discord.commands.__init__\" and \"discord.commands\"\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### Intents\r\n\r\nN/A\r\n\r\n### System Information\r\n\r\n```yaml\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b3\r\n- aiohttp v3.8.1\r\n- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64\r\n```\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nMypy won't error is `namespace_packages` is `False` but then it cannot infer the types properly and will result in errors such as:\r\n```sh\r\napp/bot.py:1: error: Module \"discord\" has no attribute \"ApplicationCommand\"; maybe \"ApplicationCommandMixin\"?\r\n```\r\n\r\nThis issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?.\n", "before_files": [{"content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . 
import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands.__init__ import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py"}], "after_files": [{"content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . 
import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py"}]} | 2,000 | 96 |