problem_id (stringlengths 18–22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13–58) | prompt (stringlengths 1.1k–25.4k) | golden_diff (stringlengths 145–5.13k) | verification_info (stringlengths 582–39.1k) | num_tokens (int64, 271–4.1k) | num_tokens_diff (int64, 47–1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_9723 | rasdani/github-patches | git_diff | dask__dask-3157 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LZ4_compress and LZ4_uncompress removed
Since commit python-lz4/python-lz4@d62fdc50c0e183d7260961f09d4e0701fbdf0c5c LZ4_compress and LZ4_decompress have been removed (they've been deprecated for a while). With the version of python-lz4 released on pypi, it means we can't use lz4 compression with dask, and worse importing dask.bytes.compression errors out.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/bytes/compression.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import bz2
4 import sys
5 import zlib
6
7 from toolz import identity
8
9 from ..compatibility import gzip_compress, gzip_decompress, GzipFile
10 from ..utils import ignoring
11
12
13 def noop_file(file, **kwargs):
14 return file
15
16
17 compress = {'gzip': gzip_compress,
18 'zlib': zlib.compress,
19 'bz2': bz2.compress,
20 None: identity}
21 decompress = {'gzip': gzip_decompress,
22 'zlib': zlib.decompress,
23 'bz2': bz2.decompress,
24 None: identity}
25 files = {'gzip': lambda f, **kwargs: GzipFile(fileobj=f, **kwargs),
26 None: noop_file}
27 seekable_files = {None: noop_file}
28
29
30 with ignoring(ImportError):
31 import snappy
32 compress['snappy'] = snappy.compress
33 decompress['snappy'] = snappy.decompress
34
35
36 with ignoring(ImportError):
37 import lz4
38 compress['lz4'] = lz4.LZ4_compress
39 decompress['lz4'] = lz4.LZ4_uncompress
40
41 with ignoring(ImportError):
42 from ..compatibility import LZMAFile, lzma_compress, lzma_decompress
43 compress['xz'] = lzma_compress
44 decompress['xz'] = lzma_decompress
45 files['xz'] = LZMAFile
46
47 # Seekable xz files actually tend to scan whole file - see `get_xz_blocks`
48 # with ignoring(ImportError):
49 # import lzma
50 # seekable_files['xz'] = lzma.LZMAFile
51 #
52 # with ignoring(ImportError):
53 # import lzmaffi
54 # seekable_files['xz'] = lzmaffi.LZMAFile
55
56
57 if sys.version_info[0] >= 3:
58 import bz2
59 files['bz2'] = bz2.BZ2File
60
61
62 def get_xz_blocks(fp):
63 from lzmaffi import (STREAM_HEADER_SIZE, decode_stream_footer,
64 decode_index, LZMAError)
65 fp.seek(0, 2)
66
67 def _peek(f, size):
68 data = f.read(size)
69 f.seek(-size, 1)
70 return data
71
72 if fp.tell() < 2 * STREAM_HEADER_SIZE:
73 raise LZMAError("file too small")
74
75 # read stream paddings (4 bytes each)
76 fp.seek(-4, 1)
77 padding = 0
78 while _peek(fp, 4) == b'\x00\x00\x00\x00':
79 fp.seek(-4, 1)
80 padding += 4
81
82 fp.seek(-STREAM_HEADER_SIZE + 4, 1)
83
84 stream_flags = decode_stream_footer(_peek(fp, STREAM_HEADER_SIZE))
85 fp.seek(-stream_flags.backward_size, 1)
86
87 index = decode_index(_peek(fp, stream_flags.backward_size), padding)
88 return {'offsets': [b.compressed_file_offset for i, b in index],
89 'lengths': [b.unpadded_size for i, b in index],
90 'check': stream_flags.check}
91
92
93 def xz_decompress(data, check):
94 from lzmaffi import decode_block_header_size, LZMADecompressor, FORMAT_BLOCK
95 hsize = decode_block_header_size(data[:1])
96 header = data[:hsize]
97 dc = LZMADecompressor(format=FORMAT_BLOCK, header=header,
98 unpadded_size=len(data), check=check)
99 return dc.decompress(data[len(header):])
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dask/bytes/compression.py b/dask/bytes/compression.py
--- a/dask/bytes/compression.py
+++ b/dask/bytes/compression.py
@@ -33,10 +33,17 @@
decompress['snappy'] = snappy.decompress
-with ignoring(ImportError):
- import lz4
- compress['lz4'] = lz4.LZ4_compress
- decompress['lz4'] = lz4.LZ4_uncompress
+try:
+ import lz4.block
+ compress['lz4'] = lz4.block.compress
+ compress['lz4'] = lz4.block.decompress
+except ImportError:
+ try:
+ import lz4
+ compress['lz4'] = lz4.LZ4_compress
+ compress['lz4'] = lz4.LZ4_uncompress
+ except ImportError:
+ pass
with ignoring(ImportError):
from ..compatibility import LZMAFile, lzma_compress, lzma_decompress
|
{"golden_diff": "diff --git a/dask/bytes/compression.py b/dask/bytes/compression.py\n--- a/dask/bytes/compression.py\n+++ b/dask/bytes/compression.py\n@@ -33,10 +33,17 @@\n decompress['snappy'] = snappy.decompress\n \n \n-with ignoring(ImportError):\n- import lz4\n- compress['lz4'] = lz4.LZ4_compress\n- decompress['lz4'] = lz4.LZ4_uncompress\n+try:\n+ import lz4.block\n+ compress['lz4'] = lz4.block.compress\n+ compress['lz4'] = lz4.block.decompress\n+except ImportError:\n+ try:\n+ import lz4\n+ compress['lz4'] = lz4.LZ4_compress\n+ compress['lz4'] = lz4.LZ4_uncompress\n+ except ImportError:\n+ pass\n \n with ignoring(ImportError):\n from ..compatibility import LZMAFile, lzma_compress, lzma_decompress\n", "issue": "LZ4_compress and LZ4_uncompress removed\nSince commit python-lz4/python-lz4@d62fdc50c0e183d7260961f09d4e0701fbdf0c5c LZ4_compress and LZ4_decompress have been removed (they've been deprecated for a while). With the version of python-lz4 released on pypi, it means we can't use lz4 compression with dask, and worse importing dask.bytes.compression errors out.\r\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport bz2\nimport sys\nimport zlib\n\nfrom toolz import identity\n\nfrom ..compatibility import gzip_compress, gzip_decompress, GzipFile\nfrom ..utils import ignoring\n\n\ndef noop_file(file, **kwargs):\n return file\n\n\ncompress = {'gzip': gzip_compress,\n 'zlib': zlib.compress,\n 'bz2': bz2.compress,\n None: identity}\ndecompress = {'gzip': gzip_decompress,\n 'zlib': zlib.decompress,\n 'bz2': bz2.decompress,\n None: identity}\nfiles = {'gzip': lambda f, **kwargs: GzipFile(fileobj=f, **kwargs),\n None: noop_file}\nseekable_files = {None: noop_file}\n\n\nwith ignoring(ImportError):\n import snappy\n compress['snappy'] = snappy.compress\n decompress['snappy'] = snappy.decompress\n\n\nwith ignoring(ImportError):\n import lz4\n compress['lz4'] = lz4.LZ4_compress\n decompress['lz4'] = lz4.LZ4_uncompress\n\nwith ignoring(ImportError):\n from ..compatibility import LZMAFile, lzma_compress, lzma_decompress\n compress['xz'] = lzma_compress\n decompress['xz'] = lzma_decompress\n files['xz'] = LZMAFile\n\n# Seekable xz files actually tend to scan whole file - see `get_xz_blocks`\n# with ignoring(ImportError):\n# import lzma\n# seekable_files['xz'] = lzma.LZMAFile\n#\n# with ignoring(ImportError):\n# import lzmaffi\n# seekable_files['xz'] = lzmaffi.LZMAFile\n\n\nif sys.version_info[0] >= 3:\n import bz2\n files['bz2'] = bz2.BZ2File\n\n\ndef get_xz_blocks(fp):\n from lzmaffi import (STREAM_HEADER_SIZE, decode_stream_footer,\n decode_index, LZMAError)\n fp.seek(0, 2)\n\n def _peek(f, size):\n data = f.read(size)\n f.seek(-size, 1)\n return data\n\n if fp.tell() < 2 * STREAM_HEADER_SIZE:\n raise LZMAError(\"file too small\")\n\n # read stream paddings (4 bytes each)\n fp.seek(-4, 1)\n padding = 0\n while _peek(fp, 4) == b'\\x00\\x00\\x00\\x00':\n fp.seek(-4, 1)\n padding += 4\n\n fp.seek(-STREAM_HEADER_SIZE + 4, 1)\n\n stream_flags = decode_stream_footer(_peek(fp, STREAM_HEADER_SIZE))\n fp.seek(-stream_flags.backward_size, 1)\n\n index = decode_index(_peek(fp, stream_flags.backward_size), padding)\n return {'offsets': [b.compressed_file_offset for i, b in index],\n 'lengths': [b.unpadded_size for i, b in index],\n 'check': stream_flags.check}\n\n\ndef xz_decompress(data, check):\n from lzmaffi import decode_block_header_size, LZMADecompressor, FORMAT_BLOCK\n hsize = decode_block_header_size(data[:1])\n header = data[:hsize]\n dc = 
LZMADecompressor(format=FORMAT_BLOCK, header=header,\n unpadded_size=len(data), check=check)\n return dc.decompress(data[len(header):])\n", "path": "dask/bytes/compression.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport bz2\nimport sys\nimport zlib\n\nfrom toolz import identity\n\nfrom ..compatibility import gzip_compress, gzip_decompress, GzipFile\nfrom ..utils import ignoring\n\n\ndef noop_file(file, **kwargs):\n return file\n\n\ncompress = {'gzip': gzip_compress,\n 'zlib': zlib.compress,\n 'bz2': bz2.compress,\n None: identity}\ndecompress = {'gzip': gzip_decompress,\n 'zlib': zlib.decompress,\n 'bz2': bz2.decompress,\n None: identity}\nfiles = {'gzip': lambda f, **kwargs: GzipFile(fileobj=f, **kwargs),\n None: noop_file}\nseekable_files = {None: noop_file}\n\n\nwith ignoring(ImportError):\n import snappy\n compress['snappy'] = snappy.compress\n decompress['snappy'] = snappy.decompress\n\n\ntry:\n import lz4.block\n compress['lz4'] = lz4.block.compress\n compress['lz4'] = lz4.block.decompress\nexcept ImportError:\n try:\n import lz4\n compress['lz4'] = lz4.LZ4_compress\n compress['lz4'] = lz4.LZ4_uncompress\n except ImportError:\n pass\n\nwith ignoring(ImportError):\n from ..compatibility import LZMAFile, lzma_compress, lzma_decompress\n compress['xz'] = lzma_compress\n decompress['xz'] = lzma_decompress\n files['xz'] = LZMAFile\n\n# Seekable xz files actually tend to scan whole file - see `get_xz_blocks`\n# with ignoring(ImportError):\n# import lzma\n# seekable_files['xz'] = lzma.LZMAFile\n#\n# with ignoring(ImportError):\n# import lzmaffi\n# seekable_files['xz'] = lzmaffi.LZMAFile\n\n\nif sys.version_info[0] >= 3:\n import bz2\n files['bz2'] = bz2.BZ2File\n\n\ndef get_xz_blocks(fp):\n from lzmaffi import (STREAM_HEADER_SIZE, decode_stream_footer,\n decode_index, LZMAError)\n fp.seek(0, 2)\n\n def _peek(f, size):\n data = f.read(size)\n f.seek(-size, 1)\n return data\n\n if fp.tell() < 2 * STREAM_HEADER_SIZE:\n raise LZMAError(\"file too small\")\n\n # read stream paddings (4 bytes each)\n fp.seek(-4, 1)\n padding = 0\n while _peek(fp, 4) == b'\\x00\\x00\\x00\\x00':\n fp.seek(-4, 1)\n padding += 4\n\n fp.seek(-STREAM_HEADER_SIZE + 4, 1)\n\n stream_flags = decode_stream_footer(_peek(fp, STREAM_HEADER_SIZE))\n fp.seek(-stream_flags.backward_size, 1)\n\n index = decode_index(_peek(fp, stream_flags.backward_size), padding)\n return {'offsets': [b.compressed_file_offset for i, b in index],\n 'lengths': [b.unpadded_size for i, b in index],\n 'check': stream_flags.check}\n\n\ndef xz_decompress(data, check):\n from lzmaffi import decode_block_header_size, LZMADecompressor, FORMAT_BLOCK\n hsize = decode_block_header_size(data[:1])\n header = data[:hsize]\n dc = LZMADecompressor(format=FORMAT_BLOCK, header=header,\n unpadded_size=len(data), check=check)\n return dc.decompress(data[len(header):])\n", "path": "dask/bytes/compression.py"}]}
| 1,347 | 228 |
gh_patches_debug_12311 | rasdani/github-patches | git_diff | pyodide__pyodide-4269 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Discussion] Drop support for Node versions that passed endoflife
## Proposal
Drop support for Node < 18 from the next Pyodide major release (0.25.0)
## Background
The endoflife of [Node.js 14 and 16 passed a few days ago](https://nodejs.dev/en/about/releases/). We just finished a major release, so I think this is a good time to talk about Node version support.
Here are the things that are currently associated with Node.js versions in Pyodide.
- The documentation mentions [how to run Pyodide in Node.js < 0.18](https://pyodide.org/en/stable/usage/index.html#node-js-versions-0-17)
- We have some compat codes for older Node versions
- https://github.com/pyodide/pyodide/pull/4100#discussion_r1308449699
-[compat.ts](https://github.com/pyodide/pyodide/blob/e2c2884e7a9afd90b23849220ee578bd2d523458/src/js/compat.ts#L40)
- JSPI requires Node >= 20
- The Docker image used for Pyodide CI uses Node 20
### Benefits from dropping Node < 18 support
- No more `node-fetch`.
- No more `--experimental-wasm-bigint` flags
- Better `MessageChannel` support (https://github.com/pyodide/pyodide/issues/4006)
- Some useful methods like [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
### Drawbacks
- People who were using Node.js < 18 with Pyodide won't like it.
## Migration plan
- Mention minimal Node.js version support in docs
- Remove compat codes for old Node.js versions
- pytest-pyodide?
WDYT?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide-build/pyodide_build/create_xbuildenv.py`
Content:
```
1 import shutil
2 import subprocess
3 from pathlib import Path
4
5 from .build_env import (
6 get_build_flag,
7 get_pyodide_root,
8 get_unisolated_packages,
9 )
10 from .common import exit_with_stdio
11 from .logger import logger
12 from .recipe import load_all_recipes
13
14
15 def _copy_xbuild_files(
16 pyodide_root: Path, xbuildenv_path: Path, skip_missing_files: bool = False
17 ) -> None:
18 site_packages = Path(get_build_flag("HOSTSITEPACKAGES"))
19 # Store package cross-build-files into site_packages_extras in the same tree
20 # structure as they would appear in the real package.
21 # In install_xbuildenv, we will use:
22 # pip install -t $HOSTSITEPACKAGES -r requirements.txt
23 # cp site-packages-extras $HOSTSITEPACKAGES
24 site_packages_extras = xbuildenv_path / "site-packages-extras"
25 recipes = load_all_recipes(pyodide_root / "packages")
26 for recipe in recipes.values():
27 xbuild_files = recipe.build.cross_build_files
28 for path in xbuild_files:
29 source = site_packages / path
30 target = site_packages_extras / path
31 target.parent.mkdir(parents=True, exist_ok=True)
32
33 if not source.exists():
34 if skip_missing_files:
35 logger.warning(f"Cross-build file '{path}' not found")
36 continue
37
38 raise FileNotFoundError(f"Cross-build file '{path}' not found")
39
40 shutil.copy(source, target)
41
42
43 def _copy_wasm_libs(
44 pyodide_root: Path, xbuildenv_root: Path, skip_missing_files: bool = False
45 ) -> None:
46 def get_relative_path(pyodide_root: Path, flag: str) -> Path:
47 return Path(get_build_flag(flag)).relative_to(pyodide_root)
48
49 pythoninclude = get_relative_path(pyodide_root, "PYTHONINCLUDE")
50 wasm_lib_dir = get_relative_path(pyodide_root, "WASM_LIBRARY_DIR")
51 sysconfig_dir = get_relative_path(pyodide_root, "SYSCONFIGDATA_DIR")
52 to_copy: list[Path] = [
53 pythoninclude,
54 sysconfig_dir,
55 Path("Makefile.envs"),
56 wasm_lib_dir / "cmake",
57 Path("dist/pyodide-lock.json"),
58 Path("dist/python"),
59 Path("dist/python_stdlib.zip"),
60 Path("tools/constraints.txt"),
61 ]
62 to_copy.extend(
63 x.relative_to(pyodide_root) for x in (pyodide_root / "dist").glob("pyodide.*")
64 )
65 # Some ad-hoc stuff here to moderate size. We'd like to include all of
66 # wasm_lib_dir but there's 180mb of it. Better to leave out all the video
67 # codecs and stuff.
68 for pkg in ["ssl", "libcrypto", "zlib", "xml", "mpfr", "lapack", "blas", "f2c"]:
69 to_copy.extend(
70 x.relative_to(pyodide_root)
71 for x in (pyodide_root / wasm_lib_dir / "include").glob(f"**/*{pkg}*")
72 if "boost" not in str(x)
73 )
74 to_copy.extend(
75 x.relative_to(pyodide_root)
76 for x in (pyodide_root / wasm_lib_dir / "lib").glob(f"**/*{pkg}*")
77 )
78
79 for path in to_copy:
80 if not (pyodide_root / path).exists():
81 if skip_missing_files:
82 logger.warning(f"Cross-build file '{path}' not found")
83 continue
84
85 raise FileNotFoundError(f"Cross-build file '{path}' not found")
86
87 if (pyodide_root / path).is_dir():
88 shutil.copytree(
89 pyodide_root / path, xbuildenv_root / path, dirs_exist_ok=True
90 )
91 else:
92 (xbuildenv_root / path).parent.mkdir(exist_ok=True, parents=True)
93 shutil.copy(pyodide_root / path, xbuildenv_root / path)
94
95
96 def create(
97 path: str | Path,
98 pyodide_root: Path | None = None,
99 *,
100 skip_missing_files: bool = False,
101 ) -> None:
102 if pyodide_root is None:
103 pyodide_root = get_pyodide_root()
104
105 xbuildenv_path = Path(path) / "xbuildenv"
106 xbuildenv_root = xbuildenv_path / "pyodide-root"
107
108 shutil.rmtree(xbuildenv_path, ignore_errors=True)
109 xbuildenv_path.mkdir(parents=True, exist_ok=True)
110 xbuildenv_root.mkdir()
111
112 _copy_xbuild_files(pyodide_root, xbuildenv_path, skip_missing_files)
113 _copy_wasm_libs(pyodide_root, xbuildenv_root, skip_missing_files)
114
115 (xbuildenv_root / "package.json").write_text("{}")
116 res = subprocess.run(
117 ["npm", "i", "node-fetch@2"],
118 cwd=xbuildenv_root,
119 capture_output=True,
120 encoding="utf8",
121 )
122 if res.returncode != 0:
123 logger.error("Failed to install node-fetch:")
124 exit_with_stdio(res)
125
126 res = subprocess.run(
127 ["pip", "freeze", "--path", get_build_flag("HOSTSITEPACKAGES")],
128 capture_output=True,
129 encoding="utf8",
130 )
131 if res.returncode != 0:
132 logger.error("Failed to run pip freeze:")
133 exit_with_stdio(res)
134
135 (xbuildenv_path / "requirements.txt").write_text(res.stdout)
136 (xbuildenv_root / "unisolated.txt").write_text("\n".join(get_unisolated_packages()))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyodide-build/pyodide_build/create_xbuildenv.py b/pyodide-build/pyodide_build/create_xbuildenv.py
--- a/pyodide-build/pyodide_build/create_xbuildenv.py
+++ b/pyodide-build/pyodide_build/create_xbuildenv.py
@@ -113,16 +113,6 @@
_copy_wasm_libs(pyodide_root, xbuildenv_root, skip_missing_files)
(xbuildenv_root / "package.json").write_text("{}")
- res = subprocess.run(
- ["npm", "i", "node-fetch@2"],
- cwd=xbuildenv_root,
- capture_output=True,
- encoding="utf8",
- )
- if res.returncode != 0:
- logger.error("Failed to install node-fetch:")
- exit_with_stdio(res)
-
res = subprocess.run(
["pip", "freeze", "--path", get_build_flag("HOSTSITEPACKAGES")],
capture_output=True,
|
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/create_xbuildenv.py b/pyodide-build/pyodide_build/create_xbuildenv.py\n--- a/pyodide-build/pyodide_build/create_xbuildenv.py\n+++ b/pyodide-build/pyodide_build/create_xbuildenv.py\n@@ -113,16 +113,6 @@\n _copy_wasm_libs(pyodide_root, xbuildenv_root, skip_missing_files)\n \n (xbuildenv_root / \"package.json\").write_text(\"{}\")\n- res = subprocess.run(\n- [\"npm\", \"i\", \"node-fetch@2\"],\n- cwd=xbuildenv_root,\n- capture_output=True,\n- encoding=\"utf8\",\n- )\n- if res.returncode != 0:\n- logger.error(\"Failed to install node-fetch:\")\n- exit_with_stdio(res)\n-\n res = subprocess.run(\n [\"pip\", \"freeze\", \"--path\", get_build_flag(\"HOSTSITEPACKAGES\")],\n capture_output=True,\n", "issue": "[Discussion] Drop support for Node versions that passed endoflife\n## Proposal\r\n\r\nDrop support for Node < 18 from the next Pyodide major release (0.25.0)\r\n\r\n## Background\r\n\r\nThe endoflife of [Node.js 14 and 16 passed a few days ago](https://nodejs.dev/en/about/releases/). We just finished a major release, so I think this is a good time to talk about Node version support.\r\n\r\n\r\n\r\nHere are the things that are currently associated with Node.js versions in Pyodide.\r\n\r\n- The documentation mentions [how to run Pyodide in Node.js < 0.18](https://pyodide.org/en/stable/usage/index.html#node-js-versions-0-17)\r\n- We have some compat codes for older Node versions\r\n - https://github.com/pyodide/pyodide/pull/4100#discussion_r1308449699\r\n -[compat.ts](https://github.com/pyodide/pyodide/blob/e2c2884e7a9afd90b23849220ee578bd2d523458/src/js/compat.ts#L40)\r\n- JSPI requires Node >= 20\r\n- The Docker image used for Pyodide CI uses Node 20\r\n\r\n### Benefits from dropping Node < 18 support\r\n\r\n- No more `node-fetch`.\r\n- No more `--experimental-wasm-bigint` flags\r\n- Better `MessageChannel` support (https://github.com/pyodide/pyodide/issues/4006)\r\n- Some useful methods like [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)\r\n\r\n### Drawbacks\r\n\r\n- People who were using Node.js < 18 with Pyodide won't like it.\r\n\r\n## Migration plan\r\n\r\n- Mention minimal Node.js version support in docs\r\n- Remove compat codes for old Node.js versions\r\n - pytest-pyodide?\r\n\r\nWDYT?\r\n\r\n\n", "before_files": [{"content": "import shutil\nimport subprocess\nfrom pathlib import Path\n\nfrom .build_env import (\n get_build_flag,\n get_pyodide_root,\n get_unisolated_packages,\n)\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe import load_all_recipes\n\n\ndef _copy_xbuild_files(\n pyodide_root: Path, xbuildenv_path: Path, skip_missing_files: bool = False\n) -> None:\n site_packages = Path(get_build_flag(\"HOSTSITEPACKAGES\"))\n # Store package cross-build-files into site_packages_extras in the same tree\n # structure as they would appear in the real package.\n # In install_xbuildenv, we will use:\n # pip install -t $HOSTSITEPACKAGES -r requirements.txt\n # cp site-packages-extras $HOSTSITEPACKAGES\n site_packages_extras = xbuildenv_path / \"site-packages-extras\"\n recipes = load_all_recipes(pyodide_root / \"packages\")\n for recipe in recipes.values():\n xbuild_files = recipe.build.cross_build_files\n for path in xbuild_files:\n source = site_packages / path\n target = site_packages_extras / path\n target.parent.mkdir(parents=True, exist_ok=True)\n\n if not source.exists():\n if skip_missing_files:\n logger.warning(f\"Cross-build file '{path}' not found\")\n 
continue\n\n raise FileNotFoundError(f\"Cross-build file '{path}' not found\")\n\n shutil.copy(source, target)\n\n\ndef _copy_wasm_libs(\n pyodide_root: Path, xbuildenv_root: Path, skip_missing_files: bool = False\n) -> None:\n def get_relative_path(pyodide_root: Path, flag: str) -> Path:\n return Path(get_build_flag(flag)).relative_to(pyodide_root)\n\n pythoninclude = get_relative_path(pyodide_root, \"PYTHONINCLUDE\")\n wasm_lib_dir = get_relative_path(pyodide_root, \"WASM_LIBRARY_DIR\")\n sysconfig_dir = get_relative_path(pyodide_root, \"SYSCONFIGDATA_DIR\")\n to_copy: list[Path] = [\n pythoninclude,\n sysconfig_dir,\n Path(\"Makefile.envs\"),\n wasm_lib_dir / \"cmake\",\n Path(\"dist/pyodide-lock.json\"),\n Path(\"dist/python\"),\n Path(\"dist/python_stdlib.zip\"),\n Path(\"tools/constraints.txt\"),\n ]\n to_copy.extend(\n x.relative_to(pyodide_root) for x in (pyodide_root / \"dist\").glob(\"pyodide.*\")\n )\n # Some ad-hoc stuff here to moderate size. We'd like to include all of\n # wasm_lib_dir but there's 180mb of it. Better to leave out all the video\n # codecs and stuff.\n for pkg in [\"ssl\", \"libcrypto\", \"zlib\", \"xml\", \"mpfr\", \"lapack\", \"blas\", \"f2c\"]:\n to_copy.extend(\n x.relative_to(pyodide_root)\n for x in (pyodide_root / wasm_lib_dir / \"include\").glob(f\"**/*{pkg}*\")\n if \"boost\" not in str(x)\n )\n to_copy.extend(\n x.relative_to(pyodide_root)\n for x in (pyodide_root / wasm_lib_dir / \"lib\").glob(f\"**/*{pkg}*\")\n )\n\n for path in to_copy:\n if not (pyodide_root / path).exists():\n if skip_missing_files:\n logger.warning(f\"Cross-build file '{path}' not found\")\n continue\n\n raise FileNotFoundError(f\"Cross-build file '{path}' not found\")\n\n if (pyodide_root / path).is_dir():\n shutil.copytree(\n pyodide_root / path, xbuildenv_root / path, dirs_exist_ok=True\n )\n else:\n (xbuildenv_root / path).parent.mkdir(exist_ok=True, parents=True)\n shutil.copy(pyodide_root / path, xbuildenv_root / path)\n\n\ndef create(\n path: str | Path,\n pyodide_root: Path | None = None,\n *,\n skip_missing_files: bool = False,\n) -> None:\n if pyodide_root is None:\n pyodide_root = get_pyodide_root()\n\n xbuildenv_path = Path(path) / \"xbuildenv\"\n xbuildenv_root = xbuildenv_path / \"pyodide-root\"\n\n shutil.rmtree(xbuildenv_path, ignore_errors=True)\n xbuildenv_path.mkdir(parents=True, exist_ok=True)\n xbuildenv_root.mkdir()\n\n _copy_xbuild_files(pyodide_root, xbuildenv_path, skip_missing_files)\n _copy_wasm_libs(pyodide_root, xbuildenv_root, skip_missing_files)\n\n (xbuildenv_root / \"package.json\").write_text(\"{}\")\n res = subprocess.run(\n [\"npm\", \"i\", \"node-fetch@2\"],\n cwd=xbuildenv_root,\n capture_output=True,\n encoding=\"utf8\",\n )\n if res.returncode != 0:\n logger.error(\"Failed to install node-fetch:\")\n exit_with_stdio(res)\n\n res = subprocess.run(\n [\"pip\", \"freeze\", \"--path\", get_build_flag(\"HOSTSITEPACKAGES\")],\n capture_output=True,\n encoding=\"utf8\",\n )\n if res.returncode != 0:\n logger.error(\"Failed to run pip freeze:\")\n exit_with_stdio(res)\n\n (xbuildenv_path / \"requirements.txt\").write_text(res.stdout)\n (xbuildenv_root / \"unisolated.txt\").write_text(\"\\n\".join(get_unisolated_packages()))\n", "path": "pyodide-build/pyodide_build/create_xbuildenv.py"}], "after_files": [{"content": "import shutil\nimport subprocess\nfrom pathlib import Path\n\nfrom .build_env import (\n get_build_flag,\n get_pyodide_root,\n get_unisolated_packages,\n)\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe 
import load_all_recipes\n\n\ndef _copy_xbuild_files(\n pyodide_root: Path, xbuildenv_path: Path, skip_missing_files: bool = False\n) -> None:\n site_packages = Path(get_build_flag(\"HOSTSITEPACKAGES\"))\n # Store package cross-build-files into site_packages_extras in the same tree\n # structure as they would appear in the real package.\n # In install_xbuildenv, we will use:\n # pip install -t $HOSTSITEPACKAGES -r requirements.txt\n # cp site-packages-extras $HOSTSITEPACKAGES\n site_packages_extras = xbuildenv_path / \"site-packages-extras\"\n recipes = load_all_recipes(pyodide_root / \"packages\")\n for recipe in recipes.values():\n xbuild_files = recipe.build.cross_build_files\n for path in xbuild_files:\n source = site_packages / path\n target = site_packages_extras / path\n target.parent.mkdir(parents=True, exist_ok=True)\n\n if not source.exists():\n if skip_missing_files:\n logger.warning(f\"Cross-build file '{path}' not found\")\n continue\n\n raise FileNotFoundError(f\"Cross-build file '{path}' not found\")\n\n shutil.copy(source, target)\n\n\ndef _copy_wasm_libs(\n pyodide_root: Path, xbuildenv_root: Path, skip_missing_files: bool = False\n) -> None:\n def get_relative_path(pyodide_root: Path, flag: str) -> Path:\n return Path(get_build_flag(flag)).relative_to(pyodide_root)\n\n pythoninclude = get_relative_path(pyodide_root, \"PYTHONINCLUDE\")\n wasm_lib_dir = get_relative_path(pyodide_root, \"WASM_LIBRARY_DIR\")\n sysconfig_dir = get_relative_path(pyodide_root, \"SYSCONFIGDATA_DIR\")\n to_copy: list[Path] = [\n pythoninclude,\n sysconfig_dir,\n Path(\"Makefile.envs\"),\n wasm_lib_dir / \"cmake\",\n Path(\"dist/pyodide-lock.json\"),\n Path(\"dist/python\"),\n Path(\"dist/python_stdlib.zip\"),\n Path(\"tools/constraints.txt\"),\n ]\n to_copy.extend(\n x.relative_to(pyodide_root) for x in (pyodide_root / \"dist\").glob(\"pyodide.*\")\n )\n # Some ad-hoc stuff here to moderate size. We'd like to include all of\n # wasm_lib_dir but there's 180mb of it. 
Better to leave out all the video\n # codecs and stuff.\n for pkg in [\"ssl\", \"libcrypto\", \"zlib\", \"xml\", \"mpfr\", \"lapack\", \"blas\", \"f2c\"]:\n to_copy.extend(\n x.relative_to(pyodide_root)\n for x in (pyodide_root / wasm_lib_dir / \"include\").glob(f\"**/*{pkg}*\")\n if \"boost\" not in str(x)\n )\n to_copy.extend(\n x.relative_to(pyodide_root)\n for x in (pyodide_root / wasm_lib_dir / \"lib\").glob(f\"**/*{pkg}*\")\n )\n\n for path in to_copy:\n if not (pyodide_root / path).exists():\n if skip_missing_files:\n logger.warning(f\"Cross-build file '{path}' not found\")\n continue\n\n raise FileNotFoundError(f\"Cross-build file '{path}' not found\")\n\n if (pyodide_root / path).is_dir():\n shutil.copytree(\n pyodide_root / path, xbuildenv_root / path, dirs_exist_ok=True\n )\n else:\n (xbuildenv_root / path).parent.mkdir(exist_ok=True, parents=True)\n shutil.copy(pyodide_root / path, xbuildenv_root / path)\n\n\ndef create(\n path: str | Path,\n pyodide_root: Path | None = None,\n *,\n skip_missing_files: bool = False,\n) -> None:\n if pyodide_root is None:\n pyodide_root = get_pyodide_root()\n\n xbuildenv_path = Path(path) / \"xbuildenv\"\n xbuildenv_root = xbuildenv_path / \"pyodide-root\"\n\n shutil.rmtree(xbuildenv_path, ignore_errors=True)\n xbuildenv_path.mkdir(parents=True, exist_ok=True)\n xbuildenv_root.mkdir()\n\n _copy_xbuild_files(pyodide_root, xbuildenv_path, skip_missing_files)\n _copy_wasm_libs(pyodide_root, xbuildenv_root, skip_missing_files)\n\n (xbuildenv_root / \"package.json\").write_text(\"{}\")\n res = subprocess.run(\n [\"pip\", \"freeze\", \"--path\", get_build_flag(\"HOSTSITEPACKAGES\")],\n capture_output=True,\n encoding=\"utf8\",\n )\n if res.returncode != 0:\n logger.error(\"Failed to run pip freeze:\")\n exit_with_stdio(res)\n\n (xbuildenv_path / \"requirements.txt\").write_text(res.stdout)\n (xbuildenv_root / \"unisolated.txt\").write_text(\"\\n\".join(get_unisolated_packages()))\n", "path": "pyodide-build/pyodide_build/create_xbuildenv.py"}]}
| 2,294 | 221 |
gh_patches_debug_513 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-150 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Relative STATIC_URL in production broken email images
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/settings.py`
Content:
```
1 import os
2 import dj_database_url
3
4 from decouple import config
5
6
7 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = config('SECRET_KEY')
13
14 # SECURITY WARNING: don't run with debug turned on in production!
15 DEBUG = config('DEBUG', default=False, cast=bool)
16
17 ALLOWED_HOSTS = config(
18 'ALLOWED_HOSTS',
19 default='*',
20 cast=lambda v: [s.strip() for s in v.split(',')])
21
22
23 # Application definition
24
25 INSTALLED_APPS = [
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 'django.contrib.staticfiles',
32 'rest_framework',
33 'rest_framework.authtoken',
34 'django_filters',
35 'corsheaders',
36 'bothub.authentication',
37 'bothub.common',
38 'bothub.api',
39 ]
40
41 MIDDLEWARE = [
42 'django.middleware.security.SecurityMiddleware',
43 'whitenoise.middleware.WhiteNoiseMiddleware',
44 'django.contrib.sessions.middleware.SessionMiddleware',
45 'corsheaders.middleware.CorsMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'bothub.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.template.context_processors.debug',
63 'django.template.context_processors.request',
64 'django.contrib.auth.context_processors.auth',
65 'django.contrib.messages.context_processors.messages',
66 ],
67 },
68 },
69 ]
70
71 WSGI_APPLICATION = 'bothub.wsgi.application'
72
73
74 # Database
75
76 DATABASES = {}
77 DATABASES['default'] = dj_database_url.parse(
78 config(
79 'DEFAULT_DATABASE',
80 default='sqlite:///db.sqlite3'))
81
82
83 # Auth
84
85 AUTH_USER_MODEL = 'authentication.User'
86
87
88 # Password validation
89
90 AUTH_PASSWORD_VALIDATORS = [
91 {
92 'NAME': 'django.contrib.auth.password_validation.' +
93 'UserAttributeSimilarityValidator',
94 },
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'MinimumLengthValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'NumericPasswordValidator',
106 },
107 ]
108
109
110 # Internationalization
111
112 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
113
114 TIME_ZONE = config('TIME_ZONE', default='UTC')
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122
123 # Static files (CSS, JavaScript, Images)
124
125 STATIC_URL = '/static/'
126
127 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131
132 # rest framework
133
134 REST_FRAMEWORK = {
135 'DEFAULT_AUTHENTICATION_CLASSES': [
136 'rest_framework.authentication.TokenAuthentication',
137 ],
138 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
139 'LimitOffsetPagination',
140 'PAGE_SIZE': 20,
141 'DEFAULT_FILTER_BACKENDS': [
142 'django_filters.rest_framework.DjangoFilterBackend',
143 ],
144 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
145 }
146
147
148 # cors headers
149
150 CORS_ORIGIN_ALLOW_ALL = True
151 CORS_URLS_REGEX = r'^/api/.*$'
152
153
154 # mail
155
156 envvar_EMAIL_HOST = config('EMAIL_HOST', default=None)
157
158 ADMINS = config(
159 'ADMINS',
160 default='',
161 cast=lambda v: [
162 (
163 s.strip().split('|')[0],
164 s.strip().split('|')[1],
165 ) for s in v.split(',')] if v else [])
166 EMAIL_SUBJECT_PREFIX = '[bothub] '
167 DEFAULT_FROM_EMAIL = config(
168 'DEFAULT_FROM_EMAIL',
169 default='webmaster@localhost')
170 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
171
172 if envvar_EMAIL_HOST:
173 EMAIL_HOST = envvar_EMAIL_HOST
174 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
175 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
176 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
177 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
178 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
179 else:
180 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
181
182
183 # webapp
184
185 BOTHUB_WEBAPP_BASE_URL = config(
186 'BOTHUB_WEBAPP_BASE_URL',
187 default='http://localhost:8080/')
188
189
190 # NLP
191
192 BOTHUB_NLP_BASE_URL = config(
193 'BOTHUB_NLP_BASE_URL',
194 default='http://localhost:8001/')
195
196
197 # CSRF
198
199 CSRF_COOKIE_DOMAIN = config(
200 'CSRF_COOKIE_DOMAIN',
201 default=None)
202
203 CSRF_COOKIE_SECURE = config(
204 'CSRF_COOKIE_SECURE',
205 default=False,
206 cast=bool)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -122,7 +122,7 @@
# Static files (CSS, JavaScript, Images)
-STATIC_URL = '/static/'
+STATIC_URL = config('STATIC_URL', default='/static/')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
|
{"golden_diff": "diff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -122,7 +122,7 @@\n \n # Static files (CSS, JavaScript, Images)\n \n-STATIC_URL = '/static/'\n+STATIC_URL = config('STATIC_URL', default='/static/')\n \n STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n", "issue": "Relative STATIC_URL in production broken email images\n\n", "before_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}], "after_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}]}
| 1,941 | 92 |
gh_patches_debug_9911 | rasdani/github-patches | git_diff | quantumlib__Cirq-3978 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typos in wait_gate.py::wait
https://github.com/quantumlib/Cirq/blob/150f95c31042669ab9905654998a8432844a4209/cirq/ops/wait_gate.py#L140-L143
They all say picoseconds, but should say picos, nanos, micros, millis.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/ops/wait_gate.py`
Content:
```
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union
15
16 import sympy
17
18 from cirq import value, protocols
19 from cirq.ops import raw_types
20
21 if TYPE_CHECKING:
22 import cirq
23
24
25 @value.value_equality
26 class WaitGate(raw_types.Gate):
27 """A single-qubit idle gate that represents waiting.
28
29 In non-noisy simulators, this gate is just an identity gate. But noisy
30 simulators and noise models may insert more error for longer waits.
31 """
32
33 def __init__(
34 self,
35 duration: 'cirq.DURATION_LIKE',
36 num_qubits: Optional[int] = None,
37 qid_shape: Tuple[int, ...] = None,
38 ) -> None:
39 """Initialize a wait gate with the given duration.
40
41 Args:
42 duration: A constant or parameterized wait duration. This can be
43 an instance of `datetime.timedelta` or `cirq.Duration`.
44 """
45 self.duration = value.Duration(duration)
46 if not protocols.is_parameterized(self.duration) and self.duration < 0:
47 raise ValueError('duration < 0')
48 if qid_shape is None:
49 if num_qubits is None:
50 # Assume one qubit for backwards compatibility
51 qid_shape = (2,)
52 else:
53 qid_shape = (2,) * num_qubits
54 if num_qubits is None:
55 num_qubits = len(qid_shape)
56 if not qid_shape:
57 raise ValueError('Waiting on an empty set of qubits.')
58 if num_qubits != len(qid_shape):
59 raise ValueError('len(qid_shape) != num_qubits')
60 self._qid_shape = qid_shape
61
62 def _is_parameterized_(self) -> bool:
63 return protocols.is_parameterized(self.duration)
64
65 def _parameter_names_(self) -> AbstractSet[str]:
66 return protocols.parameter_names(self.duration)
67
68 def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
69 return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
70
71 def _qid_shape_(self) -> Tuple[int, ...]:
72 return self._qid_shape
73
74 def _has_unitary_(self) -> bool:
75 return True
76
77 def _apply_unitary_(self, args):
78 return args.target_tensor # Identity.
79
80 def _decompose_(self, qubits):
81 return []
82
83 def _trace_distance_bound_(self):
84 return 0
85
86 def __pow__(self, power):
87 if power == 1 or power == -1:
88 # The inverse of a wait is still a wait.
89 return self
90 # Other scalar exponents could scale the wait... but ultimately it is
91 # ambiguous whether the user wanted to scale the duration or just wanted
92 # to affect the unitary. Play it safe and fail.
93 return NotImplemented
94
95 def __str__(self) -> str:
96 return f'WaitGate({self.duration})'
97
98 def __repr__(self) -> str:
99 return f'cirq.WaitGate({repr(self.duration)})'
100
101 def _json_dict_(self) -> Dict[str, Any]:
102 d = protocols.obj_to_dict_helper(self, ['duration'])
103 if len(self._qid_shape) != 1:
104 d['num_qubits'] = len(self._qid_shape)
105 if any(d != 2 for d in self._qid_shape):
106 d['qid_shape'] = self._qid_shape
107 return d
108
109 @classmethod
110 def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):
111 return cls(
112 duration=duration,
113 num_qubits=num_qubits,
114 qid_shape=None if qid_shape is None else tuple(qid_shape),
115 )
116
117 def _value_equality_values_(self) -> Any:
118 return self.duration
119
120 def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):
121 return 'WAIT\n'
122
123
124 def wait(
125 *target: 'cirq.Qid',
126 duration: 'cirq.DURATION_LIKE' = None,
127 picos: Union[int, float, sympy.Basic] = 0,
128 nanos: Union[int, float, sympy.Basic] = 0,
129 micros: Union[int, float, sympy.Basic] = 0,
130 millis: Union[int, float, sympy.Basic] = 0,
131 ) -> raw_types.Operation:
132 """Creates a WaitGate applied to all the given qubits.
133
134 The duration can be specified as a DURATION_LIKE or using keyword args with
135 numbers in the appropriate units. See Duration for details.
136
137 Args:
138 *target: The qubits that should wait.
139 value: Wait duration (see Duration).
140 picos: Picoseconds to wait (see Duration).
141 nanos: Picoseconds to wait (see Duration).
142 micros: Picoseconds to wait (see Duration).
143 millis: Picoseconds to wait (see Duration).
144 """
145 return WaitGate(
146 duration=value.Duration(
147 duration,
148 picos=picos,
149 nanos=nanos,
150 micros=micros,
151 millis=millis,
152 ),
153 qid_shape=protocols.qid_shape(target),
154 ).on(*target)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/ops/wait_gate.py b/cirq/ops/wait_gate.py
--- a/cirq/ops/wait_gate.py
+++ b/cirq/ops/wait_gate.py
@@ -138,9 +138,9 @@
*target: The qubits that should wait.
value: Wait duration (see Duration).
picos: Picoseconds to wait (see Duration).
- nanos: Picoseconds to wait (see Duration).
- micros: Picoseconds to wait (see Duration).
- millis: Picoseconds to wait (see Duration).
+ nanos: Nanoseconds to wait (see Duration).
+ micros: Microseconds to wait (see Duration).
+ millis: Milliseconds to wait (see Duration).
"""
return WaitGate(
duration=value.Duration(
|
{"golden_diff": "diff --git a/cirq/ops/wait_gate.py b/cirq/ops/wait_gate.py\n--- a/cirq/ops/wait_gate.py\n+++ b/cirq/ops/wait_gate.py\n@@ -138,9 +138,9 @@\n *target: The qubits that should wait.\n value: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n- nanos: Picoseconds to wait (see Duration).\n- micros: Picoseconds to wait (see Duration).\n- millis: Picoseconds to wait (see Duration).\n+ nanos: Nanoseconds to wait (see Duration).\n+ micros: Microseconds to wait (see Duration).\n+ millis: Milliseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(\n", "issue": "Typos in wait_gate.py::wait\nhttps://github.com/quantumlib/Cirq/blob/150f95c31042669ab9905654998a8432844a4209/cirq/ops/wait_gate.py#L140-L143\r\n\r\nThey all say picoseconds, but should say picos, nanos, micros, millis.\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union\n\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass WaitGate(raw_types.Gate):\n \"\"\"A single-qubit idle gate that represents waiting.\n\n In non-noisy simulators, this gate is just an identity gate. But noisy\n simulators and noise models may insert more error for longer waits.\n \"\"\"\n\n def __init__(\n self,\n duration: 'cirq.DURATION_LIKE',\n num_qubits: Optional[int] = None,\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Initialize a wait gate with the given duration.\n\n Args:\n duration: A constant or parameterized wait duration. 
This can be\n an instance of `datetime.timedelta` or `cirq.Duration`.\n \"\"\"\n self.duration = value.Duration(duration)\n if not protocols.is_parameterized(self.duration) and self.duration < 0:\n raise ValueError('duration < 0')\n if qid_shape is None:\n if num_qubits is None:\n # Assume one qubit for backwards compatibility\n qid_shape = (2,)\n else:\n qid_shape = (2,) * num_qubits\n if num_qubits is None:\n num_qubits = len(qid_shape)\n if not qid_shape:\n raise ValueError('Waiting on an empty set of qubits.')\n if num_qubits != len(qid_shape):\n raise ValueError('len(qid_shape) != num_qubits')\n self._qid_shape = qid_shape\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.duration)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.duration)\n\n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _apply_unitary_(self, args):\n return args.target_tensor # Identity.\n\n def _decompose_(self, qubits):\n return []\n\n def _trace_distance_bound_(self):\n return 0\n\n def __pow__(self, power):\n if power == 1 or power == -1:\n # The inverse of a wait is still a wait.\n return self\n # Other scalar exponents could scale the wait... but ultimately it is\n # ambiguous whether the user wanted to scale the duration or just wanted\n # to affect the unitary. Play it safe and fail.\n return NotImplemented\n\n def __str__(self) -> str:\n return f'WaitGate({self.duration})'\n\n def __repr__(self) -> str:\n return f'cirq.WaitGate({repr(self.duration)})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n d = protocols.obj_to_dict_helper(self, ['duration'])\n if len(self._qid_shape) != 1:\n d['num_qubits'] = len(self._qid_shape)\n if any(d != 2 for d in self._qid_shape):\n d['qid_shape'] = self._qid_shape\n return d\n\n @classmethod\n def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):\n return cls(\n duration=duration,\n num_qubits=num_qubits,\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _value_equality_values_(self) -> Any:\n return self.duration\n\n def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):\n return 'WAIT\\n'\n\n\ndef wait(\n *target: 'cirq.Qid',\n duration: 'cirq.DURATION_LIKE' = None,\n picos: Union[int, float, sympy.Basic] = 0,\n nanos: Union[int, float, sympy.Basic] = 0,\n micros: Union[int, float, sympy.Basic] = 0,\n millis: Union[int, float, sympy.Basic] = 0,\n) -> raw_types.Operation:\n \"\"\"Creates a WaitGate applied to all the given qubits.\n\n The duration can be specified as a DURATION_LIKE or using keyword args with\n numbers in the appropriate units. 
See Duration for details.\n\n Args:\n *target: The qubits that should wait.\n value: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n nanos: Picoseconds to wait (see Duration).\n micros: Picoseconds to wait (see Duration).\n millis: Picoseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(\n duration,\n picos=picos,\n nanos=nanos,\n micros=micros,\n millis=millis,\n ),\n qid_shape=protocols.qid_shape(target),\n ).on(*target)\n", "path": "cirq/ops/wait_gate.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union\n\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass WaitGate(raw_types.Gate):\n \"\"\"A single-qubit idle gate that represents waiting.\n\n In non-noisy simulators, this gate is just an identity gate. But noisy\n simulators and noise models may insert more error for longer waits.\n \"\"\"\n\n def __init__(\n self,\n duration: 'cirq.DURATION_LIKE',\n num_qubits: Optional[int] = None,\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Initialize a wait gate with the given duration.\n\n Args:\n duration: A constant or parameterized wait duration. This can be\n an instance of `datetime.timedelta` or `cirq.Duration`.\n \"\"\"\n self.duration = value.Duration(duration)\n if not protocols.is_parameterized(self.duration) and self.duration < 0:\n raise ValueError('duration < 0')\n if qid_shape is None:\n if num_qubits is None:\n # Assume one qubit for backwards compatibility\n qid_shape = (2,)\n else:\n qid_shape = (2,) * num_qubits\n if num_qubits is None:\n num_qubits = len(qid_shape)\n if not qid_shape:\n raise ValueError('Waiting on an empty set of qubits.')\n if num_qubits != len(qid_shape):\n raise ValueError('len(qid_shape) != num_qubits')\n self._qid_shape = qid_shape\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.duration)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.duration)\n\n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _apply_unitary_(self, args):\n return args.target_tensor # Identity.\n\n def _decompose_(self, qubits):\n return []\n\n def _trace_distance_bound_(self):\n return 0\n\n def __pow__(self, power):\n if power == 1 or power == -1:\n # The inverse of a wait is still a wait.\n return self\n # Other scalar exponents could scale the wait... but ultimately it is\n # ambiguous whether the user wanted to scale the duration or just wanted\n # to affect the unitary. 
Play it safe and fail.\n return NotImplemented\n\n def __str__(self) -> str:\n return f'WaitGate({self.duration})'\n\n def __repr__(self) -> str:\n return f'cirq.WaitGate({repr(self.duration)})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n d = protocols.obj_to_dict_helper(self, ['duration'])\n if len(self._qid_shape) != 1:\n d['num_qubits'] = len(self._qid_shape)\n if any(d != 2 for d in self._qid_shape):\n d['qid_shape'] = self._qid_shape\n return d\n\n @classmethod\n def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):\n return cls(\n duration=duration,\n num_qubits=num_qubits,\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _value_equality_values_(self) -> Any:\n return self.duration\n\n def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):\n return 'WAIT\\n'\n\n\ndef wait(\n *target: 'cirq.Qid',\n duration: 'cirq.DURATION_LIKE' = None,\n picos: Union[int, float, sympy.Basic] = 0,\n nanos: Union[int, float, sympy.Basic] = 0,\n micros: Union[int, float, sympy.Basic] = 0,\n millis: Union[int, float, sympy.Basic] = 0,\n) -> raw_types.Operation:\n \"\"\"Creates a WaitGate applied to all the given qubits.\n\n The duration can be specified as a DURATION_LIKE or using keyword args with\n numbers in the appropriate units. See Duration for details.\n\n Args:\n *target: The qubits that should wait.\n value: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n nanos: Nanoseconds to wait (see Duration).\n micros: Microseconds to wait (see Duration).\n millis: Milliseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(\n duration,\n picos=picos,\n nanos=nanos,\n micros=micros,\n millis=millis,\n ),\n qid_shape=protocols.qid_shape(target),\n ).on(*target)\n", "path": "cirq/ops/wait_gate.py"}]}
| 2,010 | 174 |
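
The wait_gate.py change recorded above only corrects the docstring of the `wait` helper (its nanos/micros/millis descriptions); behaviour is unchanged. As a quick illustration of how those keyword arguments feed `Duration` — a sketch that assumes `wait`, `Duration` and `LineQubit` are re-exported at the top level of `cirq`, as in recent releases:

```python
import cirq

q = cirq.LineQubit(0)

# Each keyword is converted with its own unit, so these two operations are equal:
op_ns = cirq.wait(q, nanos=500)
op_us = cirq.wait(q, micros=0.5)

assert op_ns.gate.duration == cirq.Duration(nanos=500)
assert op_ns.gate.duration == op_us.gate.duration
```
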
gh_patches_debug_1198
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-611
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Label Text Clipped Horizontally (Moved)
**Originally reported as a continuation of #576 by esbullington**
I think I'm having trouble with this same issue. I'm trying to use markup with a Label, and am finding that my Label text is cut off along the horizontal axis if I have markup set to True. This probably is only occurring with the latest development version, even after the above patch was pulled. The problem does not occur with Kivy 1.3.0. If needed, I can re-install the development version and make a screenshot, but for now I'm working with Kivy 1.3.0.
I've only started working with Kivy in the past few days, so I'm not yet in a place where I feel comfortable sending in a patch. (awesome framework, by the way, congrats on the great work!).
Oh, and it doesn't look like I can re-open the issue, so someone else may wish to do so, or else tell me to open another issue for the problem.
UPDATE: I copied markup.py from Kivy 1.3 to Kivy 1.4-dev and it resolved this issue for me. I may now have problems with rst, but at least my markup labels aren't cut in half.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/text/markup.py`
Content:
```
1 '''
2 Text Markup
3 ===========
4
5 .. versionadded:: 1.1.0
6
7 We provide a simple text-markup for inline text styling. The syntax look the
8 same as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.
9
10 A tag is defined as ``[tag]``, and might have a closed tag associated:
11 ``[/tag]``. Example of a markup text::
12
13 [b]Hello [color=ff0000]world[/b][/color]
14
15 The following tags are availables:
16
17 ``[b][/b]``
18 Activate bold text
19 ``[i][/i]``
20 Activate italic text
21 ``[font=<str>][/font]``
22 Change the font
23 ``[size=<integer>][/size]``
24 Change the font size
25 ``[color=#<color>][/color]``
26 Change the text color
27 ``[ref=<str>][/ref]``
28 Add an interactive zone. The reference + all the word box inside the
29 reference will be available in :data:`MarkupLabel.refs`
30 ``[anchor=<str>]``
31 Put an anchor in the text. You can get the position of your anchor within
32 the text with :data:`MarkupLabel.anchors`
33
34 If you need to escape the markup from the current text, use
35 :func:`kivy.utils.escape_markup`.
36 '''
37
38 __all__ = ('MarkupLabel', )
39
40 from kivy.graphics.texture import Texture
41 from kivy.utils import platform
42 from kivy.parser import parse_color
43 from kivy.logger import Logger
44 import re
45 from kivy.core.text import Label, LabelBase
46 from copy import copy
47
48 # We need to do this trick when documentation is generated
49 MarkupLabelBase = Label
50 if Label is None:
51 MarkupLabelBase = LabelBase
52
53
54 class MarkupLabel(MarkupLabelBase):
55 '''Markup text label.
56
57 See module documentation for more informations.
58 '''
59
60 def __init__(self, *largs, **kwargs):
61 self._style_stack = {}
62 self._refs = {}
63 super(MarkupLabel, self).__init__(*largs, **kwargs)
64
65 @property
66 def refs(self):
67 '''Get the bounding box of all the ``[ref=...]``::
68
69 { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }
70 '''
71 return self._refs
72
73 @property
74 def anchors(self):
75 '''Get the position of all the ``[anchor=...]``::
76
77 { 'anchorA': (x, y), 'anchorB': (x, y), ... }
78 '''
79 return self._anchors
80
81 @property
82 def markup(self):
83 '''Return the text with all the markup splitted::
84
85 >>> MarkupLabel('[b]Hello world[/b]').markup
86 >>> ('[b]', 'Hello world', '[/b]')
87
88 '''
89 s = re.split('(\[.*?\])', self.label)
90 s = [x for x in s if x != '']
91 return s
92
93 def _push_style(self, k):
94 if not k in self._style_stack:
95 self._style_stack[k] = []
96 self._style_stack[k].append(self.options[k])
97
98 def _pop_style(self, k):
99 if k not in self._style_stack or len(self._style_stack[k]) == 0:
100 Logger.warning('Label: pop style stack without push')
101 return
102 v = self._style_stack[k].pop()
103 self.options[k] = v
104
105 def render(self, real=False):
106 options = copy(self.options)
107 if not real:
108 ret = self._pre_render()
109 else:
110 ret = self._real_render()
111 self.options = options
112 return ret
113
114 def _pre_render(self):
115 # split markup, words, and lines
116 # result: list of word with position and width/height
117 # during the first pass, we don't care about h/valign
118 self._lines = lines = []
119 self._refs = {}
120 self._anchors = {}
121 spush = self._push_style
122 spop = self._pop_style
123 options = self.options
124 options['_ref'] = None
125 for item in self.markup:
126 if item == '[b]':
127 spush('bold')
128 options['bold'] = True
129 self.resolve_font_name()
130 elif item == '[/b]':
131 spop('bold')
132 self.resolve_font_name()
133 elif item == '[i]':
134 spush('italic')
135 options['italic'] = True
136 self.resolve_font_name()
137 elif item == '[/i]':
138 spop('italic')
139 self.resolve_font_name()
140 elif item[:6] == '[size=':
141 try:
142 size = int(item[6:-1])
143 except ValueError:
144 size = options['font_size']
145 spush('font_size')
146 options['font_size'] = size
147 elif item == '[/size]':
148 spop('font_size')
149 elif item[:7] == '[color=':
150 color = parse_color(item[7:-1])
151 spush('color')
152 options['color'] = color
153 elif item == '[/color]':
154 spop('color')
155 elif item[:6] == '[font=':
156 fontname = item[6:-1]
157 spush('font_name')
158 options['font_name'] = fontname
159 self.resolve_font_name()
160 elif item == '[/font]':
161 spop('font_name')
162 self.resolve_font_name()
163 elif item[:5] == '[ref=':
164 ref = item[5:-1]
165 spush('_ref')
166 options['_ref'] = ref
167 elif item == '[/ref]':
168 spop('_ref')
169 elif item[:8] == '[anchor=':
170 ref = item[8:-1]
171 if len(lines):
172 x, y = lines[-1][0:2]
173 else:
174 x = y = 0
175 self._anchors[ref] = x, y
176 else:
177 item = item.replace('&bl;', '[').replace(
178 '&br;', ']').replace('&', '&')
179 self._pre_render_label(item, options, lines)
180
181 # calculate the texture size
182 w, h = self.text_size
183 if h < 0:
184 h = None
185 if w < 0:
186 w = None
187 if w is None:
188 w = max([line[0] for line in lines])
189 if h is None:
190 h = sum([line[1] for line in lines])
191 return w, h
192
193 def _pre_render_label(self, word, options, lines):
194 # precalculate id/name
195 if not self.fontid in self._cache_glyphs:
196 self._cache_glyphs[self.fontid] = {}
197 cache = self._cache_glyphs[self.fontid]
198
199 # verify that each glyph have size
200 glyphs = list(set(word))
201 get_extents = self.get_extents
202 for glyph in glyphs:
203 if not glyph in cache:
204 cache[glyph] = get_extents(glyph)
205
206 # get last line information
207 if len(lines):
208 line = lines[-1]
209 else:
210 # line-> line width, line height, words
211 # words -> (w, h, word)...
212 line = [0, 0, []]
213 lines.append(line)
214
215 # extract user limitation
216 uw, uh = self.text_size
217
218 # split the word
219 for part in re.split(r'( |\n)', word):
220
221 if part == '':
222 continue
223
224 if part == '\n':
225 # put a new line!
226 line = [0, 0, []]
227 lines.append(line)
228 continue
229
230 # get current line information
231 lw, lh = line[:2]
232
233 # calculate the size of the part
234 # (extract all extents of the part,
235 # calculate width through extents due to kerning
236 # and get the maximum height)
237 pg = [cache[g] for g in part]
238 pw = get_extents(part)[0]
239 ph = max([g[1] for g in pg])
240
241 options = copy(options)
242
243 # check if the part can be put in the line
244 if uw is None or lw + pw < uw:
245 # no limitation or part can be contained in the line
246 # then append the part to the line
247 line[2].append((pw, ph, part, options))
248 # and update the line size
249 line[0] += pw
250 line[1] = max(line[1], ph)
251 else:
252 # part can't be put in the line, do a new one...
253 line = [pw, ph, [(pw, ph, part, options)]]
254 lines.append(line)
255
256 def _real_render(self):
257 # use the lines to do the rendering !
258 self._render_begin()
259
260 r = self._render_text
261
262 # convert halign/valign to int, faster comparaison
263 av = {'top': 0, 'middle': 1, 'bottom': 2}[self.options['valign']]
264 ah = {'left': 0, 'center': 1, 'right': 2}[self.options['halign']]
265
266 y = 0
267 w, h = self._size
268 refs = self._refs
269 no_of_lines = len(self._lines)-1
270
271 for line in self._lines:
272 lh = line[1]
273 lw = line[0]
274
275 # horizontal alignement
276 if ah == 0:
277 x = 0
278 elif ah == 1:
279 x = int((w - lw) / 2)
280 else:
281 x = w - lw
282
283 # vertical alignement
284 if y == 0:
285 if av == 1:
286 y = int((h - (lh*no_of_lines))/2)
287 elif av == 2:
288 y = h - (lh*(no_of_lines))
289
290
291 for pw, ph, part, options in line[2]:
292 self.options = options
293 r(part, x, y + (lh - ph) / 1.25)
294
295 # should we record refs ?
296 ref = options['_ref']
297 if ref is not None:
298 if not ref in refs:
299 refs[ref] = []
300 refs[ref].append((x, y, x + pw, y + ph))
301
302 #print 'render', repr(part), x, y, (lh, ph), options
303 x += pw
304 y += line[1]
305
306 # get data from provider
307 data = self._render_end()
308 assert(data)
309
310 # create texture is necessary
311 texture = self.texture
312 mipmap = self.options['mipmap']
313 if texture is None:
314 if data is None:
315 if platform() in ('android', 'ios'):
316 colorfmt = 'rgba'
317 else:
318 colorfmt = 'luminance_alpha'
319 texture = Texture.create(
320 size=self.size, colorfmt=colorfmt,
321 mipmap=mipmap)
322 else:
323 texture = Texture.create_from_data(data, mipmap=mipmap)
324 texture.flip_vertical()
325 elif self.width != texture.width or self.height != texture.height:
326 if data is None:
327 texture = Texture.create(size=self.size, mipmap=mipmap)
328 else:
329 texture = Texture.create_from_data(data, mipmap=mipmap)
330 texture.flip_vertical()
331
332 # update texture
333 self.texture = texture
334 self.texture.blit_data(data)
335
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/core/text/markup.py b/kivy/core/text/markup.py
--- a/kivy/core/text/markup.py
+++ b/kivy/core/text/markup.py
@@ -266,7 +266,7 @@
y = 0
w, h = self._size
refs = self._refs
- no_of_lines = len(self._lines)-1
+ no_of_lines = len(self._lines)
for line in self._lines:
lh = line[1]
|
{"golden_diff": "diff --git a/kivy/core/text/markup.py b/kivy/core/text/markup.py\n--- a/kivy/core/text/markup.py\n+++ b/kivy/core/text/markup.py\n@@ -266,7 +266,7 @@\n y = 0\n w, h = self._size\n refs = self._refs\n- no_of_lines = len(self._lines)-1\n+ no_of_lines = len(self._lines)\n \n for line in self._lines:\n lh = line[1]\n", "issue": "Label Text Clipped Horizontally (Moved)\n**Originally reported as a continuation of #576 by esbullington**\n\nI think I'm having trouble with this same issue. I'm trying to use markup with a Label, and am finding that my Label text is cut-off along the horizontal axis if I have markup set to True. This probably is only occurring with the latest development version, even after the above path was pulled. The problem does not occur with Kivy 1.3.0. If needed, I can re-install the development version and make a screen shot, but for now I'm working with Kivy 1.3.0. \n\nI've only started working with Kivy in the past few days, so I'm not yet in a place where I feel comfortable sending in a patch. (awesome framework, by the way, congrats on the great work!).\n\nOh, and it doesn't look like I can re-open the issue, so someone else may wish to do so, or else tell me to open another issue for the problem.\n\nUPDATE: I coped markup.py from Kivy 1.3 to Kivy1.4-dev and it resolved this issue for me. I may now have problems with rst, but at least my markdown labels aren't cut in half.\n\n", "before_files": [{"content": "'''\nText Markup\n===========\n\n.. versionadded:: 1.1.0\n\nWe provide a simple text-markup for inline text styling. The syntax look the\nsame as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.\n\nA tag is defined as ``[tag]``, and might have a closed tag associated:\n``[/tag]``. Example of a markup text::\n\n [b]Hello [color=ff0000]world[/b][/color]\n\nThe following tags are availables:\n\n``[b][/b]``\n Activate bold text\n``[i][/i]``\n Activate italic text\n``[font=<str>][/font]``\n Change the font\n``[size=<integer>][/size]``\n Change the font size\n``[color=#<color>][/color]``\n Change the text color\n``[ref=<str>][/ref]``\n Add an interactive zone. The reference + all the word box inside the\n reference will be available in :data:`MarkupLabel.refs`\n``[anchor=<str>]``\n Put an anchor in the text. You can get the position of your anchor within\n the text with :data:`MarkupLabel.anchors`\n\nIf you need to escape the markup from the current text, use\n:func:`kivy.utils.escape_markup`.\n'''\n\n__all__ = ('MarkupLabel', )\n\nfrom kivy.graphics.texture import Texture\nfrom kivy.utils import platform\nfrom kivy.parser import parse_color\nfrom kivy.logger import Logger\nimport re\nfrom kivy.core.text import Label, LabelBase\nfrom copy import copy\n\n# We need to do this trick when documentation is generated\nMarkupLabelBase = Label\nif Label is None:\n MarkupLabelBase = LabelBase\n\n\nclass MarkupLabel(MarkupLabelBase):\n '''Markup text label.\n\n See module documentation for more informations.\n '''\n\n def __init__(self, *largs, **kwargs):\n self._style_stack = {}\n self._refs = {}\n super(MarkupLabel, self).__init__(*largs, **kwargs)\n\n @property\n def refs(self):\n '''Get the bounding box of all the ``[ref=...]``::\n\n { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }\n '''\n return self._refs\n\n @property\n def anchors(self):\n '''Get the position of all the ``[anchor=...]``::\n\n { 'anchorA': (x, y), 'anchorB': (x, y), ... 
}\n '''\n return self._anchors\n\n @property\n def markup(self):\n '''Return the text with all the markup splitted::\n\n >>> MarkupLabel('[b]Hello world[/b]').markup\n >>> ('[b]', 'Hello world', '[/b]')\n\n '''\n s = re.split('(\\[.*?\\])', self.label)\n s = [x for x in s if x != '']\n return s\n\n def _push_style(self, k):\n if not k in self._style_stack:\n self._style_stack[k] = []\n self._style_stack[k].append(self.options[k])\n\n def _pop_style(self, k):\n if k not in self._style_stack or len(self._style_stack[k]) == 0:\n Logger.warning('Label: pop style stack without push')\n return\n v = self._style_stack[k].pop()\n self.options[k] = v\n\n def render(self, real=False):\n options = copy(self.options)\n if not real:\n ret = self._pre_render()\n else:\n ret = self._real_render()\n self.options = options\n return ret\n\n def _pre_render(self):\n # split markup, words, and lines\n # result: list of word with position and width/height\n # during the first pass, we don't care about h/valign\n self._lines = lines = []\n self._refs = {}\n self._anchors = {}\n spush = self._push_style\n spop = self._pop_style\n options = self.options\n options['_ref'] = None\n for item in self.markup:\n if item == '[b]':\n spush('bold')\n options['bold'] = True\n self.resolve_font_name()\n elif item == '[/b]':\n spop('bold')\n self.resolve_font_name()\n elif item == '[i]':\n spush('italic')\n options['italic'] = True\n self.resolve_font_name()\n elif item == '[/i]':\n spop('italic')\n self.resolve_font_name()\n elif item[:6] == '[size=':\n try:\n size = int(item[6:-1])\n except ValueError:\n size = options['font_size']\n spush('font_size')\n options['font_size'] = size\n elif item == '[/size]':\n spop('font_size')\n elif item[:7] == '[color=':\n color = parse_color(item[7:-1])\n spush('color')\n options['color'] = color\n elif item == '[/color]':\n spop('color')\n elif item[:6] == '[font=':\n fontname = item[6:-1]\n spush('font_name')\n options['font_name'] = fontname\n self.resolve_font_name()\n elif item == '[/font]':\n spop('font_name')\n self.resolve_font_name()\n elif item[:5] == '[ref=':\n ref = item[5:-1]\n spush('_ref')\n options['_ref'] = ref\n elif item == '[/ref]':\n spop('_ref')\n elif item[:8] == '[anchor=':\n ref = item[8:-1]\n if len(lines):\n x, y = lines[-1][0:2]\n else:\n x = y = 0\n self._anchors[ref] = x, y\n else:\n item = item.replace('&bl;', '[').replace(\n '&br;', ']').replace('&', '&')\n self._pre_render_label(item, options, lines)\n\n # calculate the texture size\n w, h = self.text_size\n if h < 0:\n h = None\n if w < 0:\n w = None\n if w is None:\n w = max([line[0] for line in lines])\n if h is None:\n h = sum([line[1] for line in lines])\n return w, h\n\n def _pre_render_label(self, word, options, lines):\n # precalculate id/name\n if not self.fontid in self._cache_glyphs:\n self._cache_glyphs[self.fontid] = {}\n cache = self._cache_glyphs[self.fontid]\n\n # verify that each glyph have size\n glyphs = list(set(word))\n get_extents = self.get_extents\n for glyph in glyphs:\n if not glyph in cache:\n cache[glyph] = get_extents(glyph)\n\n # get last line information\n if len(lines):\n line = lines[-1]\n else:\n # line-> line width, line height, words\n # words -> (w, h, word)...\n line = [0, 0, []]\n lines.append(line)\n\n # extract user limitation\n uw, uh = self.text_size\n\n # split the word\n for part in re.split(r'( |\\n)', word):\n\n if part == '':\n continue\n\n if part == '\\n':\n # put a new line!\n line = [0, 0, []]\n lines.append(line)\n continue\n\n # get current line 
information\n lw, lh = line[:2]\n\n # calculate the size of the part\n # (extract all extents of the part,\n # calculate width through extents due to kerning\n # and get the maximum height)\n pg = [cache[g] for g in part]\n pw = get_extents(part)[0]\n ph = max([g[1] for g in pg])\n\n options = copy(options)\n\n # check if the part can be put in the line\n if uw is None or lw + pw < uw:\n # no limitation or part can be contained in the line\n # then append the part to the line\n line[2].append((pw, ph, part, options))\n # and update the line size\n line[0] += pw\n line[1] = max(line[1], ph)\n else:\n # part can't be put in the line, do a new one...\n line = [pw, ph, [(pw, ph, part, options)]]\n lines.append(line)\n\n def _real_render(self):\n # use the lines to do the rendering !\n self._render_begin()\n\n r = self._render_text\n\n # convert halign/valign to int, faster comparaison\n av = {'top': 0, 'middle': 1, 'bottom': 2}[self.options['valign']]\n ah = {'left': 0, 'center': 1, 'right': 2}[self.options['halign']]\n\n y = 0\n w, h = self._size\n refs = self._refs\n no_of_lines = len(self._lines)-1\n\n for line in self._lines:\n lh = line[1]\n lw = line[0]\n\n # horizontal alignement\n if ah == 0:\n x = 0\n elif ah == 1:\n x = int((w - lw) / 2)\n else:\n x = w - lw\n\n # vertical alignement\n if y == 0:\n if av == 1:\n y = int((h - (lh*no_of_lines))/2)\n elif av == 2:\n y = h - (lh*(no_of_lines))\n\n\n for pw, ph, part, options in line[2]:\n self.options = options\n r(part, x, y + (lh - ph) / 1.25)\n\n # should we record refs ?\n ref = options['_ref']\n if ref is not None:\n if not ref in refs:\n refs[ref] = []\n refs[ref].append((x, y, x + pw, y + ph))\n\n #print 'render', repr(part), x, y, (lh, ph), options\n x += pw\n y += line[1]\n\n # get data from provider\n data = self._render_end()\n assert(data)\n\n # create texture is necessary\n texture = self.texture\n mipmap = self.options['mipmap']\n if texture is None:\n if data is None:\n if platform() in ('android', 'ios'):\n colorfmt = 'rgba'\n else:\n colorfmt = 'luminance_alpha'\n texture = Texture.create(\n size=self.size, colorfmt=colorfmt,\n mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n elif self.width != texture.width or self.height != texture.height:\n if data is None:\n texture = Texture.create(size=self.size, mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n\n # update texture\n self.texture = texture\n self.texture.blit_data(data)\n\n", "path": "kivy/core/text/markup.py"}], "after_files": [{"content": "'''\nText Markup\n===========\n\n.. versionadded:: 1.1.0\n\nWe provide a simple text-markup for inline text styling. The syntax look the\nsame as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.\n\nA tag is defined as ``[tag]``, and might have a closed tag associated:\n``[/tag]``. Example of a markup text::\n\n [b]Hello [color=ff0000]world[/b][/color]\n\nThe following tags are availables:\n\n``[b][/b]``\n Activate bold text\n``[i][/i]``\n Activate italic text\n``[font=<str>][/font]``\n Change the font\n``[size=<integer>][/size]``\n Change the font size\n``[color=#<color>][/color]``\n Change the text color\n``[ref=<str>][/ref]``\n Add an interactive zone. The reference + all the word box inside the\n reference will be available in :data:`MarkupLabel.refs`\n``[anchor=<str>]``\n Put an anchor in the text. 
You can get the position of your anchor within\n the text with :data:`MarkupLabel.anchors`\n\nIf you need to escape the markup from the current text, use\n:func:`kivy.utils.escape_markup`.\n'''\n\n__all__ = ('MarkupLabel', )\n\nfrom kivy.graphics.texture import Texture\nfrom kivy.utils import platform\nfrom kivy.parser import parse_color\nfrom kivy.logger import Logger\nimport re\nfrom kivy.core.text import Label, LabelBase\nfrom copy import copy\n\n# We need to do this trick when documentation is generated\nMarkupLabelBase = Label\nif Label is None:\n MarkupLabelBase = LabelBase\n\n\nclass MarkupLabel(MarkupLabelBase):\n '''Markup text label.\n\n See module documentation for more informations.\n '''\n\n def __init__(self, *largs, **kwargs):\n self._style_stack = {}\n self._refs = {}\n super(MarkupLabel, self).__init__(*largs, **kwargs)\n\n @property\n def refs(self):\n '''Get the bounding box of all the ``[ref=...]``::\n\n { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }\n '''\n return self._refs\n\n @property\n def anchors(self):\n '''Get the position of all the ``[anchor=...]``::\n\n { 'anchorA': (x, y), 'anchorB': (x, y), ... }\n '''\n return self._anchors\n\n @property\n def markup(self):\n '''Return the text with all the markup splitted::\n\n >>> MarkupLabel('[b]Hello world[/b]').markup\n >>> ('[b]', 'Hello world', '[/b]')\n\n '''\n s = re.split('(\\[.*?\\])', self.label)\n s = [x for x in s if x != '']\n return s\n\n def _push_style(self, k):\n if not k in self._style_stack:\n self._style_stack[k] = []\n self._style_stack[k].append(self.options[k])\n\n def _pop_style(self, k):\n if k not in self._style_stack or len(self._style_stack[k]) == 0:\n Logger.warning('Label: pop style stack without push')\n return\n v = self._style_stack[k].pop()\n self.options[k] = v\n\n def render(self, real=False):\n options = copy(self.options)\n if not real:\n ret = self._pre_render()\n else:\n ret = self._real_render()\n self.options = options\n return ret\n\n def _pre_render(self):\n # split markup, words, and lines\n # result: list of word with position and width/height\n # during the first pass, we don't care about h/valign\n self._lines = lines = []\n self._refs = {}\n self._anchors = {}\n spush = self._push_style\n spop = self._pop_style\n options = self.options\n options['_ref'] = None\n for item in self.markup:\n if item == '[b]':\n spush('bold')\n options['bold'] = True\n self.resolve_font_name()\n elif item == '[/b]':\n spop('bold')\n self.resolve_font_name()\n elif item == '[i]':\n spush('italic')\n options['italic'] = True\n self.resolve_font_name()\n elif item == '[/i]':\n spop('italic')\n self.resolve_font_name()\n elif item[:6] == '[size=':\n try:\n size = int(item[6:-1])\n except ValueError:\n size = options['font_size']\n spush('font_size')\n options['font_size'] = size\n elif item == '[/size]':\n spop('font_size')\n elif item[:7] == '[color=':\n color = parse_color(item[7:-1])\n spush('color')\n options['color'] = color\n elif item == '[/color]':\n spop('color')\n elif item[:6] == '[font=':\n fontname = item[6:-1]\n spush('font_name')\n options['font_name'] = fontname\n self.resolve_font_name()\n elif item == '[/font]':\n spop('font_name')\n self.resolve_font_name()\n elif item[:5] == '[ref=':\n ref = item[5:-1]\n spush('_ref')\n options['_ref'] = ref\n elif item == '[/ref]':\n spop('_ref')\n elif item[:8] == '[anchor=':\n ref = item[8:-1]\n if len(lines):\n x, y = lines[-1][0:2]\n else:\n x = y = 0\n self._anchors[ref] = x, y\n else:\n item = item.replace('&bl;', '[').replace(\n 
'&br;', ']').replace('&', '&')\n self._pre_render_label(item, options, lines)\n\n # calculate the texture size\n w, h = self.text_size\n if h < 0:\n h = None\n if w < 0:\n w = None\n if w is None:\n w = max([line[0] for line in lines])\n if h is None:\n h = sum([line[1] for line in lines])\n return w, h\n\n def _pre_render_label(self, word, options, lines):\n # precalculate id/name\n if not self.fontid in self._cache_glyphs:\n self._cache_glyphs[self.fontid] = {}\n cache = self._cache_glyphs[self.fontid]\n\n # verify that each glyph have size\n glyphs = list(set(word))\n get_extents = self.get_extents\n for glyph in glyphs:\n if not glyph in cache:\n cache[glyph] = get_extents(glyph)\n\n # get last line information\n if len(lines):\n line = lines[-1]\n else:\n # line-> line width, line height, words\n # words -> (w, h, word)...\n line = [0, 0, []]\n lines.append(line)\n\n # extract user limitation\n uw, uh = self.text_size\n\n # split the word\n for part in re.split(r'( |\\n)', word):\n\n if part == '':\n continue\n\n if part == '\\n':\n # put a new line!\n line = [0, 0, []]\n lines.append(line)\n continue\n\n # get current line information\n lw, lh = line[:2]\n\n # calculate the size of the part\n # (extract all extents of the part,\n # calculate width through extents due to kerning\n # and get the maximum height)\n pg = [cache[g] for g in part]\n pw = get_extents(part)[0]\n ph = max([g[1] for g in pg])\n\n options = copy(options)\n\n # check if the part can be put in the line\n if uw is None or lw + pw < uw:\n # no limitation or part can be contained in the line\n # then append the part to the line\n line[2].append((pw, ph, part, options))\n # and update the line size\n line[0] += pw\n line[1] = max(line[1], ph)\n else:\n # part can't be put in the line, do a new one...\n line = [pw, ph, [(pw, ph, part, options)]]\n lines.append(line)\n\n def _real_render(self):\n # use the lines to do the rendering !\n self._render_begin()\n\n r = self._render_text\n\n # convert halign/valign to int, faster comparaison\n av = {'top': 0, 'middle': 1, 'bottom': 2}[self.options['valign']]\n ah = {'left': 0, 'center': 1, 'right': 2}[self.options['halign']]\n\n y = 0\n w, h = self._size\n refs = self._refs\n no_of_lines = len(self._lines)\n\n for line in self._lines:\n lh = line[1]\n lw = line[0]\n\n # horizontal alignement\n if ah == 0:\n x = 0\n elif ah == 1:\n x = int((w - lw) / 2)\n else:\n x = w - lw\n\n # vertical alignement\n if y == 0:\n if av == 1:\n y = int((h - (lh*no_of_lines))/2)\n elif av == 2:\n y = h - (lh*(no_of_lines))\n\n\n for pw, ph, part, options in line[2]:\n self.options = options\n r(part, x, y + (lh - ph) / 1.25)\n\n # should we record refs ?\n ref = options['_ref']\n if ref is not None:\n if not ref in refs:\n refs[ref] = []\n refs[ref].append((x, y, x + pw, y + ph))\n\n #print 'render', repr(part), x, y, (lh, ph), options\n x += pw\n y += line[1]\n\n # get data from provider\n data = self._render_end()\n assert(data)\n\n # create texture is necessary\n texture = self.texture\n mipmap = self.options['mipmap']\n if texture is None:\n if data is None:\n if platform() in ('android', 'ios'):\n colorfmt = 'rgba'\n else:\n colorfmt = 'luminance_alpha'\n texture = Texture.create(\n size=self.size, colorfmt=colorfmt,\n mipmap=mipmap)\n else:\n texture = Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n elif self.width != texture.width or self.height != texture.height:\n if data is None:\n texture = Texture.create(size=self.size, mipmap=mipmap)\n else:\n texture = 
Texture.create_from_data(data, mipmap=mipmap)\n texture.flip_vertical()\n\n # update texture\n self.texture = texture\n self.texture.blit_data(data)\n\n", "path": "kivy/core/text/markup.py"}]}
| 3,973 | 116 |
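
The one-character fix above removes an off-by-one: `_real_render` used `len(self._lines) - 1` as the number of rendered lines, so the middle/bottom vertical-alignment math treated the text block as one line shorter than it really is — for a single-line markup label, as a block of zero height. A sketch with purely illustrative numbers:

```python
# Illustrative numbers only: one rendered line of height 20 on a 50-pixel-high texture.
h, lh, n_lines = 50, 20, 1

y_buggy = int((h - lh * (n_lines - 1)) / 2)  # -> 25, centres a block of height 0
y_fixed = int((h - lh * n_lines) / 2)        # -> 15, centres the real 20-pixel block

print(y_buggy, y_fixed)
```
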
gh_patches_debug_2325
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-256
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`concat` with the last axis fails on py3
Same problem in `concat` as #253
@ShigekiKarita reported this problem too. Thanks!
https://gist.github.com/ShigekiKarita/4293f886765a1ed4a144
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/concat.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7 _args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'
8 _preamble = '''
9 #define COPY(statement) \
10 int l = i / (rdim * cdimx); \
11 int c = i / rdim % cdimx + coffset; \
12 int r = i % rdim; \
13 int idx = r + rdim * (c + cdimy * l); \
14 statement;
15 '''
16
17
18 class Concat(function.Function):
19
20 """Concatenate multiple tensors towards specified axis."""
21
22 # concat along the channel dimension by default
23 def __init__(self, axis=1):
24 self.axis = axis
25
26 def check_type_forward(self, in_types):
27 type_check.expect(in_types.size() > 0)
28 type_check.expect(in_types[0].ndim >
29 type_check.Variable(self.axis, 'axis'))
30
31 ndim = in_types[0].ndim.eval()
32 for i in range(1, in_types.size().eval()):
33 type_check.expect(
34 in_types[0].dtype == in_types[i].dtype,
35 in_types[0].ndim == in_types[i].ndim,
36 )
37 for d in range(0, ndim):
38 if d == self.axis:
39 continue
40 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
41
42 def check_type_backward(self, in_types, out_types):
43 type_check.expect(
44 in_types.size() > 0,
45 out_types.size() == 1,
46 )
47 y_type, = out_types
48
49 type_check.expect(y_type.dtype == in_types[0].dtype)
50 ndim = in_types[0].ndim.eval()
51 concat_size = sum(typ.shape[self.axis] for typ in in_types)
52 type_check.expect(concat_size == y_type.shape[self.axis])
53
54 for d in range(0, ndim):
55 if d == self.axis:
56 continue
57 type_check.expect(y_type.shape[d] == in_types[0].shape[d])
58
59 def forward_cpu(self, xs):
60 return numpy.concatenate(xs, axis=self.axis),
61
62 def forward_gpu(self, xs):
63 # TODO(beam2d): Unify the process into a single kernel.
64 shape = list(xs[0].shape)
65 for x in xs[1:]:
66 shape[self.axis] += x.shape[self.axis]
67 self.shape = shape
68
69 y = cuda.empty(shape, dtype=xs[0].dtype)
70 self.cdimy = y.shape[self.axis]
71 self.rdim = numpy.prod(shape[self.axis + 1:])
72
73 coffset = 0
74 kernel = cuda.elementwise(
75 _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)
76 for x in xs:
77 cdimx = x.shape[self.axis]
78 kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)
79 coffset += cdimx
80
81 return y,
82
83 def backward_cpu(self, xs, gy):
84 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
85 return numpy.split(gy[0], sizes, axis=self.axis)
86
87 def backward_gpu(self, xs, gy):
88 gxs = tuple(cuda.empty_like(x) for x in xs)
89
90 coffset = 0
91 kernel = cuda.elementwise(
92 _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)
93 for gx in gxs:
94 cdimx = gx.shape[self.axis]
95 kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)
96 coffset += cdimx
97
98 return gxs
99
100
101 def concat(xs, axis=1):
102 """Concatenates given variables along an axis.
103
104 Args:
105 xs (tuple of Variables): Variables to be concatenated.
106 axis (int): Axis that the input arrays are concatenated along.
107
108 Returns:
109 ~chainer.Variable: Output variable.
110
111 """
112 return Concat(axis=axis)(*xs)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py
--- a/chainer/functions/concat.py
+++ b/chainer/functions/concat.py
@@ -68,7 +68,7 @@
y = cuda.empty(shape, dtype=xs[0].dtype)
self.cdimy = y.shape[self.axis]
- self.rdim = numpy.prod(shape[self.axis + 1:])
+ self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)
coffset = 0
kernel = cuda.elementwise(
|
{"golden_diff": "diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py\n--- a/chainer/functions/concat.py\n+++ b/chainer/functions/concat.py\n@@ -68,7 +68,7 @@\n \n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n- self.rdim = numpy.prod(shape[self.axis + 1:])\n+ self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n \n coffset = 0\n kernel = cuda.elementwise(\n", "issue": "`concat` with the last axis fails on py3\nSame problem in `concat` as #253 \n\n@ShigekiKarita reported this problem too. Thanks!\nhttps://gist.github.com/ShigekiKarita/4293f886765a1ed4a144\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:])\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are 
concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py"}]}
| 1,495 | 123 |
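
The failure behind the patch above: when `axis` is the last axis, `shape[self.axis + 1:]` is empty, and `numpy.prod` of an empty sequence returns the float `1.0`, which then reaches the elementwise kernel's `int rdim` parameter. Forcing `dtype=int` keeps the empty product an integer. A minimal reproduction of just the NumPy behaviour:

```python
import numpy

shape = [4, 3, 2]
axis = 2  # concatenating along the last axis

rdim_old = numpy.prod(shape[axis + 1:])             # 1.0 (numpy.float64): empty product
rdim_new = numpy.prod(shape[axis + 1:], dtype=int)  # 1 (integer), matching the patched code

print(type(rdim_old), type(rdim_new))
```
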
gh_patches_debug_12504
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-9990
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker: Enable WEBLATE_GITLAB_CREDENTIALS environment variable
### Describe the problem
Right now it seems I can use gitlab_username and gitlab_token variables. But when I try to use gitlab_credentials:
> WEBLATE_GITLAB_CREDENTIALS: "git.duniter.org": {username: weblate,token: XXXXXXXXXXXXXXX}
I get this error:
> ERROR: yaml.scanner.ScannerError: mapping values are not allowed here
> in "./docker-compose.override.yml", line 17, column 52
### Describe the solution you'd like
Add weblate_gitlab_credentials support
### Describe alternatives you've considered
_No response_
### Screenshots
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/utils/environment.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from __future__ import annotations
6
7 import os
8
9
10 def get_env_str(
11 name: str,
12 default: str | None = None,
13 required: bool = False,
14 fallback_name: str | None = None,
15 ) -> str:
16 file_env = f"{name}_FILE"
17 if filename := os.environ.get(file_env):
18 try:
19 with open(filename) as handle:
20 result = handle.read()
21 except OSError as error:
22 raise ValueError(
23 f"Failed to open {filename} as specified by {file_env}: {error}"
24 ) from error
25 else:
26 if fallback_name and name not in os.environ:
27 name = fallback_name
28 result = os.environ.get(name, default)
29 if required and not result:
30 raise ValueError(f"{name} has to be configured!")
31 return result
32
33
34 def get_env_list(name: str, default: list[str] | None = None) -> list[str]:
35 """Helper to get list from environment."""
36 if name not in os.environ:
37 return default or []
38 return os.environ[name].split(",")
39
40
41 def get_env_map(name: str, default: dict[str, str] | None = None) -> dict[str, str]:
42 """
43 Helper to get mapping from environment.
44
45 parses 'full_name:name,email:mail' into {'email': 'mail', 'full_name': 'name'}
46 """
47 if os.environ.get(name):
48 return dict(e.split(":") for e in os.environ[name].split(","))
49 return default or {}
50
51
52 def get_env_int(name: str, default: int = 0) -> int:
53 """Helper to get integer value from environment."""
54 if name not in os.environ:
55 return default
56 try:
57 return int(os.environ[name])
58 except ValueError as error:
59 raise ValueError(f"{name} is not an integer: {error}") from error
60
61
62 def get_env_float(name: str, default: float = 0.0) -> float:
63 """Helper to get float value from environment."""
64 if name not in os.environ:
65 return default
66 try:
67 return float(os.environ[name])
68 except ValueError as error:
69 raise ValueError(f"{name} is not an float: {error}") from error
70
71
72 def get_env_bool(name: str, default: bool = False) -> bool:
73 """Helper to get boolean value from environment."""
74 if name not in os.environ:
75 return default
76 true_values = {"true", "yes", "1"}
77 return os.environ[name].lower() in true_values
78
79
80 def modify_env_list(current: list[str], name: str) -> list[str]:
81 """Helper to modify list (for example checks)."""
82 for item in reversed(get_env_list(f"WEBLATE_ADD_{name}")):
83 current.insert(0, item)
84 for item in get_env_list(f"WEBLATE_REMOVE_{name}"):
85 current.remove(item)
86 return current
87
88
89 def get_env_credentials(
90 name: str,
91 ) -> dict[str, dict[str, str]]:
92 """Parses VCS integration credentials."""
93 username = os.environ.get(f"WEBLATE_{name}_USERNAME")
94 token = os.environ.get(f"WEBLATE_{name}_TOKEN")
95 host = os.environ.get(f"WEBLATE_{name}_HOST")
96
97 if not host and (username or token):
98 raise ValueError(
99 f"Incomplete {name}_CREDENTIALS configuration: missing WEBLATE_{name}_HOST"
100 )
101 return {host: {"username": username, "token": token}}
102
103
104 def get_env_ratelimit(name: str, default: str) -> str:
105 value = os.environ.get(name, default)
106
107 # Taken from rest_framework.throttling.SimpleRateThrottle.parse_rate
108 # it can not be imported here as that breaks config loading for
109 # rest_framework
110
111 try:
112 num, period = value.split("/")
113 except ValueError as error:
114 raise ValueError(f"Could not parse {name}: {error}") from error
115 if not num.isdigit():
116 raise ValueError(f"Could not parse {name}: rate is not numeric: {num}")
117 if period[0] not in ("s", "m", "h", "d"):
118 raise ValueError(f"Could not parse {name}: unknown period: {period}")
119
120 return value
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/weblate/utils/environment.py b/weblate/utils/environment.py
--- a/weblate/utils/environment.py
+++ b/weblate/utils/environment.py
@@ -4,6 +4,7 @@
from __future__ import annotations
+import ast
import os
@@ -90,6 +91,8 @@
name: str,
) -> dict[str, dict[str, str]]:
"""Parses VCS integration credentials."""
+ if credentials := get_env_str(f"WEBLATE_{name}_CREDENTIALS"):
+ return ast.literal_eval(credentials)
username = os.environ.get(f"WEBLATE_{name}_USERNAME")
token = os.environ.get(f"WEBLATE_{name}_TOKEN")
host = os.environ.get(f"WEBLATE_{name}_HOST")
|
{"golden_diff": "diff --git a/weblate/utils/environment.py b/weblate/utils/environment.py\n--- a/weblate/utils/environment.py\n+++ b/weblate/utils/environment.py\n@@ -4,6 +4,7 @@\n \n from __future__ import annotations\n \n+import ast\n import os\n \n \n@@ -90,6 +91,8 @@\n name: str,\n ) -> dict[str, dict[str, str]]:\n \"\"\"Parses VCS integration credentials.\"\"\"\n+ if credentials := get_env_str(f\"WEBLATE_{name}_CREDENTIALS\"):\n+ return ast.literal_eval(credentials)\n username = os.environ.get(f\"WEBLATE_{name}_USERNAME\")\n token = os.environ.get(f\"WEBLATE_{name}_TOKEN\")\n host = os.environ.get(f\"WEBLATE_{name}_HOST\")\n", "issue": "docker: Enable WEBLATE_GITLAB_CREDENTIALS environment variable\n### Describe the problem\n\nRight now it seems I can use gitlab_username and gitlab_token variables. But when I try to use gitlab_credentials:\r\n\r\n> WEBLATE_GITLAB_CREDENTIALS: \"git.duniter.org\": {username: weblate,token: XXXXXXXXXXXXXXX}\r\n\r\nI get this error:\r\n\r\n> ERROR: yaml.scanner.ScannerError: mapping values are not allowed here\r\n> in \"./docker-compose.override.yml\", line 17, column 52\r\n\n\n### Describe the solution you'd like\n\nAdd weblate_gitlab_credentials support\n\n### Describe alternatives you've considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nimport os\n\n\ndef get_env_str(\n name: str,\n default: str | None = None,\n required: bool = False,\n fallback_name: str | None = None,\n) -> str:\n file_env = f\"{name}_FILE\"\n if filename := os.environ.get(file_env):\n try:\n with open(filename) as handle:\n result = handle.read()\n except OSError as error:\n raise ValueError(\n f\"Failed to open {filename} as specified by {file_env}: {error}\"\n ) from error\n else:\n if fallback_name and name not in os.environ:\n name = fallback_name\n result = os.environ.get(name, default)\n if required and not result:\n raise ValueError(f\"{name} has to be configured!\")\n return result\n\n\ndef get_env_list(name: str, default: list[str] | None = None) -> list[str]:\n \"\"\"Helper to get list from environment.\"\"\"\n if name not in os.environ:\n return default or []\n return os.environ[name].split(\",\")\n\n\ndef get_env_map(name: str, default: dict[str, str] | None = None) -> dict[str, str]:\n \"\"\"\n Helper to get mapping from environment.\n\n parses 'full_name:name,email:mail' into {'email': 'mail', 'full_name': 'name'}\n \"\"\"\n if os.environ.get(name):\n return dict(e.split(\":\") for e in os.environ[name].split(\",\"))\n return default or {}\n\n\ndef get_env_int(name: str, default: int = 0) -> int:\n \"\"\"Helper to get integer value from environment.\"\"\"\n if name not in os.environ:\n return default\n try:\n return int(os.environ[name])\n except ValueError as error:\n raise ValueError(f\"{name} is not an integer: {error}\") from error\n\n\ndef get_env_float(name: str, default: float = 0.0) -> float:\n \"\"\"Helper to get float value from environment.\"\"\"\n if name not in os.environ:\n return default\n try:\n return float(os.environ[name])\n except ValueError as error:\n raise ValueError(f\"{name} is not an float: {error}\") from error\n\n\ndef get_env_bool(name: str, default: bool = False) -> bool:\n \"\"\"Helper to get boolean value from environment.\"\"\"\n if name not in os.environ:\n return default\n true_values = 
{\"true\", \"yes\", \"1\"}\n return os.environ[name].lower() in true_values\n\n\ndef modify_env_list(current: list[str], name: str) -> list[str]:\n \"\"\"Helper to modify list (for example checks).\"\"\"\n for item in reversed(get_env_list(f\"WEBLATE_ADD_{name}\")):\n current.insert(0, item)\n for item in get_env_list(f\"WEBLATE_REMOVE_{name}\"):\n current.remove(item)\n return current\n\n\ndef get_env_credentials(\n name: str,\n) -> dict[str, dict[str, str]]:\n \"\"\"Parses VCS integration credentials.\"\"\"\n username = os.environ.get(f\"WEBLATE_{name}_USERNAME\")\n token = os.environ.get(f\"WEBLATE_{name}_TOKEN\")\n host = os.environ.get(f\"WEBLATE_{name}_HOST\")\n\n if not host and (username or token):\n raise ValueError(\n f\"Incomplete {name}_CREDENTIALS configuration: missing WEBLATE_{name}_HOST\"\n )\n return {host: {\"username\": username, \"token\": token}}\n\n\ndef get_env_ratelimit(name: str, default: str) -> str:\n value = os.environ.get(name, default)\n\n # Taken from rest_framework.throttling.SimpleRateThrottle.parse_rate\n # it can not be imported here as that breaks config loading for\n # rest_framework\n\n try:\n num, period = value.split(\"/\")\n except ValueError as error:\n raise ValueError(f\"Could not parse {name}: {error}\") from error\n if not num.isdigit():\n raise ValueError(f\"Could not parse {name}: rate is not numeric: {num}\")\n if period[0] not in (\"s\", \"m\", \"h\", \"d\"):\n raise ValueError(f\"Could not parse {name}: unknown period: {period}\")\n\n return value\n", "path": "weblate/utils/environment.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nimport ast\nimport os\n\n\ndef get_env_str(\n name: str,\n default: str | None = None,\n required: bool = False,\n fallback_name: str | None = None,\n) -> str:\n file_env = f\"{name}_FILE\"\n if filename := os.environ.get(file_env):\n try:\n with open(filename) as handle:\n result = handle.read()\n except OSError as error:\n raise ValueError(\n f\"Failed to open {filename} as specified by {file_env}: {error}\"\n ) from error\n else:\n if fallback_name and name not in os.environ:\n name = fallback_name\n result = os.environ.get(name, default)\n if required and not result:\n raise ValueError(f\"{name} has to be configured!\")\n return result\n\n\ndef get_env_list(name: str, default: list[str] | None = None) -> list[str]:\n \"\"\"Helper to get list from environment.\"\"\"\n if name not in os.environ:\n return default or []\n return os.environ[name].split(\",\")\n\n\ndef get_env_map(name: str, default: dict[str, str] | None = None) -> dict[str, str]:\n \"\"\"\n Helper to get mapping from environment.\n\n parses 'full_name:name,email:mail' into {'email': 'mail', 'full_name': 'name'}\n \"\"\"\n if os.environ.get(name):\n return dict(e.split(\":\") for e in os.environ[name].split(\",\"))\n return default or {}\n\n\ndef get_env_int(name: str, default: int = 0) -> int:\n \"\"\"Helper to get integer value from environment.\"\"\"\n if name not in os.environ:\n return default\n try:\n return int(os.environ[name])\n except ValueError as error:\n raise ValueError(f\"{name} is not an integer: {error}\") from error\n\n\ndef get_env_float(name: str, default: float = 0.0) -> float:\n \"\"\"Helper to get float value from environment.\"\"\"\n if name not in os.environ:\n return default\n try:\n return float(os.environ[name])\n except ValueError as error:\n raise ValueError(f\"{name} is not an 
float: {error}\") from error\n\n\ndef get_env_bool(name: str, default: bool = False) -> bool:\n \"\"\"Helper to get boolean value from environment.\"\"\"\n if name not in os.environ:\n return default\n true_values = {\"true\", \"yes\", \"1\"}\n return os.environ[name].lower() in true_values\n\n\ndef modify_env_list(current: list[str], name: str) -> list[str]:\n \"\"\"Helper to modify list (for example checks).\"\"\"\n for item in reversed(get_env_list(f\"WEBLATE_ADD_{name}\")):\n current.insert(0, item)\n for item in get_env_list(f\"WEBLATE_REMOVE_{name}\"):\n current.remove(item)\n return current\n\n\ndef get_env_credentials(\n name: str,\n) -> dict[str, dict[str, str]]:\n \"\"\"Parses VCS integration credentials.\"\"\"\n if credentials := get_env_str(f\"WEBLATE_{name}_CREDENTIALS\"):\n return ast.literal_eval(credentials)\n username = os.environ.get(f\"WEBLATE_{name}_USERNAME\")\n token = os.environ.get(f\"WEBLATE_{name}_TOKEN\")\n host = os.environ.get(f\"WEBLATE_{name}_HOST\")\n\n if not host and (username or token):\n raise ValueError(\n f\"Incomplete {name}_CREDENTIALS configuration: missing WEBLATE_{name}_HOST\"\n )\n return {host: {\"username\": username, \"token\": token}}\n\n\ndef get_env_ratelimit(name: str, default: str) -> str:\n value = os.environ.get(name, default)\n\n # Taken from rest_framework.throttling.SimpleRateThrottle.parse_rate\n # it can not be imported here as that breaks config loading for\n # rest_framework\n\n try:\n num, period = value.split(\"/\")\n except ValueError as error:\n raise ValueError(f\"Could not parse {name}: {error}\") from error\n if not num.isdigit():\n raise ValueError(f\"Could not parse {name}: rate is not numeric: {num}\")\n if period[0] not in (\"s\", \"m\", \"h\", \"d\"):\n raise ValueError(f\"Could not parse {name}: unknown period: {period}\")\n\n return value\n", "path": "weblate/utils/environment.py"}]}
| 1,641 | 175 |
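A minimal standalone sketch of the mechanism the WEBLATE_*_CREDENTIALS patch above relies on: reading a Python dict literal out of an environment variable with `ast.literal_eval`. The host, username, and token values below are invented for illustration; only the variable naming and the literal-eval step come from the diff.

```python
import ast
import os

# Invented example value; the patch reads WEBLATE_<NAME>_CREDENTIALS,
# e.g. WEBLATE_GITLAB_CREDENTIALS, as a Python dict literal.
os.environ["WEBLATE_GITLAB_CREDENTIALS"] = (
    "{'git.example.org': {'username': 'weblate', 'token': 's3cret'}}"
)

def read_credentials(name: str) -> dict:
    """Return the mapping stored in WEBLATE_<name>_CREDENTIALS, if any."""
    raw = os.environ.get(f"WEBLATE_{name}_CREDENTIALS")
    if raw:
        # literal_eval only evaluates Python literals, so no arbitrary code runs.
        return ast.literal_eval(raw)
    return {}

print(read_credentials("GITLAB"))
# {'git.example.org': {'username': 'weblate', 'token': 's3cret'}}
```

Note that the whole mapping is passed as one quoted string, which also avoids the YAML nesting error quoted in the report.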
gh_patches_debug_16067
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-3077
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The docker_list parser is raising ValueError exceptions in production.
In production, the DockerList parser is throwing a number of ValueError exceptions, such as ValueError("Line containing 'REPOSITORY' was not found in table",) and ValueError("Line containing 'CONTAINER' was not found in table",).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/docker_list.py`
Content:
```
1 """
2 DockerList - command ``/usr/bin/docker (images|ps)``
3 ====================================================
4
5 Parse the output of command "docker_list_images" and "docker_list_containers",
6 which have very similar formats.
7
8 The header line is parsed and used as the names for the remaining columns.
9 All fields in both header and data are assumed to be separated by at least
10 three spaces. This allows single spaces in values and headers, so headers
11 such as 'IMAGE ID' are captured as is.
12
13 If the header line and at least one data line are not found, no data is
14 stored.
15
16 Each row is stored as a dictionary, keyed on the header fields. The data is
17 available in two formats:
18
19 * The old format is a list of row dictionaries.
20 * The new format stores each dictionary in a dictionary keyed on the value of
21 a given field, given by the subclass.
22
23 """
24 from insights import CommandParser, parser
25 from insights.parsers import SkipException, parse_fixed_table
26 from insights.specs import Specs
27
28
29 class DockerList(CommandParser):
30 """
31 A general class for parsing tabular docker list information. Parsing
32 rules are:
33
34 * The first line is the header line.
35 * The other lines are data lines.
36 * All fields line up vertically.
37 * Fields are separated from each other by at least three spaces.
38 * Some fields can contain nothing, and this is shown as spaces, so we
39 need to catch that and turn it into None.
40
41 Why not just use hard-coded fields and columns? So that we can adapt to
42 different output lists.
43
44 Raises:
45 NotImplementedError: If `key_field` or `attr_name` is not defined
46 SkipException: If no data to parse
47 """
48 key_field = None
49 heading_ignore = []
50 attr_name = ''
51 substitutions = []
52
53 def parse_content(self, content):
54 """
55 Parse the lines given into a list of dictionaries for each row. This
56 is stored in the ``rows`` attribute.
57
58 If the ``key_field`` property is set, use this to key a ``data``
59 dictionary attribute.
60 """
61 if not (self.key_field and self.attr_name):
62 raise NotImplementedError("'key_field' or 'attr_name' is not defined")
63
64 self.rows = parse_fixed_table(content,
65 heading_ignore=self.heading_ignore,
66 header_substitute=self.substitutions)
67
68 if not self.rows:
69 raise SkipException('No data.')
70
71 data = {}
72 for row in self.rows:
73 k = row.get(self.key_field)
74 for sub in self.substitutions:
75 row[sub[0]] = row.pop(sub[1]) if sub[1] in row else None
76 if k is not None and k != '<none>':
77 data[k] = row
78 setattr(self, self.attr_name, data)
79
80
81 @parser(Specs.docker_list_images)
82 class DockerListImages(DockerList):
83 """
84 Handle the list of docker images using the DockerList parser class.
85
86 Sample output of command ``docker images --all --no-trunc --digests``::
87
88 REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE
89 rhel7_imagemagick latest <none> 882ab98aae5394aebe91fe6d8a4297fa0387c3cfd421b2d892bddf218ac373b2 4 days ago 785.4 MB
90 rhel6_nss-softokn latest <none> dd87dad2c7841a19263ae2dc96d32c501ee84a92f56aed75bb67f57efe4e48b5 5 days ago 449.7 MB
91
92 Attributes:
93 rows (list): List of row dictionaries.
94 images (dict): Dictionary keyed on the value of the "REPOSITORY" fileld
95
96 Examples:
97 >>> images.rows[0]['REPOSITORY']
98 'rhel6_vsftpd'
99 >>> images.rows[1]['VIRTUAL SIZE']
100 '785.4 MB'
101 >>> images.images['rhel6_vsftpd']['CREATED']
102 '37 minutes ago'
103 """
104 key_field = 'REPOSITORY'
105 heading_ignore = [key_field]
106 attr_name = 'images'
107 substitutions = [("IMAGE ID", "IMAGE_ID"), ("VIRTUAL SIZE", "VIRTUAL_SIZE")]
108
109 @property
110 def data(self):
111 return self.images
112
113
114 @parser(Specs.docker_list_containers)
115 class DockerListContainers(DockerList):
116 """
117 Handle the list of docker images using the DockerList parser class.
118
119 Sample output of command ``docker ps --all --no-trunc --size``::
120
121 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
122 95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea40a9269d87e9af6741574562af9ee013e03ac2745fb5f59e2478 "/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'" 51 minutes ago Exited (137) 50 minutes ago tender_rosalind 4.751 MB (virtual 200.4 MB)
123 03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd "/usr/sbin/httpd -DFOREGROUND" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)
124
125 Attributes:
126 rows (list): List of row dictionaries.
127 containers(dict): Dictionary keyed on the value of the "NAMES" field
128
129 Examples:
130 >>> containers.rows[0]['STATUS']
131 'Up 37 seconds'
132 >>> containers.containers['tender_rosalind']['STATUS']
133 'Exited (137) 18 hours ago'
134 """
135 key_field = 'NAMES'
136 heading_ignore = ['CONTAINER']
137 attr_name = 'containers'
138 substitutions = [("CONTAINER ID", "CONTAINER_ID")]
139
140 @property
141 def data(self):
142 return self.containers
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/parsers/docker_list.py b/insights/parsers/docker_list.py
--- a/insights/parsers/docker_list.py
+++ b/insights/parsers/docker_list.py
@@ -61,6 +61,12 @@
if not (self.key_field and self.attr_name):
raise NotImplementedError("'key_field' or 'attr_name' is not defined")
+ # There is another application named docker that's a kde system tray, that
+ # will output help when the spec is run due to incorrect arguments. So check
+ # the content for any lines starting with Usage: so it can be skipped.
+ if any(l for l in content if l.startswith("Usage: ")):
+ raise SkipException('No data only help output.')
+
self.rows = parse_fixed_table(content,
heading_ignore=self.heading_ignore,
header_substitute=self.substitutions)
|
{"golden_diff": "diff --git a/insights/parsers/docker_list.py b/insights/parsers/docker_list.py\n--- a/insights/parsers/docker_list.py\n+++ b/insights/parsers/docker_list.py\n@@ -61,6 +61,12 @@\n if not (self.key_field and self.attr_name):\n raise NotImplementedError(\"'key_field' or 'attr_name' is not defined\")\n \n+ # There is another application named docker that's a kde system tray, that\n+ # will output help when the spec is run due to incorrect arguments. So check\n+ # the content for any lines starting with Usage: so it can be skipped.\n+ if any(l for l in content if l.startswith(\"Usage: \")):\n+ raise SkipException('No data only help output.')\n+\n self.rows = parse_fixed_table(content,\n heading_ignore=self.heading_ignore,\n header_substitute=self.substitutions)\n", "issue": "The docker_list parser is raising ValueError exceptions in production.\nThe DockerList parser is throwing a number of the exception ValueError(\"Line containing 'REPOSITORY' was not found in table\",), and ValueError(\"Line containing 'CONTAINER' was not found in table\",) in production.\n", "before_files": [{"content": "\"\"\"\nDockerList - command ``/usr/bin/docker (images|ps)``\n====================================================\n\nParse the output of command \"docker_list_images\" and \"docker_list_containers\",\nwhich have very similar formats.\n\nThe header line is parsed and used as the names for the remaining columns.\nAll fields in both header and data are assumed to be separated by at least\nthree spaces. This allows single spaces in values and headers, so headers\nsuch as 'IMAGE ID' are captured as is.\n\nIf the header line and at least one data line are not found, no data is\nstored.\n\nEach row is stored as a dictionary, keyed on the header fields. The data is\navailable in two formats:\n\n* The old format is a list of row dictionaries.\n* The new format stores each dictionary in a dictionary keyed on the value of\n a given field, given by the subclass.\n\n\"\"\"\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException, parse_fixed_table\nfrom insights.specs import Specs\n\n\nclass DockerList(CommandParser):\n \"\"\"\n A general class for parsing tabular docker list information. Parsing\n rules are:\n\n * The first line is the header line.\n * The other lines are data lines.\n * All fields line up vertically.\n * Fields are separated from each other by at least three spaces.\n * Some fields can contain nothing, and this is shown as spaces, so we\n need to catch that and turn it into None.\n\n Why not just use hard-coded fields and columns? So that we can adapt to\n different output lists.\n\n Raises:\n NotImplementedError: If `key_field` or `attr_name` is not defined\n SkipException: If no data to parse\n \"\"\"\n key_field = None\n heading_ignore = []\n attr_name = ''\n substitutions = []\n\n def parse_content(self, content):\n \"\"\"\n Parse the lines given into a list of dictionaries for each row. 
This\n is stored in the ``rows`` attribute.\n\n If the ``key_field`` property is set, use this to key a ``data``\n dictionary attribute.\n \"\"\"\n if not (self.key_field and self.attr_name):\n raise NotImplementedError(\"'key_field' or 'attr_name' is not defined\")\n\n self.rows = parse_fixed_table(content,\n heading_ignore=self.heading_ignore,\n header_substitute=self.substitutions)\n\n if not self.rows:\n raise SkipException('No data.')\n\n data = {}\n for row in self.rows:\n k = row.get(self.key_field)\n for sub in self.substitutions:\n row[sub[0]] = row.pop(sub[1]) if sub[1] in row else None\n if k is not None and k != '<none>':\n data[k] = row\n setattr(self, self.attr_name, data)\n\n\n@parser(Specs.docker_list_images)\nclass DockerListImages(DockerList):\n \"\"\"\n Handle the list of docker images using the DockerList parser class.\n\n Sample output of command ``docker images --all --no-trunc --digests``::\n\n REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE\n rhel7_imagemagick latest <none> 882ab98aae5394aebe91fe6d8a4297fa0387c3cfd421b2d892bddf218ac373b2 4 days ago 785.4 MB\n rhel6_nss-softokn latest <none> dd87dad2c7841a19263ae2dc96d32c501ee84a92f56aed75bb67f57efe4e48b5 5 days ago 449.7 MB\n\n Attributes:\n rows (list): List of row dictionaries.\n images (dict): Dictionary keyed on the value of the \"REPOSITORY\" fileld\n\n Examples:\n >>> images.rows[0]['REPOSITORY']\n 'rhel6_vsftpd'\n >>> images.rows[1]['VIRTUAL SIZE']\n '785.4 MB'\n >>> images.images['rhel6_vsftpd']['CREATED']\n '37 minutes ago'\n \"\"\"\n key_field = 'REPOSITORY'\n heading_ignore = [key_field]\n attr_name = 'images'\n substitutions = [(\"IMAGE ID\", \"IMAGE_ID\"), (\"VIRTUAL SIZE\", \"VIRTUAL_SIZE\")]\n\n @property\n def data(self):\n return self.images\n\n\n@parser(Specs.docker_list_containers)\nclass DockerListContainers(DockerList):\n \"\"\"\n Handle the list of docker images using the DockerList parser class.\n\n Sample output of command ``docker ps --all --no-trunc --size``::\n\n CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE\n 95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea40a9269d87e9af6741574562af9ee013e03ac2745fb5f59e2478 \"/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'\" 51 minutes ago Exited (137) 50 minutes ago tender_rosalind 4.751 MB (virtual 200.4 MB)\n 03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd \"/usr/sbin/httpd -DFOREGROUND\" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)\n\n Attributes:\n rows (list): List of row dictionaries.\n containers(dict): Dictionary keyed on the value of the \"NAMES\" field\n\n Examples:\n >>> containers.rows[0]['STATUS']\n 'Up 37 seconds'\n >>> containers.containers['tender_rosalind']['STATUS']\n 'Exited (137) 18 hours ago'\n \"\"\"\n key_field = 'NAMES'\n heading_ignore = ['CONTAINER']\n attr_name = 'containers'\n substitutions = [(\"CONTAINER ID\", \"CONTAINER_ID\")]\n\n @property\n def data(self):\n return self.containers\n", "path": "insights/parsers/docker_list.py"}], "after_files": [{"content": "\"\"\"\nDockerList - command ``/usr/bin/docker (images|ps)``\n====================================================\n\nParse the output of command \"docker_list_images\" and \"docker_list_containers\",\nwhich have very similar formats.\n\nThe header line is parsed and used as the names for the remaining columns.\nAll fields in both header and data are assumed to be separated by at least\nthree spaces. 
This allows single spaces in values and headers, so headers\nsuch as 'IMAGE ID' are captured as is.\n\nIf the header line and at least one data line are not found, no data is\nstored.\n\nEach row is stored as a dictionary, keyed on the header fields. The data is\navailable in two formats:\n\n* The old format is a list of row dictionaries.\n* The new format stores each dictionary in a dictionary keyed on the value of\n a given field, given by the subclass.\n\n\"\"\"\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException, parse_fixed_table\nfrom insights.specs import Specs\n\n\nclass DockerList(CommandParser):\n \"\"\"\n A general class for parsing tabular docker list information. Parsing\n rules are:\n\n * The first line is the header line.\n * The other lines are data lines.\n * All fields line up vertically.\n * Fields are separated from each other by at least three spaces.\n * Some fields can contain nothing, and this is shown as spaces, so we\n need to catch that and turn it into None.\n\n Why not just use hard-coded fields and columns? So that we can adapt to\n different output lists.\n\n Raises:\n NotImplementedError: If `key_field` or `attr_name` is not defined\n SkipException: If no data to parse\n \"\"\"\n key_field = None\n heading_ignore = []\n attr_name = ''\n substitutions = []\n\n def parse_content(self, content):\n \"\"\"\n Parse the lines given into a list of dictionaries for each row. This\n is stored in the ``rows`` attribute.\n\n If the ``key_field`` property is set, use this to key a ``data``\n dictionary attribute.\n \"\"\"\n if not (self.key_field and self.attr_name):\n raise NotImplementedError(\"'key_field' or 'attr_name' is not defined\")\n\n # There is another application named docker that's a kde system tray, that\n # will output help when the spec is run due to incorrect arguments. 
So check\n # the content for any lines starting with Usage: so it can be skipped.\n if any(l for l in content if l.startswith(\"Usage: \")):\n raise SkipException('No data only help output.')\n\n self.rows = parse_fixed_table(content,\n heading_ignore=self.heading_ignore,\n header_substitute=self.substitutions)\n\n if not self.rows:\n raise SkipException('No data.')\n\n data = {}\n for row in self.rows:\n k = row.get(self.key_field)\n for sub in self.substitutions:\n row[sub[0]] = row.pop(sub[1]) if sub[1] in row else None\n if k is not None and k != '<none>':\n data[k] = row\n setattr(self, self.attr_name, data)\n\n\n@parser(Specs.docker_list_images)\nclass DockerListImages(DockerList):\n \"\"\"\n Handle the list of docker images using the DockerList parser class.\n\n Sample output of command ``docker images --all --no-trunc --digests``::\n\n REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE\n rhel7_imagemagick latest <none> 882ab98aae5394aebe91fe6d8a4297fa0387c3cfd421b2d892bddf218ac373b2 4 days ago 785.4 MB\n rhel6_nss-softokn latest <none> dd87dad2c7841a19263ae2dc96d32c501ee84a92f56aed75bb67f57efe4e48b5 5 days ago 449.7 MB\n\n Attributes:\n rows (list): List of row dictionaries.\n images (dict): Dictionary keyed on the value of the \"REPOSITORY\" fileld\n\n Examples:\n >>> images.rows[0]['REPOSITORY']\n 'rhel6_vsftpd'\n >>> images.rows[1]['VIRTUAL SIZE']\n '785.4 MB'\n >>> images.images['rhel6_vsftpd']['CREATED']\n '37 minutes ago'\n \"\"\"\n key_field = 'REPOSITORY'\n heading_ignore = [key_field]\n attr_name = 'images'\n substitutions = [(\"IMAGE ID\", \"IMAGE_ID\"), (\"VIRTUAL SIZE\", \"VIRTUAL_SIZE\")]\n\n @property\n def data(self):\n return self.images\n\n\n@parser(Specs.docker_list_containers)\nclass DockerListContainers(DockerList):\n \"\"\"\n Handle the list of docker images using the DockerList parser class.\n\n Sample output of command ``docker ps --all --no-trunc --size``::\n\n CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE\n 95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea40a9269d87e9af6741574562af9ee013e03ac2745fb5f59e2478 \"/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'\" 51 minutes ago Exited (137) 50 minutes ago tender_rosalind 4.751 MB (virtual 200.4 MB)\n 03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd \"/usr/sbin/httpd -DFOREGROUND\" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)\n\n Attributes:\n rows (list): List of row dictionaries.\n containers(dict): Dictionary keyed on the value of the \"NAMES\" field\n\n Examples:\n >>> containers.rows[0]['STATUS']\n 'Up 37 seconds'\n >>> containers.containers['tender_rosalind']['STATUS']\n 'Exited (137) 18 hours ago'\n \"\"\"\n key_field = 'NAMES'\n heading_ignore = ['CONTAINER']\n attr_name = 'containers'\n substitutions = [(\"CONTAINER ID\", \"CONTAINER_ID\")]\n\n @property\n def data(self):\n return self.containers\n", "path": "insights/parsers/docker_list.py"}]}
| 2,166 | 196 |
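A self-contained illustration of the guard this patch introduces, stripped of the insights parser machinery. The sample help output is invented, and `SkipException` is stubbed locally rather than imported, so treat this as a sketch of the idea rather than the real parser.

```python
# Sketch of the guard added above: bail out early when the captured output is
# really help text from the unrelated KDE "docker" system-tray program, instead
# of letting parse_fixed_table raise ValueError on the missing table header.

class SkipException(Exception):
    """Local stand-in for insights.parsers.SkipException."""

def parse_docker_listing(content):
    if any(line.startswith("Usage: ") for line in content):
        raise SkipException("No data, only help output.")
    if not content:
        raise SkipException("No data.")
    # ... the real parser would call parse_fixed_table(content, ...) here ...
    return content

help_output = [
    "Usage: docker [options]",          # invented help text
    "A system tray application for KDE",
]
try:
    parse_docker_listing(help_output)
except SkipException as exc:
    print(f"skipped: {exc}")            # skipped: No data, only help output.
```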
gh_patches_debug_11791
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1027
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ine.py for source in data["playlist"][0]["sources"]: TypeError: 'NoneType' object is not subscriptable
Hi, the INE plugin has recently started failing:
```
$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/1cfbc029-dd6d-4646-80b9-7316e3ac121a/introduction 720p --http-cookie laravel_session=removed
[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/1cfbc029-dd6d-4646-80b9-7316e3ac121a/introduction
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/Current/bin/streamlink", line 11, in <module>
load_entry_point('streamlink==0.6.0', 'console_scripts', 'streamlink')()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py", line 1027, in main
handle_url()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py", line 482, in handle_url
streams = fetch_streams(plugin)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py", line 394, in fetch_streams
sorting_excludes=args.stream_sorting_excludes)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugin/plugin.py", line 328, in get_streams
return self.streams(*args, **kwargs)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugin/plugin.py", line 236, in streams
ostreams = self._get_streams()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugins/ine.py", line 50, in _get_streams
for source in data["playlist"][0]["sources"]:
TypeError: 'NoneType' object is not subscriptable
$
$ python --version
Python 3.5.3
$ streamlink --version
streamlink 0.6.0
$ streamlink --version-check
[cli][info] Your Streamlink version (0.6) is up to date!
$
```
Same error on mac OS and Windows.
This particular URL was 'downloadable' with no problem about a month ago or so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/ine.py`
Content:
```
1 from __future__ import print_function
2
3 import json
4 import re
5
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import http
8 from streamlink.plugin.api import validate
9 from streamlink.stream import HLSStream
10
11
12 class INE(Plugin):
13 url_re = re.compile(r"""https://streaming.ine.com/play\#?/
14 ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?
15 (.*?)""", re.VERBOSE)
16 play_url = "https://streaming.ine.com/play/{vid}/watch"
17 js_re = re.compile(r'''script type="text/javascript" src="(https://content.jwplatform.com/players/.*?)"''')
18 jwplayer_re = re.compile(r'''jwplayer\(".*?"\).setup\((\{.*\})\);''', re.DOTALL)
19 setup_schema = validate.Schema(
20 validate.transform(jwplayer_re.search),
21 validate.any(
22 None,
23 validate.all(
24 validate.get(1),
25 validate.transform(json.loads),
26 {"playlist": [
27 {"sources": [{"file": validate.text,
28 "type": validate.text}]}
29 ]}
30 )
31 )
32 )
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return cls.url_re.match(url) is not None
37
38 def _get_streams(self):
39 vid = self.url_re.match(self.url).group(1)
40 self.logger.debug("Found video ID: {0}", vid)
41
42 page = http.get(self.play_url.format(vid=vid))
43 js_url_m = self.js_re.search(page.text)
44 if js_url_m:
45 js_url = js_url_m.group(1)
46 self.logger.debug("Loading player JS: {0}", js_url)
47
48 res = http.get(js_url)
49 data = self.setup_schema.validate(res.text)
50 for source in data["playlist"][0]["sources"]:
51 if source["type"] == "hls":
52 return HLSStream.parse_variant_playlist(self.session, "https:" + source["file"])
53
54
55 __plugin__ = INE
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py
--- a/src/streamlink/plugins/ine.py
+++ b/src/streamlink/plugins/ine.py
@@ -15,7 +15,7 @@
(.*?)""", re.VERBOSE)
play_url = "https://streaming.ine.com/play/{vid}/watch"
js_re = re.compile(r'''script type="text/javascript" src="(https://content.jwplatform.com/players/.*?)"''')
- jwplayer_re = re.compile(r'''jwplayer\(".*?"\).setup\((\{.*\})\);''', re.DOTALL)
+ jwplayer_re = re.compile(r'''jwConfig\s*=\s*(\{.*\});''', re.DOTALL)
setup_schema = validate.Schema(
validate.transform(jwplayer_re.search),
validate.any(
|
{"golden_diff": "diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py\n--- a/src/streamlink/plugins/ine.py\n+++ b/src/streamlink/plugins/ine.py\n@@ -15,7 +15,7 @@\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n- jwplayer_re = re.compile(r'''jwplayer\\(\".*?\"\\).setup\\((\\{.*\\})\\);''', re.DOTALL)\n+ jwplayer_re = re.compile(r'''jwConfig\\s*=\\s*(\\{.*\\});''', re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n", "issue": "ine.py for source in data[\"playlist\"][0][\"sources\"]: TypeError: 'NoneType' object is not subscriptable\nHi, INE plugin is failing since recently:\r\n\r\n```\r\n$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/1cfbc029-dd6d-4646-80b9-7316e3ac121a/introduction 720p --http-cookie laravel_session=removed\r\n[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/1cfbc029-dd6d-4646-80b9-7316e3ac121a/introduction\r\nTraceback (most recent call last):\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/Current/bin/streamlink\", line 11, in <module>\r\n load_entry_point('streamlink==0.6.0', 'console_scripts', 'streamlink')()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py\", line 1027, in main\r\n handle_url()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py\", line 482, in handle_url\r\n streams = fetch_streams(plugin)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink_cli/main.py\", line 394, in fetch_streams\r\n sorting_excludes=args.stream_sorting_excludes)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugin/plugin.py\", line 328, in get_streams\r\n return self.streams(*args, **kwargs)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugin/plugin.py\", line 236, in streams\r\n ostreams = self._get_streams()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/streamlink/plugins/ine.py\", line 50, in _get_streams\r\n for source in data[\"playlist\"][0][\"sources\"]:\r\nTypeError: 'NoneType' object is not subscriptable\r\n$ \r\n$ python --version\r\nPython 3.5.3\r\n$ streamlink --version\r\nstreamlink 0.6.0\r\n$ streamlink --version-check\r\n[cli][info] Your Streamlink version (0.6) is up to date!\r\n$\r\n```\r\nSame error on mac OS and Windows.\r\nThis particular URL was 'downloadable' with no problem about a month ago or so.\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass INE(Plugin):\n url_re = re.compile(r\"\"\"https://streaming.ine.com/play\\#?/\n ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n jwplayer_re = re.compile(r'''jwplayer\\(\".*?\"\\).setup\\((\\{.*\\})\\);''', 
re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n {\"playlist\": [\n {\"sources\": [{\"file\": validate.text,\n \"type\": validate.text}]}\n ]}\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n vid = self.url_re.match(self.url).group(1)\n self.logger.debug(\"Found video ID: {0}\", vid)\n\n page = http.get(self.play_url.format(vid=vid))\n js_url_m = self.js_re.search(page.text)\n if js_url_m:\n js_url = js_url_m.group(1)\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n\n res = http.get(js_url)\n data = self.setup_schema.validate(res.text)\n for source in data[\"playlist\"][0][\"sources\"]:\n if source[\"type\"] == \"hls\":\n return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n\n\n__plugin__ = INE\n", "path": "src/streamlink/plugins/ine.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport json\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass INE(Plugin):\n url_re = re.compile(r\"\"\"https://streaming.ine.com/play\\#?/\n ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n jwplayer_re = re.compile(r'''jwConfig\\s*=\\s*(\\{.*\\});''', re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n {\"playlist\": [\n {\"sources\": [{\"file\": validate.text,\n \"type\": validate.text}]}\n ]}\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n vid = self.url_re.match(self.url).group(1)\n self.logger.debug(\"Found video ID: {0}\", vid)\n\n page = http.get(self.play_url.format(vid=vid))\n js_url_m = self.js_re.search(page.text)\n if js_url_m:\n js_url = js_url_m.group(1)\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n\n res = http.get(js_url)\n data = self.setup_schema.validate(res.text)\n for source in data[\"playlist\"][0][\"sources\"]:\n if source[\"type\"] == \"hls\":\n return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n\n\n__plugin__ = INE\n", "path": "src/streamlink/plugins/ine.py"}]}
| 1,450 | 198 |
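To make the one-line regex change above concrete, here is a small sketch run against an invented fragment of the player JavaScript; the real page may differ, so the `player_js` string is purely an assumption.

```python
import json
import re

# Invented excerpt: the setup data now appears as a `jwConfig = {...};`
# assignment rather than a jwplayer("...").setup({...}); call.
player_js = 'var jwConfig = {"playlist": [{"sources": [{"file": "//cdn/x.m3u8", "type": "hls"}]}]};'

old_re = re.compile(r'''jwplayer\(".*?"\).setup\((\{.*\})\);''', re.DOTALL)
new_re = re.compile(r'''jwConfig\s*=\s*(\{.*\});''', re.DOTALL)

print(old_re.search(player_js))             # None -> the schema yields None -> the reported TypeError
data = json.loads(new_re.search(player_js).group(1))
print(data["playlist"][0]["sources"][0])    # {'file': '//cdn/x.m3u8', 'type': 'hls'}
```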
gh_patches_debug_29769
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-5130
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Relationship between vetco and petco
I was looking at fixing the `vetco` spider, but after a quick look on the website everything I've seen is titled "At Petco".
To the Americans: is Vetco a real brand?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/vetco_clinics.py`
Content:
```
1 import re
2
3 import scrapy
4 from scrapy.selector import Selector
5
6 from locations.geo import postal_regions
7 from locations.items import Feature
8
9
10 class VetcoClinicsSpider(scrapy.Spider):
11 name = "vetco"
12 item_attributes = {"brand": "Vetco Clinics"}
13 allowed_domains = ["vetcoclinics.com"]
14
15 def start_requests(self):
16 for record in postal_regions("US"):
17 url_template = "https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={}"
18 yield scrapy.http.Request(url_template.format(record["postal_region"]))
19
20 def parse(self, response):
21 jsonresponse = response.json()
22 if jsonresponse is not None:
23 clinics = jsonresponse.get("clinics")
24 if clinics:
25 for stores in clinics:
26 body = stores["label"]
27 address = Selector(text=body).xpath("//address/text()").extract()
28 if len(address) == 3:
29 addr_full, city_state_postal, phone = (item.split(",") for item in address)
30 city, state_postal = (item.split(",") for item in city_state_postal)
31 state, postal = re.search(r"([A-Z]{2}) (\d{5})", state_postal[0]).groups()
32
33 else:
34 addr_full, city_state_postal = (item.split(",") for item in address)
35 city, state_postal = (item.split(",") for item in city_state_postal)
36 state, postal = re.search(r"([A-Z]{2}) (\d{5})", state_postal[0]).groups()
37
38 properties = {
39 "ref": addr_full[0].strip(),
40 "addr_full": addr_full[0].strip(),
41 "city": city[0].strip(),
42 "state": state,
43 "postcode": postal,
44 "lat": float(stores["point"]["lat"]),
45 "lon": float(stores["point"]["long"]),
46 "website": response.url,
47 }
48
49 yield Feature(**properties)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/vetco_clinics.py b/locations/spiders/vetco_clinics.py
deleted file mode 100644
--- a/locations/spiders/vetco_clinics.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import re
-
-import scrapy
-from scrapy.selector import Selector
-
-from locations.geo import postal_regions
-from locations.items import Feature
-
-
-class VetcoClinicsSpider(scrapy.Spider):
- name = "vetco"
- item_attributes = {"brand": "Vetco Clinics"}
- allowed_domains = ["vetcoclinics.com"]
-
- def start_requests(self):
- for record in postal_regions("US"):
- url_template = "https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={}"
- yield scrapy.http.Request(url_template.format(record["postal_region"]))
-
- def parse(self, response):
- jsonresponse = response.json()
- if jsonresponse is not None:
- clinics = jsonresponse.get("clinics")
- if clinics:
- for stores in clinics:
- body = stores["label"]
- address = Selector(text=body).xpath("//address/text()").extract()
- if len(address) == 3:
- addr_full, city_state_postal, phone = (item.split(",") for item in address)
- city, state_postal = (item.split(",") for item in city_state_postal)
- state, postal = re.search(r"([A-Z]{2}) (\d{5})", state_postal[0]).groups()
-
- else:
- addr_full, city_state_postal = (item.split(",") for item in address)
- city, state_postal = (item.split(",") for item in city_state_postal)
- state, postal = re.search(r"([A-Z]{2}) (\d{5})", state_postal[0]).groups()
-
- properties = {
- "ref": addr_full[0].strip(),
- "addr_full": addr_full[0].strip(),
- "city": city[0].strip(),
- "state": state,
- "postcode": postal,
- "lat": float(stores["point"]["lat"]),
- "lon": float(stores["point"]["long"]),
- "website": response.url,
- }
-
- yield Feature(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/vetco_clinics.py b/locations/spiders/vetco_clinics.py\ndeleted file mode 100644\n--- a/locations/spiders/vetco_clinics.py\n+++ /dev/null\n@@ -1,49 +0,0 @@\n-import re\n-\n-import scrapy\n-from scrapy.selector import Selector\n-\n-from locations.geo import postal_regions\n-from locations.items import Feature\n-\n-\n-class VetcoClinicsSpider(scrapy.Spider):\n- name = \"vetco\"\n- item_attributes = {\"brand\": \"Vetco Clinics\"}\n- allowed_domains = [\"vetcoclinics.com\"]\n-\n- def start_requests(self):\n- for record in postal_regions(\"US\"):\n- url_template = \"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={}\"\n- yield scrapy.http.Request(url_template.format(record[\"postal_region\"]))\n-\n- def parse(self, response):\n- jsonresponse = response.json()\n- if jsonresponse is not None:\n- clinics = jsonresponse.get(\"clinics\")\n- if clinics:\n- for stores in clinics:\n- body = stores[\"label\"]\n- address = Selector(text=body).xpath(\"//address/text()\").extract()\n- if len(address) == 3:\n- addr_full, city_state_postal, phone = (item.split(\",\") for item in address)\n- city, state_postal = (item.split(\",\") for item in city_state_postal)\n- state, postal = re.search(r\"([A-Z]{2}) (\\d{5})\", state_postal[0]).groups()\n-\n- else:\n- addr_full, city_state_postal = (item.split(\",\") for item in address)\n- city, state_postal = (item.split(\",\") for item in city_state_postal)\n- state, postal = re.search(r\"([A-Z]{2}) (\\d{5})\", state_postal[0]).groups()\n-\n- properties = {\n- \"ref\": addr_full[0].strip(),\n- \"addr_full\": addr_full[0].strip(),\n- \"city\": city[0].strip(),\n- \"state\": state,\n- \"postcode\": postal,\n- \"lat\": float(stores[\"point\"][\"lat\"]),\n- \"lon\": float(stores[\"point\"][\"long\"]),\n- \"website\": response.url,\n- }\n-\n- yield Feature(**properties)\n", "issue": "Relationship between vetco and petco\nI was looking at fixing the `vetco` spider, but after a quick look on the website everything I've seen is titled \"At Petco\".\r\n\r\nTo the Americans: is Vetco a real brand?\n", "before_files": [{"content": "import re\n\nimport scrapy\nfrom scrapy.selector import Selector\n\nfrom locations.geo import postal_regions\nfrom locations.items import Feature\n\n\nclass VetcoClinicsSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {\"brand\": \"Vetco Clinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n\n def start_requests(self):\n for record in postal_regions(\"US\"):\n url_template = \"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={}\"\n yield scrapy.http.Request(url_template.format(record[\"postal_region\"]))\n\n def parse(self, response):\n jsonresponse = response.json()\n if jsonresponse is not None:\n clinics = jsonresponse.get(\"clinics\")\n if clinics:\n for stores in clinics:\n body = stores[\"label\"]\n address = Selector(text=body).xpath(\"//address/text()\").extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = (item.split(\",\") for item in address)\n city, state_postal = (item.split(\",\") for item in city_state_postal)\n state, postal = re.search(r\"([A-Z]{2}) (\\d{5})\", state_postal[0]).groups()\n\n else:\n addr_full, city_state_postal = (item.split(\",\") for item in address)\n city, state_postal = (item.split(\",\") for item in city_state_postal)\n state, postal = re.search(r\"([A-Z]{2}) (\\d{5})\", state_postal[0]).groups()\n\n properties = {\n \"ref\": addr_full[0].strip(),\n \"addr_full\": addr_full[0].strip(),\n \"city\": 
city[0].strip(),\n \"state\": state,\n \"postcode\": postal,\n \"lat\": float(stores[\"point\"][\"lat\"]),\n \"lon\": float(stores[\"point\"][\"long\"]),\n \"website\": response.url,\n }\n\n yield Feature(**properties)\n", "path": "locations/spiders/vetco_clinics.py"}], "after_files": [{"content": null, "path": "locations/spiders/vetco_clinics.py"}]}
| 839 | 536 |
gh_patches_debug_39658
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-2785
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bounding boxes in torchvision.utils
## 🚀 Feature
I'd like to easily be able to draw a bounding box onto an image represented as a torch tensor.
## Motivation
Using YOLO, I get a bunch of bounding boxes in an image. I want to be able to easily draw those onto a torch tensor (created via `torchvision.transforms.ToTensor`). This seems like a reasonable request because other bbox utilities such as NMS exist in torchvision.
## Pitch
```python
tensor_image = torch.tensor(...)
new_img = torchvision.utils.draw_bounding_box(
tensor_image,
[x_min, y_min, x_max, y_max],
)
```
## Alternatives
I *think* the following works right now, but the `torch.Tensor` > `PIL.Image` > `torch.Tensor` conversion for a single operation per image makes me feel uncomfortable due to efficiency.
```python
# entirely untested!
tensor_image = torch.tensor(...)
def draw_bounding_box(tensor, bbox, **kwargs): # TODO: don't use a kwargs dict
# adapted from https://pytorch.org/docs/stable/_modules/torchvision/utils.html#save_image
from PIL import Image
# TODO: none of the kwargs to make_grid are in this scope
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
# TODO: convert bbox to the appropriate type
return cv2.rectangle(ndarr, bbox) # use PIL if opencv-python isn't a dependency
```
## Additional context
See [`cv2.rectangle`](https://docs.opencv.org/4.4.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) ([source](https://github.com/opencv/opencv/blob/422f802ec96feecbc9db2f60879e8fe364cf8a92/modules/imgproc/src/drawing.cpp#L1829-L1859)) and [`PIL.ImageDraw.rectangle`](https://pillow.readthedocs.io/en/stable/reference/ImageDraw.html#PIL.ImageDraw.ImageDraw.rectangle) ([source](https://pillow.readthedocs.io/en/stable/_modules/PIL/ImageDraw.html#ImageDraw.rectangle)).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/utils.py`
Content:
```
1 from typing import Union, Optional, List, Tuple, Text, BinaryIO
2 import io
3 import pathlib
4 import torch
5 import math
6 irange = range
7
8
9 def make_grid(
10 tensor: Union[torch.Tensor, List[torch.Tensor]],
11 nrow: int = 8,
12 padding: int = 2,
13 normalize: bool = False,
14 range: Optional[Tuple[int, int]] = None,
15 scale_each: bool = False,
16 pad_value: int = 0,
17 ) -> torch.Tensor:
18 """Make a grid of images.
19
20 Args:
21 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
22 or a list of images all of the same size.
23 nrow (int, optional): Number of images displayed in each row of the grid.
24 The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
25 padding (int, optional): amount of padding. Default: ``2``.
26 normalize (bool, optional): If True, shift the image to the range (0, 1),
27 by the min and max values specified by :attr:`range`. Default: ``False``.
28 range (tuple, optional): tuple (min, max) where min and max are numbers,
29 then these numbers are used to normalize the image. By default, min and max
30 are computed from the tensor.
31 scale_each (bool, optional): If ``True``, scale each image in the batch of
32 images separately rather than the (min, max) over all images. Default: ``False``.
33 pad_value (float, optional): Value for the padded pixels. Default: ``0``.
34
35 Example:
36 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
37
38 """
39 if not (torch.is_tensor(tensor) or
40 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
41 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
42
43 # if list of tensors, convert to a 4D mini-batch Tensor
44 if isinstance(tensor, list):
45 tensor = torch.stack(tensor, dim=0)
46
47 if tensor.dim() == 2: # single image H x W
48 tensor = tensor.unsqueeze(0)
49 if tensor.dim() == 3: # single image
50 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
51 tensor = torch.cat((tensor, tensor, tensor), 0)
52 tensor = tensor.unsqueeze(0)
53
54 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
55 tensor = torch.cat((tensor, tensor, tensor), 1)
56
57 if normalize is True:
58 tensor = tensor.clone() # avoid modifying tensor in-place
59 if range is not None:
60 assert isinstance(range, tuple), \
61 "range has to be a tuple (min, max) if specified. min and max are numbers"
62
63 def norm_ip(img, low, high):
64 img.clamp_(min=low, max=high)
65 img.sub_(low).div_(max(high - low, 1e-5))
66
67 def norm_range(t, range):
68 if range is not None:
69 norm_ip(t, range[0], range[1])
70 else:
71 norm_ip(t, float(t.min()), float(t.max()))
72
73 if scale_each is True:
74 for t in tensor: # loop over mini-batch dimension
75 norm_range(t, range)
76 else:
77 norm_range(tensor, range)
78
79 if tensor.size(0) == 1:
80 return tensor.squeeze(0)
81
82 # make the mini-batch of images into a grid
83 nmaps = tensor.size(0)
84 xmaps = min(nrow, nmaps)
85 ymaps = int(math.ceil(float(nmaps) / xmaps))
86 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
87 num_channels = tensor.size(1)
88 grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
89 k = 0
90 for y in irange(ymaps):
91 for x in irange(xmaps):
92 if k >= nmaps:
93 break
94 # Tensor.copy_() is a valid method but seems to be missing from the stubs
95 # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_
96 grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]
97 2, x * width + padding, width - padding
98 ).copy_(tensor[k])
99 k = k + 1
100 return grid
101
102
103 def save_image(
104 tensor: Union[torch.Tensor, List[torch.Tensor]],
105 fp: Union[Text, pathlib.Path, BinaryIO],
106 nrow: int = 8,
107 padding: int = 2,
108 normalize: bool = False,
109 range: Optional[Tuple[int, int]] = None,
110 scale_each: bool = False,
111 pad_value: int = 0,
112 format: Optional[str] = None,
113 ) -> None:
114 """Save a given Tensor into an image file.
115
116 Args:
117 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
118 saves the tensor as a grid of images by calling ``make_grid``.
119 fp (string or file object): A filename or a file object
120 format(Optional): If omitted, the format to use is determined from the filename extension.
121 If a file object was used instead of a filename, this parameter should always be used.
122 **kwargs: Other arguments are documented in ``make_grid``.
123 """
124 from PIL import Image
125 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
126 normalize=normalize, range=range, scale_each=scale_each)
127 # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
128 ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
129 im = Image.fromarray(ndarr)
130 im.save(fp, format=format)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -1,8 +1,13 @@
from typing import Union, Optional, List, Tuple, Text, BinaryIO
-import io
import pathlib
import torch
import math
+import numpy as np
+from PIL import Image, ImageDraw
+from PIL import ImageFont
+
+__all__ = ["make_grid", "save_image", "draw_bounding_boxes"]
+
irange = range
@@ -121,10 +126,64 @@
If a file object was used instead of a filename, this parameter should always be used.
**kwargs: Other arguments are documented in ``make_grid``.
"""
- from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
im.save(fp, format=format)
+
+
[email protected]_grad()
+def draw_bounding_boxes(
+ image: torch.Tensor,
+ boxes: torch.Tensor,
+ labels: Optional[List[str]] = None,
+ colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
+ width: int = 1,
+ font: Optional[str] = None,
+ font_size: int = 10
+) -> torch.Tensor:
+
+ """
+ Draws bounding boxes on given image.
+ The values of the input image should be uint8 between 0 and 255.
+
+ Args:
+ image (Tensor): Tensor of shape (C x H x W)
+ bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
+ the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
+ `0 <= ymin < ymax < H`.
+ labels (List[str]): List containing the labels of bounding boxes.
+ colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
+ be represented as `str` or `Tuple[int, int, int]`.
+ width (int): Width of bounding box.
+ font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
+ also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
+ `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
+ font_size (int): The requested font size in points.
+ """
+
+ if not isinstance(image, torch.Tensor):
+ raise TypeError(f"Tensor expected, got {type(image)}")
+ elif image.dtype != torch.uint8:
+ raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
+ elif image.dim() != 3:
+ raise ValueError("Pass individual images, not batches")
+
+ ndarr = image.permute(1, 2, 0).numpy()
+ img_to_draw = Image.fromarray(ndarr)
+
+ img_boxes = boxes.to(torch.int64).tolist()
+
+ draw = ImageDraw.Draw(img_to_draw)
+
+ for i, bbox in enumerate(img_boxes):
+ color = None if colors is None else colors[i]
+ draw.rectangle(bbox, width=width, outline=color)
+
+ if labels is not None:
+ txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)
+ draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
+
+ return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)
|
{"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -1,8 +1,13 @@\n from typing import Union, Optional, List, Tuple, Text, BinaryIO\n-import io\n import pathlib\n import torch\n import math\n+import numpy as np\n+from PIL import Image, ImageDraw\n+from PIL import ImageFont\n+\n+__all__ = [\"make_grid\", \"save_image\", \"draw_bounding_boxes\"]\n+\n irange = range\n \n \n@@ -121,10 +126,64 @@\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n- from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n+\n+\[email protected]_grad()\n+def draw_bounding_boxes(\n+ image: torch.Tensor,\n+ boxes: torch.Tensor,\n+ labels: Optional[List[str]] = None,\n+ colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\n+ width: int = 1,\n+ font: Optional[str] = None,\n+ font_size: int = 10\n+) -> torch.Tensor:\n+\n+ \"\"\"\n+ Draws bounding boxes on given image.\n+ The values of the input image should be uint8 between 0 and 255.\n+\n+ Args:\n+ image (Tensor): Tensor of shape (C x H x W)\n+ bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n+ the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n+ `0 <= ymin < ymax < H`.\n+ labels (List[str]): List containing the labels of bounding boxes.\n+ colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\n+ be represented as `str` or `Tuple[int, int, int]`.\n+ width (int): Width of bounding box.\n+ font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\n+ also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n+ `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n+ font_size (int): The requested font size in points.\n+ \"\"\"\n+\n+ if not isinstance(image, torch.Tensor):\n+ raise TypeError(f\"Tensor expected, got {type(image)}\")\n+ elif image.dtype != torch.uint8:\n+ raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n+ elif image.dim() != 3:\n+ raise ValueError(\"Pass individual images, not batches\")\n+\n+ ndarr = image.permute(1, 2, 0).numpy()\n+ img_to_draw = Image.fromarray(ndarr)\n+\n+ img_boxes = boxes.to(torch.int64).tolist()\n+\n+ draw = ImageDraw.Draw(img_to_draw)\n+\n+ for i, bbox in enumerate(img_boxes):\n+ color = None if colors is None else colors[i]\n+ draw.rectangle(bbox, width=width, outline=color)\n+\n+ if labels is not None:\n+ txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\n+ draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\n+\n+ return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)\n", "issue": "bounding boxes in torchvision.utils\n## \ud83d\ude80 Feature\r\n\r\nI'd like to easily be able to draw a bounding box onto an image represented as a torch tensor. \r\n\r\n## Motivation\r\n\r\nUsing YOLO, I get a bunch of bounding boxes in an image. 
I want to be able to easily draw those onto a torch tensor (created via `torchvision.transforms.ToTensor`). This seems like a reasonable request because other bbox utilities such as NMS exist in torchvision.\r\n\r\n## Pitch\r\n\r\n```python\r\ntensor_image = torch.tensor(...)\r\nnew_img = torchvision.utils.draw_bounding_box(\r\n tensor_image,\r\n [x_min, y_min, x_max, y_max],\r\n)\r\n```\r\n\r\n## Alternatives\r\n\r\nI *think* the following works right now, but the `torch.Tensor` > `PIL.Image` > `torch.Tensor` conversion for a single operation per image makes me feel uncomfortable due to efficiency.\r\n```python\r\n# entirely untested!\r\n\r\ntensor_image = torch.tensor(...)\r\ndef draw_bounding_box(tensor, bbox, **kwargs): # TODO: don't use a kwargs dict\r\n # adapted from https://pytorch.org/docs/stable/_modules/torchvision/utils.html#save_image\r\n from PIL import Image\r\n # TODO: none of the kwargs to make_grid are in this scope\r\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\r\n normalize=normalize, range=range, scale_each=scale_each)\r\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\r\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\r\n\r\n # TODO: convert bbox to the appropriate type\r\n return cv2.rectangle(ndarr, bbox) # use PIL if opencv-python isn't a dependency\r\n```\r\n\r\n## Additional context\r\n\r\nSee [`cv2.rectangle`](https://docs.opencv.org/4.4.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) ([source](https://github.com/opencv/opencv/blob/422f802ec96feecbc9db2f60879e8fe364cf8a92/modules/imgproc/src/drawing.cpp#L1829-L1859)) and [`PIL.ImageDraw.rectangle`](https://pillow.readthedocs.io/en/stable/reference/ImageDraw.html#PIL.ImageDraw.ImageDraw.rectangle) ([source](https://pillow.readthedocs.io/en/stable/_modules/PIL/ImageDraw.html#ImageDraw.rectangle)).\r\n\n", "before_files": [{"content": "from typing import Union, Optional, List, Tuple, Text, BinaryIO\nimport io\nimport pathlib\nimport torch\nimport math\nirange = range\n\n\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n) -> torch.Tensor:\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. Default: ``False``.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. 
Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[Text, pathlib.Path, BinaryIO],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n format: Optional[str] = None,\n) -> None:\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. 
If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n", "path": "torchvision/utils.py"}], "after_files": [{"content": "from typing import Union, Optional, List, Tuple, Text, BinaryIO\nimport pathlib\nimport torch\nimport math\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom PIL import ImageFont\n\n__all__ = [\"make_grid\", \"save_image\", \"draw_bounding_boxes\"]\n\nirange = range\n\n\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n) -> torch.Tensor:\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. Default: ``False``.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[Text, pathlib.Path, BinaryIO],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n format: Optional[str] = None,\n) -> None:\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n\n\[email protected]_grad()\ndef draw_bounding_boxes(\n image: torch.Tensor,\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\n width: int = 1,\n font: Optional[str] = None,\n font_size: int = 10\n) -> torch.Tensor:\n\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (C x H x W)\n bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n `0 <= ymin < ymax < H`.\n labels (List[str]): List containing the labels of bounding boxes.\n colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\n be represented as `str` or `Tuple[int, int, int]`.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. 
If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n font_size (int): The requested font size in points.\n \"\"\"\n\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Tensor expected, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n\n ndarr = image.permute(1, 2, 0).numpy()\n img_to_draw = Image.fromarray(ndarr)\n\n img_boxes = boxes.to(torch.int64).tolist()\n\n draw = ImageDraw.Draw(img_to_draw)\n\n for i, bbox in enumerate(img_boxes):\n color = None if colors is None else colors[i]\n draw.rectangle(bbox, width=width, outline=color)\n\n if labels is not None:\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\n draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)\n", "path": "torchvision/utils.py"}]}
| 2,576 | 948 |
gh_patches_debug_299
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitHub Integration raises "NotImplementedError Algorithm not supported"
We have working github integration code using PyGithub v1.32 that does essentially:
```python
integration = github.GithubIntegration(settings.GITHUB_INTEGRATION_ID, settings.GITHUB_INTEGRATION_PRIVATE_PEM)
inst_token = integration.get_access_token(installation_id).token
```
After upgrading to v1.34 this code raises "NotImplementedError Algorithm not supported"
I suspect it has to do with the [switch to pyjwt from python-jose](https://github.com/PyGithub/PyGithub/commit/d447eb13b9f4688a4c981ca03b1b3111fb299142)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # ########################## Copyrights and license ############################
5 # #
6 # Copyright 2012 Vincent Jacques <[email protected]> #
7 # Copyright 2012 Zearin <[email protected]> #
8 # Copyright 2013 Vincent Jacques <[email protected]> #
9 # #
10 # This file is part of PyGithub. #
11 # http://pygithub.github.io/PyGithub/v1/index.html #
12 # #
13 # PyGithub is free software: you can redistribute it and/or modify it under #
14 # the terms of the GNU Lesser General Public License as published by the Free #
15 # Software Foundation, either version 3 of the License, or (at your option) #
16 # any later version. #
17 # #
18 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
19 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
20 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
21 # details. #
22 # #
23 # You should have received a copy of the GNU Lesser General Public License #
24 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
25 # #
26 # ##############################################################################
27
28 import setuptools
29 import textwrap
30
31 version = "1.34"
32
33
34 if __name__ == "__main__":
35 setuptools.setup(
36 name="PyGithub",
37 version=version,
38 description="Use the full Github API v3",
39 author="Vincent Jacques",
40 author_email="[email protected]",
41 url="http://pygithub.github.io/PyGithub/v1/index.html",
42 long_description=textwrap.dedent("""\
43 (Very short) Tutorial
44 =====================
45
46 First create a Github instance::
47
48 from github import Github
49
50 g = Github("user", "password")
51
52 Then play with your Github objects::
53
54 for repo in g.get_user().get_repos():
55 print repo.name
56 repo.edit(has_wiki=False)
57
58 You can also create a Github instance with an OAuth token::
59
60 g = Github(token)
61
62 Or without authentication::
63
64 g = Github()
65
66 Reference documentation
67 =======================
68
69 See http://pygithub.github.io/PyGithub/v1/index.html"""),
70 packages=[
71 "github",
72 "github.tests",
73 ],
74 package_data={
75 "github": ["tests/ReplayData/*.txt"]
76 },
77 classifiers=[
78 "Development Status :: 5 - Production/Stable",
79 "Environment :: Web Environment",
80 "Intended Audience :: Developers",
81 "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
82 "Operating System :: OS Independent",
83 "Programming Language :: Python",
84 "Programming Language :: Python :: 2",
85 "Programming Language :: Python :: 2.5",
86 "Programming Language :: Python :: 2.6",
87 "Programming Language :: Python :: 2.7",
88 "Programming Language :: Python :: 3",
89 "Programming Language :: Python :: 3.2",
90 "Programming Language :: Python :: 3.3",
91 "Programming Language :: Python :: 3.4",
92 "Programming Language :: Python :: 3.5",
93 "Topic :: Software Development",
94 ],
95 test_suite="github.tests.AllTests",
96 use_2to3=True,
97 install_requires=[
98 "pyjwt"
99 ]
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,5 +96,8 @@
use_2to3=True,
install_requires=[
"pyjwt"
- ]
+ ],
+ extras_require = {
+ "integrations": ["cryptography"]
+ }
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,5 +96,8 @@\n use_2to3=True,\n install_requires=[\n \"pyjwt\"\n- ]\n+ ],\n+ extras_require = {\n+ \"integrations\": [\"cryptography\"]\n+ }\n )\n", "issue": "GitHub Integration raises \"NotImplementedError Algorithm not supported\"\nWe have working github integration code using PyGithub v1.32 that does essentially:\r\n\r\n```python\r\nintegration = github.GithubIntegration(settings.GITHUB_INTEGRATION_ID, settings.GITHUB_INTEGRATION_PRIVATE_PEM)\r\ninst_token = integration.get_access_token(installation_id).token\r\n```\r\nAfter upgrading to v1.34 this code raises \"NotImplementedError Algorithm not supported\"\r\n\r\nI suspect it has to do with the [switch to pyjwt from python-jose](https://github.com/PyGithub/PyGithub/commit/d447eb13b9f4688a4c981ca03b1b3111fb299142)\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n# ##############################################################################\n\nimport setuptools\nimport textwrap\n\nversion = \"1.34\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"http://pygithub.github.io/PyGithub/v1/index.html\",\n long_description=textwrap.dedent(\"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n g = Github(\"user\", \"password\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print repo.name\n repo.edit(has_wiki=False)\n\n You can also create a Github instance with an OAuth token::\n\n g = Github(token)\n\n Or without authentication::\n\n g = Github()\n\n Reference documentation\n =======================\n\n See http://pygithub.github.io/PyGithub/v1/index.html\"\"\"),\n packages=[\n \"github\",\n \"github.tests\",\n ],\n package_data={\n \"github\": [\"tests/ReplayData/*.txt\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development\",\n ],\n test_suite=\"github.tests.AllTests\",\n use_2to3=True,\n install_requires=[\n \"pyjwt\"\n ]\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n# ##############################################################################\n\nimport setuptools\nimport textwrap\n\nversion = \"1.34\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"http://pygithub.github.io/PyGithub/v1/index.html\",\n long_description=textwrap.dedent(\"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n g = Github(\"user\", \"password\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print repo.name\n repo.edit(has_wiki=False)\n\n You can also create a Github instance with an OAuth token::\n\n g = Github(token)\n\n Or without authentication::\n\n g = Github()\n\n Reference documentation\n =======================\n\n See http://pygithub.github.io/PyGithub/v1/index.html\"\"\"),\n packages=[\n \"github\",\n \"github.tests\",\n ],\n package_data={\n \"github\": [\"tests/ReplayData/*.txt\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development\",\n ],\n test_suite=\"github.tests.AllTests\",\n use_2to3=True,\n install_requires=[\n \"pyjwt\"\n ],\n extras_require = {\n \"integrations\": [\"cryptography\"]\n }\n )\n", "path": "setup.py"}]}
| 1,370 | 76 |
gh_patches_debug_55170
|
rasdani/github-patches
|
git_diff
|
spack__spack-10720
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
git-lfs aborts (sometimes), fix in progress upstream
This is mostly an FYI.
Starting with `git-lfs@2.7.0` we frequently had `git-lfs` aborting. In some situations it ran successfully, in others it didn't. It seemed to depend on what other modules were loaded, but...
Between `git-lfs@2.6.1` and `git-lfs@2.7.0` the Makefile started unconditionally adding a `-extldflags` bit to the `go` command line, setting it to the value of `LDFLAGS`. If `LDFLAGS` isn't set to anything (our case) then it wasn't given an argument, even though it needs one. I'm not sure why this doesn't provide an error from the compiler, it seems to be grabbing something out of whatever comes next in memory.
I've changed the Makefile only set `-extldflags` if `LDFLAGS` is defined and made a Pull Request upstream: https://github.com/git-lfs/git-lfs/pull/3545
Depending what Upstream has to say, perhaps we'll want to patch `git-lfs@2.7.0`, or forbid it, or ...
I'll keep this updated as the `git-lfs` PR progresses.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/git-lfs/package.py`
Content:
```
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class GitLfs(MakefilePackage):
10 """Git LFS is a system for managing and versioning large files in
11 association with a Git repository. Instead of storing the large files
12 within the Git repository as blobs, Git LFS stores special "pointer
13 files" in the repository, while storing the actual file contents on a
14 Git LFS server."""
15
16 homepage = "https://git-lfs.github.com"
17 url = "https://github.com/git-lfs/git-lfs/archive/v2.6.1.tar.gz"
18
19 version('2.7.0', sha256='1c829ddd163be2206a44edb366bd7f6d84c5afae3496687405ca9d2a5f3af07b')
20 version('2.6.1', sha256='e17cd9d4e66d1116be32f7ddc7e660c7f8fabbf510bc01b01ec15a22dd934ead')
21
22 depends_on('[email protected]:', type='build')
23 depends_on('[email protected]:', type='run')
24
25 parallel = False
26
27 # Git-lfs does not provide an 'install' target in the Makefile
28 def install(self, spec, prefix):
29 mkdirp(prefix.bin)
30 install(join_path('bin', 'git-lfs'), prefix.bin)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/git-lfs/package.py b/var/spack/repos/builtin/packages/git-lfs/package.py
--- a/var/spack/repos/builtin/packages/git-lfs/package.py
+++ b/var/spack/repos/builtin/packages/git-lfs/package.py
@@ -22,6 +22,8 @@
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='run')
+ patch('patches/issue-10702.patch', when='@2.7.0')
+
parallel = False
# Git-lfs does not provide an 'install' target in the Makefile
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/git-lfs/package.py b/var/spack/repos/builtin/packages/git-lfs/package.py\n--- a/var/spack/repos/builtin/packages/git-lfs/package.py\n+++ b/var/spack/repos/builtin/packages/git-lfs/package.py\n@@ -22,6 +22,8 @@\n depends_on('[email protected]:', type='build')\n depends_on('[email protected]:', type='run')\n \n+ patch('patches/issue-10702.patch', when='@2.7.0')\n+\n parallel = False\n \n # Git-lfs does not provide an 'install' target in the Makefile\n", "issue": "git-lfs aborts (sometimes), fix in progress upstream\nThis is mostly an FYI.\r\n\r\nStarting with `[email protected]` we frequently had `git-lfs` aborting. In some situations it ran successfully, in others it didn't. It seemed to depend on what other modules were loaded, but...\r\n\r\nBetween `[email protected]` and `[email protected]` the Makefile started unconditionally adding a `-extldflags` bit to the `go` command line, setting it to the value of `LDFLAGS`. If `LDFLAGS` isn't set to anything (our case) then it wasn't given an argument, even though it needs one. I'm not sure why this doesn't provide an error from the compiler, it seems to be grabbing something out of whatever comes next in memory.\r\n\r\nI've changed the Makefile only set `-extldflags` if `LDFLAGS` is defined and made a Pull Request upstream: https://github.com/git-lfs/git-lfs/pull/3545\r\n\r\nDepending what Upstream has to say, perhaps we'll want to patch `[email protected]`, or forbid it, or ...\r\n\r\nI'll keep this updated as the `git-lfs` PR progresses.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass GitLfs(MakefilePackage):\n \"\"\"Git LFS is a system for managing and versioning large files in\n association with a Git repository. Instead of storing the large files\n within the Git repository as blobs, Git LFS stores special \"pointer\n files\" in the repository, while storing the actual file contents on a\n Git LFS server.\"\"\"\n\n homepage = \"https://git-lfs.github.com\"\n url = \"https://github.com/git-lfs/git-lfs/archive/v2.6.1.tar.gz\"\n\n version('2.7.0', sha256='1c829ddd163be2206a44edb366bd7f6d84c5afae3496687405ca9d2a5f3af07b')\n version('2.6.1', sha256='e17cd9d4e66d1116be32f7ddc7e660c7f8fabbf510bc01b01ec15a22dd934ead')\n\n depends_on('[email protected]:', type='build')\n depends_on('[email protected]:', type='run')\n\n parallel = False\n\n # Git-lfs does not provide an 'install' target in the Makefile\n def install(self, spec, prefix):\n mkdirp(prefix.bin)\n install(join_path('bin', 'git-lfs'), prefix.bin)\n", "path": "var/spack/repos/builtin/packages/git-lfs/package.py"}], "after_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass GitLfs(MakefilePackage):\n \"\"\"Git LFS is a system for managing and versioning large files in\n association with a Git repository. 
Instead of storing the large files\n within the Git repository as blobs, Git LFS stores special \"pointer\n files\" in the repository, while storing the actual file contents on a\n Git LFS server.\"\"\"\n\n homepage = \"https://git-lfs.github.com\"\n url = \"https://github.com/git-lfs/git-lfs/archive/v2.6.1.tar.gz\"\n\n version('2.7.0', sha256='1c829ddd163be2206a44edb366bd7f6d84c5afae3496687405ca9d2a5f3af07b')\n version('2.6.1', sha256='e17cd9d4e66d1116be32f7ddc7e660c7f8fabbf510bc01b01ec15a22dd934ead')\n\n depends_on('[email protected]:', type='build')\n depends_on('[email protected]:', type='run')\n\n patch('patches/issue-10702.patch', when='@2.7.0')\n\n parallel = False\n\n # Git-lfs does not provide an 'install' target in the Makefile\n def install(self, spec, prefix):\n mkdirp(prefix.bin)\n install(join_path('bin', 'git-lfs'), prefix.bin)\n", "path": "var/spack/repos/builtin/packages/git-lfs/package.py"}]}
| 995 | 146 |
gh_patches_debug_4801
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-786
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive for `WPS345 Found meaningless number operation`
# Bug report
<!--
Hi, thanks for submitting a bug. We appreciate that.
But, we will need some information about what's wrong to help you.
-->
## What's wrong
False positive for `1 / some`:
```
13:12 WPS345 Found meaningless number operation
return 1 / number
```
<!-- Describe what is not working. Please, attach a traceback. -->
## How is that should be
`1 / ANY` should be allowed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/operators.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import ClassVar, Mapping, Optional, Tuple, Type
5
6 from typing_extensions import final
7
8 from wemake_python_styleguide.logic import walk
9 from wemake_python_styleguide.logic.operators import (
10 count_unary_operator,
11 unwrap_unary_node,
12 )
13 from wemake_python_styleguide.types import AnyNodes
14 from wemake_python_styleguide.violations import consistency
15 from wemake_python_styleguide.violations.best_practices import (
16 ListMultiplyViolation,
17 )
18 from wemake_python_styleguide.visitors import base
19
20 _MeaninglessOperators = Mapping[int, Tuple[Type[ast.operator], ...]]
21 _OperatorLimits = Mapping[Type[ast.unaryop], int]
22
23
24 @final
25 class UselessOperatorsVisitor(base.BaseNodeVisitor):
26 """Checks operators used in the code."""
27
28 _limits: ClassVar[_OperatorLimits] = {
29 ast.UAdd: 0,
30 ast.Invert: 1,
31 ast.Not: 1,
32 ast.USub: 1,
33 }
34
35 _meaningless_operations: ClassVar[_MeaninglessOperators] = {
36 # ast.Div is not in the list,
37 # since we have a special violation for it.
38 0: (ast.Mult, ast.Add, ast.Sub, ast.Pow),
39 # `1` and `-1` are different, `-1` is allowed.
40 1: (ast.Div, ast.Mult, ast.Pow),
41 }
42
43 def visit_Num(self, node: ast.Num) -> None:
44 """
45 Checks numbers unnecessary operators inside the code.
46
47 Raises:
48 UselessOperatorsViolation
49
50 """
51 self._check_operator_count(node)
52 self.generic_visit(node)
53
54 def visit_BinOp(self, node: ast.BinOp) -> None:
55 """
56 Visits binary operators.
57
58 Raises:
59 ZeroDivisionViolation
60
61 """
62 self._check_zero_division(node.op, node.right)
63 self._check_useless_math_operator(node.op, node.left, node.right)
64 self.generic_visit(node)
65
66 def visit_AugAssign(self, node: ast.AugAssign) -> None:
67 """
68 Visits augmented assigns.
69
70 Raises:
71 ZeroDivisionViolation
72
73 """
74 self._check_zero_division(node.op, node.value)
75 self._check_useless_math_operator(node.op, node.value)
76 self.generic_visit(node)
77
78 def _check_operator_count(self, node: ast.Num) -> None:
79 for node_type, limit in self._limits.items():
80 if count_unary_operator(node, node_type) > limit:
81 self.add_violation(
82 consistency.UselessOperatorsViolation(
83 node, text=str(node.n),
84 ),
85 )
86
87 def _check_zero_division(self, op: ast.operator, number: ast.AST) -> None:
88 number = unwrap_unary_node(number)
89
90 is_zero_division = (
91 isinstance(op, ast.Div) and
92 isinstance(number, ast.Num) and
93 number.n == 0
94 )
95 if is_zero_division:
96 self.add_violation(consistency.ZeroDivisionViolation(number))
97
98 def _check_useless_math_operator(
99 self,
100 op: ast.operator,
101 left: ast.AST,
102 right: Optional[ast.AST] = None,
103 ) -> None:
104 non_negative_numbers = self._get_non_negative_nodes(left, right)
105
106 for number in non_negative_numbers:
107 forbidden = self._meaningless_operations.get(number.n, None)
108 if forbidden and isinstance(op, forbidden):
109 self.add_violation(
110 consistency.MeaninglessNumberOperationViolation(number),
111 )
112
113 def _get_non_negative_nodes(
114 self,
115 left: ast.AST,
116 right: Optional[ast.AST] = None,
117 ):
118 non_negative_numbers = []
119 for node in filter(None, (left, right)):
120 real_node = unwrap_unary_node(node)
121 if not isinstance(real_node, ast.Num):
122 continue
123
124 if real_node.n not in self._meaningless_operations:
125 continue
126
127 if real_node.n == 1 and walk.is_contained(node, ast.USub):
128 continue
129 non_negative_numbers.append(real_node)
130 return non_negative_numbers
131
132
133 @final
134 class WrongMathOperatorVisitor(base.BaseNodeVisitor):
135 """Checks that there are not wrong math operations."""
136
137 _string_nodes: ClassVar[AnyNodes] = (
138 ast.Str,
139 ast.Bytes,
140 ast.JoinedStr,
141 )
142
143 _list_nodes: ClassVar[AnyNodes] = (
144 ast.List,
145 ast.ListComp,
146 )
147
148 def visit_BinOp(self, node: ast.BinOp) -> None:
149 """
150 Visits binary operations.
151
152 Raises:
153 DoubleMinusOpeationViolation
154
155 """
156 self._check_negation(node.op, node.right)
157 self._check_list_multiply(node)
158 self._check_string_concat(node.left, node.op, node.right)
159 self.generic_visit(node)
160
161 def visit_AugAssign(self, node: ast.AugAssign) -> None:
162 """
163 Visits augmented assignes.
164
165 Raises:
166 DoubleMinusOpeationViolation
167
168 """
169 self._check_negation(node.op, node.value)
170 self._check_string_concat(node.value, node.op)
171 self.generic_visit(node)
172
173 def _check_negation(self, op: ast.operator, right: ast.AST) -> None:
174 is_double_minus = (
175 isinstance(op, (ast.Add, ast.Sub)) and
176 isinstance(right, ast.UnaryOp) and
177 isinstance(right.op, ast.USub)
178 )
179 if is_double_minus:
180 self.add_violation(
181 consistency.OperationSignNegationViolation(right),
182 )
183
184 def _check_list_multiply(self, node: ast.BinOp) -> None:
185 is_list_multiply = (
186 isinstance(node.op, ast.Mult) and
187 isinstance(node.left, self._list_nodes)
188 )
189 if is_list_multiply:
190 self.add_violation(ListMultiplyViolation(node.left))
191
192 def _check_string_concat(
193 self,
194 left: ast.AST,
195 op: ast.operator,
196 right: Optional[ast.AST] = None,
197 ) -> None:
198 if not isinstance(op, ast.Add):
199 return
200
201 left_line = getattr(left, 'lineno', 0)
202 if left_line != getattr(right, 'lineno', left_line):
203 # By default we treat nodes that do not have lineno
204 # as nodes on the same line.
205 return
206
207 for node in (left, right):
208 if isinstance(node, self._string_nodes):
209 self.add_violation(
210 consistency.ExplicitStringConcatViolation(node),
211 )
212 return
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/visitors/ast/operators.py b/wemake_python_styleguide/visitors/ast/operators.py
--- a/wemake_python_styleguide/visitors/ast/operators.py
+++ b/wemake_python_styleguide/visitors/ast/operators.py
@@ -101,6 +101,9 @@
left: ast.AST,
right: Optional[ast.AST] = None,
) -> None:
+ if isinstance(left, ast.Num) and right:
+ if left.n == 1:
+ left = None
non_negative_numbers = self._get_non_negative_nodes(left, right)
for number in non_negative_numbers:
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/operators.py b/wemake_python_styleguide/visitors/ast/operators.py\n--- a/wemake_python_styleguide/visitors/ast/operators.py\n+++ b/wemake_python_styleguide/visitors/ast/operators.py\n@@ -101,6 +101,9 @@\n left: ast.AST,\n right: Optional[ast.AST] = None,\n ) -> None:\n+ if isinstance(left, ast.Num) and right:\n+ if left.n == 1:\n+ left = None\n non_negative_numbers = self._get_non_negative_nodes(left, right)\n \n for number in non_negative_numbers:\n", "issue": "False positive for `WPS345 Found meaningless number operation`\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\nFalse positive for `1 / some`:\r\n\r\n```\r\n 13:12 WPS345 Found meaningless number operation\r\n return 1 / number\r\n```\r\n\r\n<!-- Describe what is not working. Please, attach a traceback. -->\r\n\r\n## How is that should be\r\n\r\n`1 / ANY` should be allowed.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Mapping, Optional, Tuple, Type\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logic import walk\nfrom wemake_python_styleguide.logic.operators import (\n count_unary_operator,\n unwrap_unary_node,\n)\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations import consistency\nfrom wemake_python_styleguide.violations.best_practices import (\n ListMultiplyViolation,\n)\nfrom wemake_python_styleguide.visitors import base\n\n_MeaninglessOperators = Mapping[int, Tuple[Type[ast.operator], ...]]\n_OperatorLimits = Mapping[Type[ast.unaryop], int]\n\n\n@final\nclass UselessOperatorsVisitor(base.BaseNodeVisitor):\n \"\"\"Checks operators used in the code.\"\"\"\n\n _limits: ClassVar[_OperatorLimits] = {\n ast.UAdd: 0,\n ast.Invert: 1,\n ast.Not: 1,\n ast.USub: 1,\n }\n\n _meaningless_operations: ClassVar[_MeaninglessOperators] = {\n # ast.Div is not in the list,\n # since we have a special violation for it.\n 0: (ast.Mult, ast.Add, ast.Sub, ast.Pow),\n # `1` and `-1` are different, `-1` is allowed.\n 1: (ast.Div, ast.Mult, ast.Pow),\n }\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers unnecessary operators inside the code.\n\n Raises:\n UselessOperatorsViolation\n\n \"\"\"\n self._check_operator_count(node)\n self.generic_visit(node)\n\n def visit_BinOp(self, node: ast.BinOp) -> None:\n \"\"\"\n Visits binary operators.\n\n Raises:\n ZeroDivisionViolation\n\n \"\"\"\n self._check_zero_division(node.op, node.right)\n self._check_useless_math_operator(node.op, node.left, node.right)\n self.generic_visit(node)\n\n def visit_AugAssign(self, node: ast.AugAssign) -> None:\n \"\"\"\n Visits augmented assigns.\n\n Raises:\n ZeroDivisionViolation\n\n \"\"\"\n self._check_zero_division(node.op, node.value)\n self._check_useless_math_operator(node.op, node.value)\n self.generic_visit(node)\n\n def _check_operator_count(self, node: ast.Num) -> None:\n for node_type, limit in self._limits.items():\n if count_unary_operator(node, node_type) > limit:\n self.add_violation(\n consistency.UselessOperatorsViolation(\n node, text=str(node.n),\n ),\n )\n\n def _check_zero_division(self, op: ast.operator, number: ast.AST) -> None:\n number = unwrap_unary_node(number)\n\n is_zero_division = (\n isinstance(op, ast.Div) and\n isinstance(number, ast.Num) and\n number.n == 0\n )\n if 
is_zero_division:\n self.add_violation(consistency.ZeroDivisionViolation(number))\n\n def _check_useless_math_operator(\n self,\n op: ast.operator,\n left: ast.AST,\n right: Optional[ast.AST] = None,\n ) -> None:\n non_negative_numbers = self._get_non_negative_nodes(left, right)\n\n for number in non_negative_numbers:\n forbidden = self._meaningless_operations.get(number.n, None)\n if forbidden and isinstance(op, forbidden):\n self.add_violation(\n consistency.MeaninglessNumberOperationViolation(number),\n )\n\n def _get_non_negative_nodes(\n self,\n left: ast.AST,\n right: Optional[ast.AST] = None,\n ):\n non_negative_numbers = []\n for node in filter(None, (left, right)):\n real_node = unwrap_unary_node(node)\n if not isinstance(real_node, ast.Num):\n continue\n\n if real_node.n not in self._meaningless_operations:\n continue\n\n if real_node.n == 1 and walk.is_contained(node, ast.USub):\n continue\n non_negative_numbers.append(real_node)\n return non_negative_numbers\n\n\n@final\nclass WrongMathOperatorVisitor(base.BaseNodeVisitor):\n \"\"\"Checks that there are not wrong math operations.\"\"\"\n\n _string_nodes: ClassVar[AnyNodes] = (\n ast.Str,\n ast.Bytes,\n ast.JoinedStr,\n )\n\n _list_nodes: ClassVar[AnyNodes] = (\n ast.List,\n ast.ListComp,\n )\n\n def visit_BinOp(self, node: ast.BinOp) -> None:\n \"\"\"\n Visits binary operations.\n\n Raises:\n DoubleMinusOpeationViolation\n\n \"\"\"\n self._check_negation(node.op, node.right)\n self._check_list_multiply(node)\n self._check_string_concat(node.left, node.op, node.right)\n self.generic_visit(node)\n\n def visit_AugAssign(self, node: ast.AugAssign) -> None:\n \"\"\"\n Visits augmented assignes.\n\n Raises:\n DoubleMinusOpeationViolation\n\n \"\"\"\n self._check_negation(node.op, node.value)\n self._check_string_concat(node.value, node.op)\n self.generic_visit(node)\n\n def _check_negation(self, op: ast.operator, right: ast.AST) -> None:\n is_double_minus = (\n isinstance(op, (ast.Add, ast.Sub)) and\n isinstance(right, ast.UnaryOp) and\n isinstance(right.op, ast.USub)\n )\n if is_double_minus:\n self.add_violation(\n consistency.OperationSignNegationViolation(right),\n )\n\n def _check_list_multiply(self, node: ast.BinOp) -> None:\n is_list_multiply = (\n isinstance(node.op, ast.Mult) and\n isinstance(node.left, self._list_nodes)\n )\n if is_list_multiply:\n self.add_violation(ListMultiplyViolation(node.left))\n\n def _check_string_concat(\n self,\n left: ast.AST,\n op: ast.operator,\n right: Optional[ast.AST] = None,\n ) -> None:\n if not isinstance(op, ast.Add):\n return\n\n left_line = getattr(left, 'lineno', 0)\n if left_line != getattr(right, 'lineno', left_line):\n # By default we treat nodes that do not have lineno\n # as nodes on the same line.\n return\n\n for node in (left, right):\n if isinstance(node, self._string_nodes):\n self.add_violation(\n consistency.ExplicitStringConcatViolation(node),\n )\n return\n", "path": "wemake_python_styleguide/visitors/ast/operators.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Mapping, Optional, Tuple, Type\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logic import walk\nfrom wemake_python_styleguide.logic.operators import (\n count_unary_operator,\n unwrap_unary_node,\n)\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations import consistency\nfrom wemake_python_styleguide.violations.best_practices import (\n ListMultiplyViolation,\n)\nfrom 
wemake_python_styleguide.visitors import base\n\n_MeaninglessOperators = Mapping[int, Tuple[Type[ast.operator], ...]]\n_OperatorLimits = Mapping[Type[ast.unaryop], int]\n\n\n@final\nclass UselessOperatorsVisitor(base.BaseNodeVisitor):\n \"\"\"Checks operators used in the code.\"\"\"\n\n _limits: ClassVar[_OperatorLimits] = {\n ast.UAdd: 0,\n ast.Invert: 1,\n ast.Not: 1,\n ast.USub: 1,\n }\n\n _meaningless_operations: ClassVar[_MeaninglessOperators] = {\n # ast.Div is not in the list,\n # since we have a special violation for it.\n 0: (ast.Mult, ast.Add, ast.Sub, ast.Pow),\n # `1` and `-1` are different, `-1` is allowed.\n 1: (ast.Div, ast.Mult, ast.Pow),\n }\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers unnecessary operators inside the code.\n\n Raises:\n UselessOperatorsViolation\n\n \"\"\"\n self._check_operator_count(node)\n self.generic_visit(node)\n\n def visit_BinOp(self, node: ast.BinOp) -> None:\n \"\"\"\n Visits binary operators.\n\n Raises:\n ZeroDivisionViolation\n\n \"\"\"\n self._check_zero_division(node.op, node.right)\n self._check_useless_math_operator(node.op, node.left, node.right)\n self.generic_visit(node)\n\n def visit_AugAssign(self, node: ast.AugAssign) -> None:\n \"\"\"\n Visits augmented assigns.\n\n Raises:\n ZeroDivisionViolation\n\n \"\"\"\n self._check_zero_division(node.op, node.value)\n self._check_useless_math_operator(node.op, node.value)\n self.generic_visit(node)\n\n def _check_operator_count(self, node: ast.Num) -> None:\n for node_type, limit in self._limits.items():\n if count_unary_operator(node, node_type) > limit:\n self.add_violation(\n consistency.UselessOperatorsViolation(\n node, text=str(node.n),\n ),\n )\n\n def _check_zero_division(self, op: ast.operator, number: ast.AST) -> None:\n number = unwrap_unary_node(number)\n\n is_zero_division = (\n isinstance(op, ast.Div) and\n isinstance(number, ast.Num) and\n number.n == 0\n )\n if is_zero_division:\n self.add_violation(consistency.ZeroDivisionViolation(number))\n\n def _check_useless_math_operator(\n self,\n op: ast.operator,\n left: ast.AST,\n right: Optional[ast.AST] = None,\n ) -> None:\n if isinstance(left, ast.Num) and right:\n if left.n == 1:\n left = None\n non_negative_numbers = self._get_non_negative_nodes(left, right)\n\n for number in non_negative_numbers:\n forbidden = self._meaningless_operations.get(number.n, None)\n if forbidden and isinstance(op, forbidden):\n self.add_violation(\n consistency.MeaninglessNumberOperationViolation(number),\n )\n\n def _get_non_negative_nodes(\n self,\n left: ast.AST,\n right: Optional[ast.AST] = None,\n ):\n non_negative_numbers = []\n for node in filter(None, (left, right)):\n real_node = unwrap_unary_node(node)\n if not isinstance(real_node, ast.Num):\n continue\n\n if real_node.n not in self._meaningless_operations:\n continue\n\n if real_node.n == 1 and walk.is_contained(node, ast.USub):\n continue\n non_negative_numbers.append(real_node)\n return non_negative_numbers\n\n\n@final\nclass WrongMathOperatorVisitor(base.BaseNodeVisitor):\n \"\"\"Checks that there are not wrong math operations.\"\"\"\n\n _string_nodes: ClassVar[AnyNodes] = (\n ast.Str,\n ast.Bytes,\n ast.JoinedStr,\n )\n\n _list_nodes: ClassVar[AnyNodes] = (\n ast.List,\n ast.ListComp,\n )\n\n def visit_BinOp(self, node: ast.BinOp) -> None:\n \"\"\"\n Visits binary operations.\n\n Raises:\n DoubleMinusOpeationViolation\n\n \"\"\"\n self._check_negation(node.op, node.right)\n self._check_list_multiply(node)\n self._check_string_concat(node.left, node.op, 
node.right)\n self.generic_visit(node)\n\n def visit_AugAssign(self, node: ast.AugAssign) -> None:\n \"\"\"\n Visits augmented assignes.\n\n Raises:\n DoubleMinusOpeationViolation\n\n \"\"\"\n self._check_negation(node.op, node.value)\n self._check_string_concat(node.value, node.op)\n self.generic_visit(node)\n\n def _check_negation(self, op: ast.operator, right: ast.AST) -> None:\n is_double_minus = (\n isinstance(op, (ast.Add, ast.Sub)) and\n isinstance(right, ast.UnaryOp) and\n isinstance(right.op, ast.USub)\n )\n if is_double_minus:\n self.add_violation(\n consistency.OperationSignNegationViolation(right),\n )\n\n def _check_list_multiply(self, node: ast.BinOp) -> None:\n is_list_multiply = (\n isinstance(node.op, ast.Mult) and\n isinstance(node.left, self._list_nodes)\n )\n if is_list_multiply:\n self.add_violation(ListMultiplyViolation(node.left))\n\n def _check_string_concat(\n self,\n left: ast.AST,\n op: ast.operator,\n right: Optional[ast.AST] = None,\n ) -> None:\n if not isinstance(op, ast.Add):\n return\n\n left_line = getattr(left, 'lineno', 0)\n if left_line != getattr(right, 'lineno', left_line):\n # By default we treat nodes that do not have lineno\n # as nodes on the same line.\n return\n\n for node in (left, right):\n if isinstance(node, self._string_nodes):\n self.add_violation(\n consistency.ExplicitStringConcatViolation(node),\n )\n return\n", "path": "wemake_python_styleguide/visitors/ast/operators.py"}]}
| 2,374 | 153 |
gh_patches_debug_15244
|
rasdani/github-patches
|
git_diff
|
google__fuzzbench-242
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should local docker run restrict CPU to 1 to match FuzzBench prod environment?
See also
https://github.com/google/fuzzbench/issues/173#issuecomment-605283610
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/fuzzer_utils.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Fuzzer helpers."""
15
16 import importlib
17 import os
18 import re
19 from typing import Optional
20
21 from common import logs
22 from common import utils
23 from common import yaml_utils
24
25 DEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'
26 FUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'
27 VALID_FUZZER_REGEX = re.compile(r'^[A-Za-z0-9_]+$')
28
29
30 def get_fuzz_target_binary(search_directory: str,
31 fuzz_target_name: str) -> Optional[str]:
32 """Return target binary path."""
33 if fuzz_target_name:
34 fuzz_target_binary = os.path.join(search_directory, fuzz_target_name)
35 if os.path.exists(fuzz_target_binary):
36 return fuzz_target_binary
37 return None
38
39 default_fuzz_target_binary = os.path.join(search_directory,
40 DEFAULT_FUZZ_TARGET_NAME)
41 if os.path.exists(default_fuzz_target_binary):
42 return default_fuzz_target_binary
43
44 for root, _, files in os.walk(search_directory):
45 if root == 'uninstrumented':
46 continue
47 for filename in files:
48 if filename.endswith('-uninstrumented'):
49 # Skip uninstrumented binaries (e.g. with QSYM).
50 continue
51
52 file_path = os.path.join(root, filename)
53 with open(file_path, 'rb') as file_handle:
54 if FUZZ_TARGET_SEARCH_STRING in file_handle.read():
55 return file_path
56
57 return None
58
59
60 def validate(fuzzer):
61 """Return True if |fuzzer| is a valid fuzzbench fuzzer."""
62 # Although importing probably allows a subset of what the regex allows, use
63 # the regex anyway to be safe. The regex is enforcing that the fuzzer is a
64 # valid path for GCS or a linux system.
65 if VALID_FUZZER_REGEX.match(fuzzer) is None:
66 logs.error('%s does not conform to %s pattern.', fuzzer,
67 VALID_FUZZER_REGEX.pattern)
68 return False
69
70 # Try importing the fuzzer module.
71 module_name = 'fuzzers.{}.fuzzer'.format(fuzzer)
72 try:
73 importlib.import_module(module_name)
74 return True
75 except Exception as error: # pylint: disable=broad-except
76 logs.error('Encountered "%s" while trying to import %s.', error,
77 module_name)
78 return False
79
80
81 def get_fuzzer_configs(fuzzers=None):
82 """Returns the list of all fuzzers."""
83 fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')
84 fuzzer_configs = []
85 for fuzzer in os.listdir(fuzzers_dir):
86 if not os.path.isfile(os.path.join(fuzzers_dir, fuzzer, 'fuzzer.py')):
87 continue
88 if fuzzer == 'coverage':
89 continue
90
91 if not fuzzers or fuzzer in fuzzers:
92 # Auto-generate the default configuration for each base fuzzer.
93 fuzzer_configs.append({'fuzzer': fuzzer})
94
95 variant_config_path = os.path.join(fuzzers_dir, fuzzer, 'variants.yaml')
96 if not os.path.isfile(variant_config_path):
97 continue
98
99 variant_config = yaml_utils.read(variant_config_path)
100 assert 'variants' in variant_config, (
101 'Missing "variants" section of {}'.format(variant_config_path))
102 for variant in variant_config['variants']:
103 if not fuzzers or variant['name'] in fuzzers:
104 # Modify the config from the variants.yaml format to the
105 # format expected by a fuzzer config.
106 assert 'name' in variant, (
107 'Missing name attribute for fuzzer variant in {}'.format(
108 variant_config_path))
109 variant['variant_name'] = variant['name']
110 del variant['name']
111 variant['fuzzer'] = fuzzer
112 fuzzer_configs.append(variant)
113
114 return fuzzer_configs
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/common/fuzzer_utils.py b/common/fuzzer_utils.py
--- a/common/fuzzer_utils.py
+++ b/common/fuzzer_utils.py
@@ -20,7 +20,6 @@
from common import logs
from common import utils
-from common import yaml_utils
DEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'
FUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'
@@ -80,6 +79,10 @@
def get_fuzzer_configs(fuzzers=None):
"""Returns the list of all fuzzers."""
+ # Import it here to avoid yaml dependency in runner.
+ # pylint: disable=import-outside-toplevel
+ from common import yaml_utils
+
fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')
fuzzer_configs = []
for fuzzer in os.listdir(fuzzers_dir):
|
{"golden_diff": "diff --git a/common/fuzzer_utils.py b/common/fuzzer_utils.py\n--- a/common/fuzzer_utils.py\n+++ b/common/fuzzer_utils.py\n@@ -20,7 +20,6 @@\n \n from common import logs\n from common import utils\n-from common import yaml_utils\n \n DEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'\n FUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'\n@@ -80,6 +79,10 @@\n \n def get_fuzzer_configs(fuzzers=None):\n \"\"\"Returns the list of all fuzzers.\"\"\"\n+ # Import it here to avoid yaml dependency in runner.\n+ # pylint: disable=import-outside-toplevel\n+ from common import yaml_utils\n+\n fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')\n fuzzer_configs = []\n for fuzzer in os.listdir(fuzzers_dir):\n", "issue": "Should local docker run restrict cpu to 1 to match FuzzBench prod environment ?\nSee also\r\nhttps://github.com/google/fuzzbench/issues/173#issuecomment-605283610\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fuzzer helpers.\"\"\"\n\nimport importlib\nimport os\nimport re\nfrom typing import Optional\n\nfrom common import logs\nfrom common import utils\nfrom common import yaml_utils\n\nDEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'\nFUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'\nVALID_FUZZER_REGEX = re.compile(r'^[A-Za-z0-9_]+$')\n\n\ndef get_fuzz_target_binary(search_directory: str,\n fuzz_target_name: str) -> Optional[str]:\n \"\"\"Return target binary path.\"\"\"\n if fuzz_target_name:\n fuzz_target_binary = os.path.join(search_directory, fuzz_target_name)\n if os.path.exists(fuzz_target_binary):\n return fuzz_target_binary\n return None\n\n default_fuzz_target_binary = os.path.join(search_directory,\n DEFAULT_FUZZ_TARGET_NAME)\n if os.path.exists(default_fuzz_target_binary):\n return default_fuzz_target_binary\n\n for root, _, files in os.walk(search_directory):\n if root == 'uninstrumented':\n continue\n for filename in files:\n if filename.endswith('-uninstrumented'):\n # Skip uninstrumented binaries (e.g. with QSYM).\n continue\n\n file_path = os.path.join(root, filename)\n with open(file_path, 'rb') as file_handle:\n if FUZZ_TARGET_SEARCH_STRING in file_handle.read():\n return file_path\n\n return None\n\n\ndef validate(fuzzer):\n \"\"\"Return True if |fuzzer| is a valid fuzzbench fuzzer.\"\"\"\n # Although importing probably allows a subset of what the regex allows, use\n # the regex anyway to be safe. 
The regex is enforcing that the fuzzer is a\n # valid path for GCS or a linux system.\n if VALID_FUZZER_REGEX.match(fuzzer) is None:\n logs.error('%s does not conform to %s pattern.', fuzzer,\n VALID_FUZZER_REGEX.pattern)\n return False\n\n # Try importing the fuzzer module.\n module_name = 'fuzzers.{}.fuzzer'.format(fuzzer)\n try:\n importlib.import_module(module_name)\n return True\n except Exception as error: # pylint: disable=broad-except\n logs.error('Encountered \"%s\" while trying to import %s.', error,\n module_name)\n return False\n\n\ndef get_fuzzer_configs(fuzzers=None):\n \"\"\"Returns the list of all fuzzers.\"\"\"\n fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')\n fuzzer_configs = []\n for fuzzer in os.listdir(fuzzers_dir):\n if not os.path.isfile(os.path.join(fuzzers_dir, fuzzer, 'fuzzer.py')):\n continue\n if fuzzer == 'coverage':\n continue\n\n if not fuzzers or fuzzer in fuzzers:\n # Auto-generate the default configuration for each base fuzzer.\n fuzzer_configs.append({'fuzzer': fuzzer})\n\n variant_config_path = os.path.join(fuzzers_dir, fuzzer, 'variants.yaml')\n if not os.path.isfile(variant_config_path):\n continue\n\n variant_config = yaml_utils.read(variant_config_path)\n assert 'variants' in variant_config, (\n 'Missing \"variants\" section of {}'.format(variant_config_path))\n for variant in variant_config['variants']:\n if not fuzzers or variant['name'] in fuzzers:\n # Modify the config from the variants.yaml format to the\n # format expected by a fuzzer config.\n assert 'name' in variant, (\n 'Missing name attribute for fuzzer variant in {}'.format(\n variant_config_path))\n variant['variant_name'] = variant['name']\n del variant['name']\n variant['fuzzer'] = fuzzer\n fuzzer_configs.append(variant)\n\n return fuzzer_configs\n", "path": "common/fuzzer_utils.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fuzzer helpers.\"\"\"\n\nimport importlib\nimport os\nimport re\nfrom typing import Optional\n\nfrom common import logs\nfrom common import utils\n\nDEFAULT_FUZZ_TARGET_NAME = 'fuzz-target'\nFUZZ_TARGET_SEARCH_STRING = b'LLVMFuzzerTestOneInput'\nVALID_FUZZER_REGEX = re.compile(r'^[A-Za-z0-9_]+$')\n\n\ndef get_fuzz_target_binary(search_directory: str,\n fuzz_target_name: str) -> Optional[str]:\n \"\"\"Return target binary path.\"\"\"\n if fuzz_target_name:\n fuzz_target_binary = os.path.join(search_directory, fuzz_target_name)\n if os.path.exists(fuzz_target_binary):\n return fuzz_target_binary\n return None\n\n default_fuzz_target_binary = os.path.join(search_directory,\n DEFAULT_FUZZ_TARGET_NAME)\n if os.path.exists(default_fuzz_target_binary):\n return default_fuzz_target_binary\n\n for root, _, files in os.walk(search_directory):\n if root == 'uninstrumented':\n continue\n for filename in files:\n if filename.endswith('-uninstrumented'):\n # Skip uninstrumented binaries (e.g. 
with QSYM).\n continue\n\n file_path = os.path.join(root, filename)\n with open(file_path, 'rb') as file_handle:\n if FUZZ_TARGET_SEARCH_STRING in file_handle.read():\n return file_path\n\n return None\n\n\ndef validate(fuzzer):\n \"\"\"Return True if |fuzzer| is a valid fuzzbench fuzzer.\"\"\"\n # Although importing probably allows a subset of what the regex allows, use\n # the regex anyway to be safe. The regex is enforcing that the fuzzer is a\n # valid path for GCS or a linux system.\n if VALID_FUZZER_REGEX.match(fuzzer) is None:\n logs.error('%s does not conform to %s pattern.', fuzzer,\n VALID_FUZZER_REGEX.pattern)\n return False\n\n # Try importing the fuzzer module.\n module_name = 'fuzzers.{}.fuzzer'.format(fuzzer)\n try:\n importlib.import_module(module_name)\n return True\n except Exception as error: # pylint: disable=broad-except\n logs.error('Encountered \"%s\" while trying to import %s.', error,\n module_name)\n return False\n\n\ndef get_fuzzer_configs(fuzzers=None):\n \"\"\"Returns the list of all fuzzers.\"\"\"\n # Import it here to avoid yaml dependency in runner.\n # pylint: disable=import-outside-toplevel\n from common import yaml_utils\n\n fuzzers_dir = os.path.join(utils.ROOT_DIR, 'fuzzers')\n fuzzer_configs = []\n for fuzzer in os.listdir(fuzzers_dir):\n if not os.path.isfile(os.path.join(fuzzers_dir, fuzzer, 'fuzzer.py')):\n continue\n if fuzzer == 'coverage':\n continue\n\n if not fuzzers or fuzzer in fuzzers:\n # Auto-generate the default configuration for each base fuzzer.\n fuzzer_configs.append({'fuzzer': fuzzer})\n\n variant_config_path = os.path.join(fuzzers_dir, fuzzer, 'variants.yaml')\n if not os.path.isfile(variant_config_path):\n continue\n\n variant_config = yaml_utils.read(variant_config_path)\n assert 'variants' in variant_config, (\n 'Missing \"variants\" section of {}'.format(variant_config_path))\n for variant in variant_config['variants']:\n if not fuzzers or variant['name'] in fuzzers:\n # Modify the config from the variants.yaml format to the\n # format expected by a fuzzer config.\n assert 'name' in variant, (\n 'Missing name attribute for fuzzer variant in {}'.format(\n variant_config_path))\n variant['variant_name'] = variant['name']\n del variant['name']\n variant['fuzzer'] = fuzzer\n fuzzer_configs.append(variant)\n\n return fuzzer_configs\n", "path": "common/fuzzer_utils.py"}]}
| 1,502 | 194 |
gh_patches_debug_3667
|
rasdani/github-patches
|
git_diff
|
vega__altair-784
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix vega-embed version for Altair 1
For example in https://github.com/altair-viz/altair/blob/d4d29ca06e920f71073766c6456d387e682cee17/altair/vegalite/v1/html.py#L7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v1/html.py`
Content:
```
1 HTML_TEMPLATE = """
2 <!DOCTYPE html>
3 <html>
4 <head>
5 <script src="https://cdn.jsdelivr.net/npm/vega@2"></script>
6 <script src="https://cdn.jsdelivr.net/npm/vega-lite@1"></script>
7 <script src="https://cdn.jsdelivr.net/npm/vega-embed@3"></script>
8 </head>
9 <body>
10 <div id="vis"></div>
11 <script type="text/javascript">
12 var spec = {spec};
13 var opt = {opt};
14 vegaEmbed("#vis", spec, opt);
15 </script>
16 </body>
17 </html>
18 """
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/altair/vegalite/v1/html.py b/altair/vegalite/v1/html.py
--- a/altair/vegalite/v1/html.py
+++ b/altair/vegalite/v1/html.py
@@ -4,7 +4,7 @@
<head>
<script src="https://cdn.jsdelivr.net/npm/vega@2"></script>
<script src="https://cdn.jsdelivr.net/npm/vega-lite@1"></script>
- <script src="https://cdn.jsdelivr.net/npm/vega-embed@3"></script>
+ <script src="https://cdn.jsdelivr.net/npm/vega-embed@2"></script>
</head>
<body>
<div id="vis"></div>
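The fix pins the CDN bundle to the 2.x line of vega-embed, which still supports vega-lite v1 specs. A quick sanity check of the patched template (a sketch, assuming the corrected `HTML_TEMPLATE` above is in scope):

```python
# Minimal check that the template now references the 2.x embed bundle;
# spec/opt are left as empty JSON objects purely for illustration.
html = HTML_TEMPLATE.format(spec="{}", opt="{}")
assert "vega-embed@2" in html and "vega-embed@3" not in html
```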
|
{"golden_diff": "diff --git a/altair/vegalite/v1/html.py b/altair/vegalite/v1/html.py\n--- a/altair/vegalite/v1/html.py\n+++ b/altair/vegalite/v1/html.py\n@@ -4,7 +4,7 @@\n <head>\n <script src=\"https://cdn.jsdelivr.net/npm/vega@2\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-lite@1\"></script>\n- <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@3\"></script>\n+ <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@2\"></script>\n </head>\n <body>\n <div id=\"vis\"></div>\n", "issue": "Fix vega-embed version for Altair 1\nFor example in https://github.com/altair-viz/altair/blob/d4d29ca06e920f71073766c6456d387e682cee17/altair/vegalite/v1/html.py#L7\n", "before_files": [{"content": "HTML_TEMPLATE = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n <script src=\"https://cdn.jsdelivr.net/npm/vega@2\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-lite@1\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@3\"></script>\n</head>\n<body>\n <div id=\"vis\"></div>\n <script type=\"text/javascript\">\n var spec = {spec};\n var opt = {opt};\n vegaEmbed(\"#vis\", spec, opt);\n </script>\n</body>\n</html>\n\"\"\"\n", "path": "altair/vegalite/v1/html.py"}], "after_files": [{"content": "HTML_TEMPLATE = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n <script src=\"https://cdn.jsdelivr.net/npm/vega@2\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-lite@1\"></script>\n <script src=\"https://cdn.jsdelivr.net/npm/vega-embed@2\"></script>\n</head>\n<body>\n <div id=\"vis\"></div>\n <script type=\"text/javascript\">\n var spec = {spec};\n var opt = {opt};\n vegaEmbed(\"#vis\", spec, opt);\n </script>\n</body>\n</html>\n\"\"\"\n", "path": "altair/vegalite/v1/html.py"}]}
| 506 | 167 |
gh_patches_debug_37774
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-2303
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to build docker pi image on raspberry pi 3
Hi,
I have not succeeded in building the image on a Raspberry Pi 3.
OS "Raspbian GNU/Linux 9 (stretch)"
```
> pi@GP-RB-FRONT:~/freqtrade/freqtrade $ docker build . -f Dockerfile.pi -t freqtrade
> Sending build context to Docker daemon 19.51MB
> Step 1/15 : FROM balenalib/raspberrypi3-debian:stretch
> ---> 72b8e4ae9043
> Step 2/15 : RUN [ "cross-build-start" ]
> ---> Running in 66dfc8472a92
> standard_init_linux.go:211: exec user process caused "exec format error"
> The command 'cross-build-start' returned a non-zero code: 1
```
Have a nice day.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from sys import version_info
2 from setuptools import setup
3
4 if version_info.major == 3 and version_info.minor < 6 or \
5 version_info.major < 3:
6 print('Your Python interpreter must be 3.6 or greater!')
7 exit(1)
8
9 from pathlib import Path # noqa: E402
10 from freqtrade import __version__ # noqa: E402
11
12
13 readme_file = Path(__file__).parent / "README.md"
14 readme_long = "Crypto Trading Bot"
15 if readme_file.is_file():
16 readme_long = (Path(__file__).parent / "README.md").read_text()
17
18 # Requirements used for submodules
19 api = ['flask']
20 plot = ['plotly>=4.0']
21
22 develop = [
23 'coveralls',
24 'flake8',
25 'flake8-type-annotations',
26 'flake8-tidy-imports',
27 'mypy',
28 'pytest',
29 'pytest-asyncio',
30 'pytest-cov',
31 'pytest-mock',
32 'pytest-random-order',
33 ]
34
35 jupyter = [
36 'jupyter',
37 'nbstripout',
38 'ipykernel',
39 ]
40
41 all_extra = api + plot + develop + jupyter
42
43 setup(name='freqtrade',
44 version=__version__,
45 description='Crypto Trading Bot',
46 long_description=readme_long,
47 long_description_content_type="text/markdown",
48 url='https://github.com/freqtrade/freqtrade',
49 author='Freqtrade Team',
50 author_email='[email protected]',
51 license='GPLv3',
52 packages=['freqtrade'],
53 setup_requires=['pytest-runner', 'numpy'],
54 tests_require=['pytest', 'pytest-mock', 'pytest-cov'],
55 install_requires=[
56 # from requirements-common.txt
57 'ccxt>=1.18.1080',
58 'SQLAlchemy',
59 'python-telegram-bot',
60 'arrow',
61 'cachetools',
62 'requests',
63 'urllib3',
64 'wrapt',
65 'scikit-learn',
66 'joblib',
67 'jsonschema',
68 'TA-Lib',
69 'tabulate',
70 'coinmarketcap',
71 'scikit-optimize',
72 'filelock',
73 'py_find_1st',
74 'python-rapidjson',
75 'sdnotify',
76 'colorama',
77 # from requirements.txt
78 'numpy',
79 'pandas',
80 'scipy',
81 ],
82 extras_require={
83 'api': api,
84 'dev': all_extra,
85 'plot': plot,
86 'all': all_extra,
87 'jupyter': jupyter,
88
89 },
90 include_package_data=True,
91 zip_safe=False,
92 entry_points={
93 'console_scripts': [
94 'freqtrade = freqtrade.main:main',
95 ],
96 },
97 classifiers=[
98 'Programming Language :: Python :: 3.6',
99 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
100 'Topic :: Office/Business :: Financial :: Investment',
101 'Intended Audience :: Science/Research',
102 ])
103
```
Path: `freqtrade/optimize/__init__.py`
Content:
```
1 import logging
2 from typing import Any, Dict
3
4 from filelock import FileLock, Timeout
5
6 from freqtrade import DependencyException, constants
7 from freqtrade.state import RunMode
8 from freqtrade.utils import setup_utils_configuration
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def setup_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:
15 """
16 Prepare the configuration for the Hyperopt module
17 :param args: Cli args from Arguments()
18 :return: Configuration
19 """
20 config = setup_utils_configuration(args, method)
21
22 if method == RunMode.BACKTEST:
23 if config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT:
24 raise DependencyException('stake amount could not be "%s" for backtesting' %
25 constants.UNLIMITED_STAKE_AMOUNT)
26
27 return config
28
29
30 def start_backtesting(args: Dict[str, Any]) -> None:
31 """
32 Start Backtesting script
33 :param args: Cli args from Arguments()
34 :return: None
35 """
36 # Import here to avoid loading backtesting module when it's not used
37 from freqtrade.optimize.backtesting import Backtesting
38
39 # Initialize configuration
40 config = setup_configuration(args, RunMode.BACKTEST)
41
42 logger.info('Starting freqtrade in Backtesting mode')
43
44 # Initialize backtesting object
45 backtesting = Backtesting(config)
46 backtesting.start()
47
48
49 def start_hyperopt(args: Dict[str, Any]) -> None:
50 """
51 Start hyperopt script
52 :param args: Cli args from Arguments()
53 :return: None
54 """
55 # Import here to avoid loading hyperopt module when it's not used
56 from freqtrade.optimize.hyperopt import Hyperopt
57
58 # Initialize configuration
59 config = setup_configuration(args, RunMode.HYPEROPT)
60
61 logger.info('Starting freqtrade in Hyperopt mode')
62
63 lock = FileLock(Hyperopt.get_lock_filename(config))
64
65 try:
66 with lock.acquire(timeout=1):
67
68 # Remove noisy log messages
69 logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
70 logging.getLogger('filelock').setLevel(logging.WARNING)
71
72 # Initialize backtesting object
73 hyperopt = Hyperopt(config)
74 hyperopt.start()
75
76 except Timeout:
77 logger.info("Another running instance of freqtrade Hyperopt detected.")
78 logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
79 "Hyperopt module is resource hungry. Please run your Hyperopts sequentially "
80 "or on separate machines.")
81 logger.info("Quitting now.")
82 # TODO: return False here in order to help freqtrade to exit
83 # with non-zero exit code...
84 # Same in Edge and Backtesting start() functions.
85
86
87 def start_edge(args: Dict[str, Any]) -> None:
88 """
89 Start Edge script
90 :param args: Cli args from Arguments()
91 :return: None
92 """
93 from freqtrade.optimize.edge_cli import EdgeCli
94 # Initialize configuration
95 config = setup_configuration(args, RunMode.EDGE)
96 logger.info('Starting freqtrade in Edge mode')
97
98 # Initialize Edge object
99 edge_cli = EdgeCli(config)
100 edge_cli.start()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/freqtrade/optimize/__init__.py b/freqtrade/optimize/__init__.py
--- a/freqtrade/optimize/__init__.py
+++ b/freqtrade/optimize/__init__.py
@@ -1,9 +1,7 @@
import logging
from typing import Any, Dict
-from filelock import FileLock, Timeout
-
-from freqtrade import DependencyException, constants
+from freqtrade import DependencyException, constants, OperationalException
from freqtrade.state import RunMode
from freqtrade.utils import setup_utils_configuration
@@ -53,8 +51,12 @@
:return: None
"""
# Import here to avoid loading hyperopt module when it's not used
- from freqtrade.optimize.hyperopt import Hyperopt
-
+ try:
+ from filelock import FileLock, Timeout
+ from freqtrade.optimize.hyperopt import Hyperopt
+ except ImportError as e:
+ raise OperationalException(
+ f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
# Initialize configuration
config = setup_configuration(args, RunMode.HYPEROPT)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,13 @@
# Requirements used for submodules
api = ['flask']
plot = ['plotly>=4.0']
+hyperopt = [
+ 'scipy',
+ 'scikit-learn',
+ 'scikit-optimize',
+ 'filelock',
+ 'joblib',
+ ]
develop = [
'coveralls',
@@ -38,7 +45,7 @@
'ipykernel',
]
-all_extra = api + plot + develop + jupyter
+all_extra = api + plot + develop + jupyter + hyperopt
setup(name='freqtrade',
version=__version__,
@@ -62,14 +69,10 @@
'requests',
'urllib3',
'wrapt',
- 'scikit-learn',
- 'joblib',
'jsonschema',
'TA-Lib',
'tabulate',
'coinmarketcap',
- 'scikit-optimize',
- 'filelock',
'py_find_1st',
'python-rapidjson',
'sdnotify',
@@ -77,15 +80,14 @@
# from requirements.txt
'numpy',
'pandas',
- 'scipy',
],
extras_require={
'api': api,
'dev': all_extra,
'plot': plot,
- 'all': all_extra,
'jupyter': jupyter,
-
+ 'hyperopt': hyperopt,
+ 'all': all_extra,
},
include_package_data=True,
zip_safe=False,
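The patch above moves the heavy scientific dependencies behind an optional `hyperopt` extra and defers their import to the hyperopt entry point, turning a missing dependency into a clear error instead of an import failure. A small sketch of the guarded-import idea, with assumed names rather than freqtrade's exact code:

```python
# Sketch only: optional dependencies are imported inside the entry point and turned
# into a clear error message when the "hyperopt" extra was not installed.
def start_hyperopt(config):
    try:
        from filelock import FileLock  # provided by `pip install freqtrade[hyperopt]`
    except ImportError as exc:
        raise RuntimeError(
            f"{exc}. Please install the hyperopt dependencies first."
        ) from exc
    return FileLock(config.get("lock_file", "hyperopt.lock"))
```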
|
{"golden_diff": "diff --git a/freqtrade/optimize/__init__.py b/freqtrade/optimize/__init__.py\n--- a/freqtrade/optimize/__init__.py\n+++ b/freqtrade/optimize/__init__.py\n@@ -1,9 +1,7 @@\n import logging\n from typing import Any, Dict\n \n-from filelock import FileLock, Timeout\n-\n-from freqtrade import DependencyException, constants\n+from freqtrade import DependencyException, constants, OperationalException\n from freqtrade.state import RunMode\n from freqtrade.utils import setup_utils_configuration\n \n@@ -53,8 +51,12 @@\n :return: None\n \"\"\"\n # Import here to avoid loading hyperopt module when it's not used\n- from freqtrade.optimize.hyperopt import Hyperopt\n-\n+ try:\n+ from filelock import FileLock, Timeout\n+ from freqtrade.optimize.hyperopt import Hyperopt\n+ except ImportError as e:\n+ raise OperationalException(\n+ f\"{e}. Please ensure that the hyperopt dependencies are installed.\") from e\n # Initialize configuration\n config = setup_configuration(args, RunMode.HYPEROPT)\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,13 @@\n # Requirements used for submodules\n api = ['flask']\n plot = ['plotly>=4.0']\n+hyperopt = [\n+ 'scipy',\n+ 'scikit-learn',\n+ 'scikit-optimize',\n+ 'filelock',\n+ 'joblib',\n+ ]\n \n develop = [\n 'coveralls',\n@@ -38,7 +45,7 @@\n 'ipykernel',\n ]\n \n-all_extra = api + plot + develop + jupyter\n+all_extra = api + plot + develop + jupyter + hyperopt\n \n setup(name='freqtrade',\n version=__version__,\n@@ -62,14 +69,10 @@\n 'requests',\n 'urllib3',\n 'wrapt',\n- 'scikit-learn',\n- 'joblib',\n 'jsonschema',\n 'TA-Lib',\n 'tabulate',\n 'coinmarketcap',\n- 'scikit-optimize',\n- 'filelock',\n 'py_find_1st',\n 'python-rapidjson',\n 'sdnotify',\n@@ -77,15 +80,14 @@\n # from requirements.txt\n 'numpy',\n 'pandas',\n- 'scipy',\n ],\n extras_require={\n 'api': api,\n 'dev': all_extra,\n 'plot': plot,\n- 'all': all_extra,\n 'jupyter': jupyter,\n-\n+ 'hyperopt': hyperopt,\n+ 'all': all_extra,\n },\n include_package_data=True,\n zip_safe=False,\n", "issue": "Unable to built docker pi image on raspberry pi 3\nHi, \r\n\r\nI don't succeed building the image on a rasberry pi 3.\r\n\r\nOS \"Raspbian GNU/Linux 9 (stretch)\"\r\n\r\n```\r\n> pi@GP-RB-FRONT:~/freqtrade/freqtrade $ docker build . 
-f Dockerfile.pi -t freqtrade\r\n> Sending build context to Docker daemon 19.51MB\r\n> Step 1/15 : FROM balenalib/raspberrypi3-debian:stretch\r\n> ---> 72b8e4ae9043\r\n> Step 2/15 : RUN [ \"cross-build-start\" ]\r\n> ---> Running in 66dfc8472a92\r\n> standard_init_linux.go:211: exec user process caused \"exec format error\"\r\n> The command 'cross-build-start' returned a non-zero code: 1\r\n```\r\n\r\nHave a nice day.\r\n\n", "before_files": [{"content": "from sys import version_info\nfrom setuptools import setup\n\nif version_info.major == 3 and version_info.minor < 6 or \\\n version_info.major < 3:\n print('Your Python interpreter must be 3.6 or greater!')\n exit(1)\n\nfrom pathlib import Path # noqa: E402\nfrom freqtrade import __version__ # noqa: E402\n\n\nreadme_file = Path(__file__).parent / \"README.md\"\nreadme_long = \"Crypto Trading Bot\"\nif readme_file.is_file():\n readme_long = (Path(__file__).parent / \"README.md\").read_text()\n\n# Requirements used for submodules\napi = ['flask']\nplot = ['plotly>=4.0']\n\ndevelop = [\n 'coveralls',\n 'flake8',\n 'flake8-type-annotations',\n 'flake8-tidy-imports',\n 'mypy',\n 'pytest',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-mock',\n 'pytest-random-order',\n]\n\njupyter = [\n 'jupyter',\n 'nbstripout',\n 'ipykernel',\n ]\n\nall_extra = api + plot + develop + jupyter\n\nsetup(name='freqtrade',\n version=__version__,\n description='Crypto Trading Bot',\n long_description=readme_long,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/freqtrade/freqtrade',\n author='Freqtrade Team',\n author_email='[email protected]',\n license='GPLv3',\n packages=['freqtrade'],\n setup_requires=['pytest-runner', 'numpy'],\n tests_require=['pytest', 'pytest-mock', 'pytest-cov'],\n install_requires=[\n # from requirements-common.txt\n 'ccxt>=1.18.1080',\n 'SQLAlchemy',\n 'python-telegram-bot',\n 'arrow',\n 'cachetools',\n 'requests',\n 'urllib3',\n 'wrapt',\n 'scikit-learn',\n 'joblib',\n 'jsonschema',\n 'TA-Lib',\n 'tabulate',\n 'coinmarketcap',\n 'scikit-optimize',\n 'filelock',\n 'py_find_1st',\n 'python-rapidjson',\n 'sdnotify',\n 'colorama',\n # from requirements.txt\n 'numpy',\n 'pandas',\n 'scipy',\n ],\n extras_require={\n 'api': api,\n 'dev': all_extra,\n 'plot': plot,\n 'all': all_extra,\n 'jupyter': jupyter,\n\n },\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'freqtrade = freqtrade.main:main',\n ],\n },\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Intended Audience :: Science/Research',\n ])\n", "path": "setup.py"}, {"content": "import logging\nfrom typing import Any, Dict\n\nfrom filelock import FileLock, Timeout\n\nfrom freqtrade import DependencyException, constants\nfrom freqtrade.state import RunMode\nfrom freqtrade.utils import setup_utils_configuration\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef setup_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for the Hyperopt module\n :param args: Cli args from Arguments()\n :return: Configuration\n \"\"\"\n config = setup_utils_configuration(args, method)\n\n if method == RunMode.BACKTEST:\n if config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT:\n raise DependencyException('stake amount could not be \"%s\" for backtesting' %\n constants.UNLIMITED_STAKE_AMOUNT)\n\n return config\n\n\ndef start_backtesting(args: 
Dict[str, Any]) -> None:\n \"\"\"\n Start Backtesting script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n # Import here to avoid loading backtesting module when it's not used\n from freqtrade.optimize.backtesting import Backtesting\n\n # Initialize configuration\n config = setup_configuration(args, RunMode.BACKTEST)\n\n logger.info('Starting freqtrade in Backtesting mode')\n\n # Initialize backtesting object\n backtesting = Backtesting(config)\n backtesting.start()\n\n\ndef start_hyperopt(args: Dict[str, Any]) -> None:\n \"\"\"\n Start hyperopt script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n # Import here to avoid loading hyperopt module when it's not used\n from freqtrade.optimize.hyperopt import Hyperopt\n\n # Initialize configuration\n config = setup_configuration(args, RunMode.HYPEROPT)\n\n logger.info('Starting freqtrade in Hyperopt mode')\n\n lock = FileLock(Hyperopt.get_lock_filename(config))\n\n try:\n with lock.acquire(timeout=1):\n\n # Remove noisy log messages\n logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)\n logging.getLogger('filelock').setLevel(logging.WARNING)\n\n # Initialize backtesting object\n hyperopt = Hyperopt(config)\n hyperopt.start()\n\n except Timeout:\n logger.info(\"Another running instance of freqtrade Hyperopt detected.\")\n logger.info(\"Simultaneous execution of multiple Hyperopt commands is not supported. \"\n \"Hyperopt module is resource hungry. Please run your Hyperopts sequentially \"\n \"or on separate machines.\")\n logger.info(\"Quitting now.\")\n # TODO: return False here in order to help freqtrade to exit\n # with non-zero exit code...\n # Same in Edge and Backtesting start() functions.\n\n\ndef start_edge(args: Dict[str, Any]) -> None:\n \"\"\"\n Start Edge script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n from freqtrade.optimize.edge_cli import EdgeCli\n # Initialize configuration\n config = setup_configuration(args, RunMode.EDGE)\n logger.info('Starting freqtrade in Edge mode')\n\n # Initialize Edge object\n edge_cli = EdgeCli(config)\n edge_cli.start()\n", "path": "freqtrade/optimize/__init__.py"}], "after_files": [{"content": "from sys import version_info\nfrom setuptools import setup\n\nif version_info.major == 3 and version_info.minor < 6 or \\\n version_info.major < 3:\n print('Your Python interpreter must be 3.6 or greater!')\n exit(1)\n\nfrom pathlib import Path # noqa: E402\nfrom freqtrade import __version__ # noqa: E402\n\n\nreadme_file = Path(__file__).parent / \"README.md\"\nreadme_long = \"Crypto Trading Bot\"\nif readme_file.is_file():\n readme_long = (Path(__file__).parent / \"README.md\").read_text()\n\n# Requirements used for submodules\napi = ['flask']\nplot = ['plotly>=4.0']\nhyperopt = [\n 'scipy',\n 'scikit-learn',\n 'scikit-optimize',\n 'filelock',\n 'joblib',\n ]\n\ndevelop = [\n 'coveralls',\n 'flake8',\n 'flake8-type-annotations',\n 'flake8-tidy-imports',\n 'mypy',\n 'pytest',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-mock',\n 'pytest-random-order',\n]\n\njupyter = [\n 'jupyter',\n 'nbstripout',\n 'ipykernel',\n ]\n\nall_extra = api + plot + develop + jupyter + hyperopt\n\nsetup(name='freqtrade',\n version=__version__,\n description='Crypto Trading Bot',\n long_description=readme_long,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/freqtrade/freqtrade',\n author='Freqtrade Team',\n author_email='[email protected]',\n license='GPLv3',\n packages=['freqtrade'],\n setup_requires=['pytest-runner', 
'numpy'],\n tests_require=['pytest', 'pytest-mock', 'pytest-cov'],\n install_requires=[\n # from requirements-common.txt\n 'ccxt>=1.18.1080',\n 'SQLAlchemy',\n 'python-telegram-bot',\n 'arrow',\n 'cachetools',\n 'requests',\n 'urllib3',\n 'wrapt',\n 'jsonschema',\n 'TA-Lib',\n 'tabulate',\n 'coinmarketcap',\n 'py_find_1st',\n 'python-rapidjson',\n 'sdnotify',\n 'colorama',\n # from requirements.txt\n 'numpy',\n 'pandas',\n ],\n extras_require={\n 'api': api,\n 'dev': all_extra,\n 'plot': plot,\n 'jupyter': jupyter,\n 'hyperopt': hyperopt,\n 'all': all_extra,\n },\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'freqtrade = freqtrade.main:main',\n ],\n },\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Intended Audience :: Science/Research',\n ])\n", "path": "setup.py"}, {"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import DependencyException, constants, OperationalException\nfrom freqtrade.state import RunMode\nfrom freqtrade.utils import setup_utils_configuration\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef setup_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for the Hyperopt module\n :param args: Cli args from Arguments()\n :return: Configuration\n \"\"\"\n config = setup_utils_configuration(args, method)\n\n if method == RunMode.BACKTEST:\n if config['stake_amount'] == constants.UNLIMITED_STAKE_AMOUNT:\n raise DependencyException('stake amount could not be \"%s\" for backtesting' %\n constants.UNLIMITED_STAKE_AMOUNT)\n\n return config\n\n\ndef start_backtesting(args: Dict[str, Any]) -> None:\n \"\"\"\n Start Backtesting script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n # Import here to avoid loading backtesting module when it's not used\n from freqtrade.optimize.backtesting import Backtesting\n\n # Initialize configuration\n config = setup_configuration(args, RunMode.BACKTEST)\n\n logger.info('Starting freqtrade in Backtesting mode')\n\n # Initialize backtesting object\n backtesting = Backtesting(config)\n backtesting.start()\n\n\ndef start_hyperopt(args: Dict[str, Any]) -> None:\n \"\"\"\n Start hyperopt script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n # Import here to avoid loading hyperopt module when it's not used\n try:\n from filelock import FileLock, Timeout\n from freqtrade.optimize.hyperopt import Hyperopt\n except ImportError as e:\n raise OperationalException(\n f\"{e}. Please ensure that the hyperopt dependencies are installed.\") from e\n # Initialize configuration\n config = setup_configuration(args, RunMode.HYPEROPT)\n\n logger.info('Starting freqtrade in Hyperopt mode')\n\n lock = FileLock(Hyperopt.get_lock_filename(config))\n\n try:\n with lock.acquire(timeout=1):\n\n # Remove noisy log messages\n logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)\n logging.getLogger('filelock').setLevel(logging.WARNING)\n\n # Initialize backtesting object\n hyperopt = Hyperopt(config)\n hyperopt.start()\n\n except Timeout:\n logger.info(\"Another running instance of freqtrade Hyperopt detected.\")\n logger.info(\"Simultaneous execution of multiple Hyperopt commands is not supported. \"\n \"Hyperopt module is resource hungry. 
Please run your Hyperopts sequentially \"\n \"or on separate machines.\")\n logger.info(\"Quitting now.\")\n # TODO: return False here in order to help freqtrade to exit\n # with non-zero exit code...\n # Same in Edge and Backtesting start() functions.\n\n\ndef start_edge(args: Dict[str, Any]) -> None:\n \"\"\"\n Start Edge script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n from freqtrade.optimize.edge_cli import EdgeCli\n # Initialize configuration\n config = setup_configuration(args, RunMode.EDGE)\n logger.info('Starting freqtrade in Edge mode')\n\n # Initialize Edge object\n edge_cli = EdgeCli(config)\n edge_cli.start()\n", "path": "freqtrade/optimize/__init__.py"}]}
| 2,228 | 620 |
gh_patches_debug_10379
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-3115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snowflake materialization fails on feature views with dummy entity
## Expected Behavior
Same as feature views with entities
## Current Behavior
The materialization query errors out: the generated query contains duplicate column names, caused by a dummy entity having multiple dummy join keys
## Steps to reproduce
### Specifications
- Version:
- Platform:
- Subsystem:
## Possible Solution
even if feature inference is run more than once, only update the dummy entity join key once
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/inference.py`
Content:
```
1 import re
2 from typing import List, Set, Union
3
4 from feast.data_source import DataSource, PushSource, RequestSource
5 from feast.entity import Entity
6 from feast.errors import RegistryInferenceFailure
7 from feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_NAME, FeatureView
8 from feast.field import Field, from_value_type
9 from feast.infra.offline_stores.bigquery_source import BigQuerySource
10 from feast.infra.offline_stores.contrib.mssql_offline_store.mssqlserver_source import (
11 MsSqlServerSource,
12 )
13 from feast.infra.offline_stores.file_source import FileSource
14 from feast.infra.offline_stores.redshift_source import RedshiftSource
15 from feast.infra.offline_stores.snowflake_source import SnowflakeSource
16 from feast.repo_config import RepoConfig
17 from feast.stream_feature_view import StreamFeatureView
18 from feast.types import String
19 from feast.value_type import ValueType
20
21
22 def update_data_sources_with_inferred_event_timestamp_col(
23 data_sources: List[DataSource], config: RepoConfig
24 ) -> None:
25 ERROR_MSG_PREFIX = "Unable to infer DataSource timestamp_field"
26 for data_source in data_sources:
27 if isinstance(data_source, RequestSource):
28 continue
29 if isinstance(data_source, PushSource):
30 data_source = data_source.batch_source
31 if data_source.timestamp_field is None or data_source.timestamp_field == "":
32 # prepare right match pattern for data source
33 ts_column_type_regex_pattern: str
34 # TODO(adchia): Move Spark source inference out of this logic
35 if (
36 isinstance(data_source, FileSource)
37 or "SparkSource" == data_source.__class__.__name__
38 ):
39 ts_column_type_regex_pattern = r"^timestamp"
40 elif isinstance(data_source, BigQuerySource):
41 ts_column_type_regex_pattern = "TIMESTAMP|DATETIME"
42 elif isinstance(data_source, RedshiftSource):
43 ts_column_type_regex_pattern = "TIMESTAMP[A-Z]*"
44 elif isinstance(data_source, SnowflakeSource):
45 ts_column_type_regex_pattern = "TIMESTAMP_[A-Z]*"
46 elif isinstance(data_source, MsSqlServerSource):
47 ts_column_type_regex_pattern = "TIMESTAMP|DATETIME"
48 else:
49 raise RegistryInferenceFailure(
50 "DataSource",
51 f"""
52 DataSource inferencing of timestamp_field is currently only supported
53 for FileSource, SparkSource, BigQuerySource, RedshiftSource, SnowflakeSource, MsSqlSource.
54 Attempting to infer from {data_source}.
55 """,
56 )
57 # for informing the type checker
58 assert (
59 isinstance(data_source, FileSource)
60 or isinstance(data_source, BigQuerySource)
61 or isinstance(data_source, RedshiftSource)
62 or isinstance(data_source, SnowflakeSource)
63 or isinstance(data_source, MsSqlServerSource)
64 or "SparkSource" == data_source.__class__.__name__
65 )
66
67 # loop through table columns to find singular match
68 timestamp_fields = []
69 for (
70 col_name,
71 col_datatype,
72 ) in data_source.get_table_column_names_and_types(config):
73 if re.match(ts_column_type_regex_pattern, col_datatype):
74 timestamp_fields.append(col_name)
75
76 if len(timestamp_fields) > 1:
77 raise RegistryInferenceFailure(
78 "DataSource",
79 f"""{ERROR_MSG_PREFIX}; found multiple possible columns of timestamp type.
80 Data source type: {data_source.__class__.__name__},
81 Timestamp regex: `{ts_column_type_regex_pattern}`, columns: {timestamp_fields}""",
82 )
83 elif len(timestamp_fields) == 1:
84 data_source.timestamp_field = timestamp_fields[0]
85 else:
86 raise RegistryInferenceFailure(
87 "DataSource",
88 f"""
89 {ERROR_MSG_PREFIX}; Found no columns of timestamp type.
90 Data source type: {data_source.__class__.__name__},
91 Timestamp regex: `{ts_column_type_regex_pattern}`.
92 """,
93 )
94
95
96 def update_feature_views_with_inferred_features_and_entities(
97 fvs: Union[List[FeatureView], List[StreamFeatureView]],
98 entities: List[Entity],
99 config: RepoConfig,
100 ) -> None:
101 """
102 Infers the features and entities associated with each feature view and updates it in place.
103
104 Columns whose names match a join key of an entity are considered to be entity columns; all
105 other columns except designated timestamp columns are considered to be feature columns. If
106 the feature view already has features, feature inference is skipped.
107
108 Note that this inference logic currently does not take any transformations (either a UDF or
109 aggregations) into account. For example, even if a stream feature view has a transformation,
110 this method assumes that the batch source contains transformed data with the correct final schema.
111
112 Args:
113 fvs: The feature views to be updated.
114 entities: A list containing entities associated with the feature views.
115 config: The config for the current feature store.
116 """
117 entity_name_to_entity_map = {e.name: e for e in entities}
118 entity_name_to_join_keys_map = {e.name: e.join_keys for e in entities}
119
120 for fv in fvs:
121 join_keys = set(
122 [
123 join_key
124 for entity_name in fv.entities
125 for join_key in entity_name_to_join_keys_map[entity_name]
126 ]
127 )
128
129 # Fields whose names match a join key are considered to be entity columns; all
130 # other fields are considered to be feature columns.
131 for field in fv.schema:
132 if field.name in join_keys:
133 # Do not override a preexisting field with the same name.
134 if field.name not in [
135 entity_column.name for entity_column in fv.entity_columns
136 ]:
137 fv.entity_columns.append(field)
138 else:
139 if field.name not in [feature.name for feature in fv.features]:
140 fv.features.append(field)
141
142 # Since the `value_type` parameter has not yet been fully deprecated for
143 # entities, we respect the `value_type` attribute if it still exists.
144 for entity_name in fv.entities:
145 entity = entity_name_to_entity_map[entity_name]
146 if (
147 entity.join_key
148 not in [entity_column.name for entity_column in fv.entity_columns]
149 and entity.value_type != ValueType.UNKNOWN
150 ):
151 fv.entity_columns.append(
152 Field(
153 name=entity.join_key,
154 dtype=from_value_type(entity.value_type),
155 )
156 )
157
158 # Infer a dummy entity column for entityless feature views.
159 if len(fv.entities) == 1 and fv.entities[0] == DUMMY_ENTITY_NAME:
160 fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String))
161
162 # Run inference for entity columns if there are fewer entity fields than expected.
163 num_expected_join_keys = sum(
164 [
165 len(entity_name_to_join_keys_map[entity_name])
166 for entity_name in fv.entities
167 ]
168 )
169 run_inference_for_entities = len(fv.entity_columns) < num_expected_join_keys
170
171 # Run inference for feature columns if there are no feature fields.
172 run_inference_for_features = len(fv.features) == 0
173
174 if run_inference_for_entities or run_inference_for_features:
175 _infer_features_and_entities(
176 fv,
177 join_keys,
178 run_inference_for_features,
179 config,
180 )
181
182 if not fv.features:
183 raise RegistryInferenceFailure(
184 "FeatureView",
185 f"Could not infer Features for the FeatureView named {fv.name}.",
186 )
187
188
189 def _infer_features_and_entities(
190 fv: FeatureView,
191 join_keys: Set[str],
192 run_inference_for_features,
193 config,
194 ) -> None:
195 """
196 Updates the specific feature in place with inferred features and entities.
197
198 Args:
199 fv: The feature view on which to run inference.
200 join_keys: The set of join keys for the feature view's entities.
201 run_inference_for_features: Whether to run inference for features.
202 config: The config for the current feature store.
203 """
204 columns_to_exclude = {
205 fv.batch_source.timestamp_field,
206 fv.batch_source.created_timestamp_column,
207 }
208 for original_col, mapped_col in fv.batch_source.field_mapping.items():
209 if mapped_col in columns_to_exclude:
210 columns_to_exclude.remove(mapped_col)
211 columns_to_exclude.add(original_col)
212
213 table_column_names_and_types = fv.batch_source.get_table_column_names_and_types(
214 config
215 )
216
217 for col_name, col_datatype in table_column_names_and_types:
218 if col_name in columns_to_exclude:
219 continue
220 elif col_name in join_keys:
221 field = Field(
222 name=col_name,
223 dtype=from_value_type(
224 fv.batch_source.source_datatype_to_feast_value_type()(col_datatype)
225 ),
226 )
227 if field.name not in [
228 entity_column.name for entity_column in fv.entity_columns
229 ]:
230 fv.entity_columns.append(field)
231 elif not re.match(
232 "^__|__$", col_name
233 ): # double underscores often signal an internal-use column
234 if run_inference_for_features:
235 feature_name = (
236 fv.batch_source.field_mapping[col_name]
237 if col_name in fv.batch_source.field_mapping
238 else col_name
239 )
240 field = Field(
241 name=feature_name,
242 dtype=from_value_type(
243 fv.batch_source.source_datatype_to_feast_value_type()(
244 col_datatype
245 )
246 ),
247 )
248 if field.name not in [feature.name for feature in fv.features]:
249 fv.features.append(field)
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/feast/inference.py b/sdk/python/feast/inference.py
--- a/sdk/python/feast/inference.py
+++ b/sdk/python/feast/inference.py
@@ -156,7 +156,11 @@
)
# Infer a dummy entity column for entityless feature views.
- if len(fv.entities) == 1 and fv.entities[0] == DUMMY_ENTITY_NAME:
+ if (
+ len(fv.entities) == 1
+ and fv.entities[0] == DUMMY_ENTITY_NAME
+ and not fv.entity_columns
+ ):
fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String))
# Run inference for entity columns if there are fewer entity fields than expected.
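The added `and not fv.entity_columns` condition makes the dummy-entity branch idempotent: a second inference pass over the same feature view no longer appends a second copy of the dummy join key, which is what produced the duplicate column names in the Snowflake materialization query. A toy illustration of the difference:

```python
# Toy illustration (plain strings instead of feast Field objects): without the guard,
# two inference passes append the dummy join key twice; with it, the append is idempotent.
DUMMY_ENTITY_ID = "__dummy_id"

entity_columns = []
for _ in range(2):  # simulate inference running twice on the same feature view
    if not entity_columns:  # the guard added by the fix
        entity_columns.append(DUMMY_ENTITY_ID)

assert entity_columns == ["__dummy_id"]
```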
|
{"golden_diff": "diff --git a/sdk/python/feast/inference.py b/sdk/python/feast/inference.py\n--- a/sdk/python/feast/inference.py\n+++ b/sdk/python/feast/inference.py\n@@ -156,7 +156,11 @@\n )\n \n # Infer a dummy entity column for entityless feature views.\n- if len(fv.entities) == 1 and fv.entities[0] == DUMMY_ENTITY_NAME:\n+ if (\n+ len(fv.entities) == 1\n+ and fv.entities[0] == DUMMY_ENTITY_NAME\n+ and not fv.entity_columns\n+ ):\n fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String))\n \n # Run inference for entity columns if there are fewer entity fields than expected.\n", "issue": "Snowflake materialization fails on feature views with dummy entity\n## Expected Behavior \r\n\r\nSame as feature views with entities\r\n\r\n## Current Behavior\r\n\r\nMaterialization query errors, query has duplicate column names caused by a dummy entity having multiple dummy join keys\r\n\r\n## Steps to reproduce\r\n\r\n### Specifications\r\n\r\n- Version:\r\n- Platform:\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\r\neven if infer features is ran more than once, only update the dummy entity join key once\r\n\n", "before_files": [{"content": "import re\nfrom typing import List, Set, Union\n\nfrom feast.data_source import DataSource, PushSource, RequestSource\nfrom feast.entity import Entity\nfrom feast.errors import RegistryInferenceFailure\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_NAME, FeatureView\nfrom feast.field import Field, from_value_type\nfrom feast.infra.offline_stores.bigquery_source import BigQuerySource\nfrom feast.infra.offline_stores.contrib.mssql_offline_store.mssqlserver_source import (\n MsSqlServerSource,\n)\nfrom feast.infra.offline_stores.file_source import FileSource\nfrom feast.infra.offline_stores.redshift_source import RedshiftSource\nfrom feast.infra.offline_stores.snowflake_source import SnowflakeSource\nfrom feast.repo_config import RepoConfig\nfrom feast.stream_feature_view import StreamFeatureView\nfrom feast.types import String\nfrom feast.value_type import ValueType\n\n\ndef update_data_sources_with_inferred_event_timestamp_col(\n data_sources: List[DataSource], config: RepoConfig\n) -> None:\n ERROR_MSG_PREFIX = \"Unable to infer DataSource timestamp_field\"\n for data_source in data_sources:\n if isinstance(data_source, RequestSource):\n continue\n if isinstance(data_source, PushSource):\n data_source = data_source.batch_source\n if data_source.timestamp_field is None or data_source.timestamp_field == \"\":\n # prepare right match pattern for data source\n ts_column_type_regex_pattern: str\n # TODO(adchia): Move Spark source inference out of this logic\n if (\n isinstance(data_source, FileSource)\n or \"SparkSource\" == data_source.__class__.__name__\n ):\n ts_column_type_regex_pattern = r\"^timestamp\"\n elif isinstance(data_source, BigQuerySource):\n ts_column_type_regex_pattern = \"TIMESTAMP|DATETIME\"\n elif isinstance(data_source, RedshiftSource):\n ts_column_type_regex_pattern = \"TIMESTAMP[A-Z]*\"\n elif isinstance(data_source, SnowflakeSource):\n ts_column_type_regex_pattern = \"TIMESTAMP_[A-Z]*\"\n elif isinstance(data_source, MsSqlServerSource):\n ts_column_type_regex_pattern = \"TIMESTAMP|DATETIME\"\n else:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"\n DataSource inferencing of timestamp_field is currently only supported\n for FileSource, SparkSource, BigQuerySource, RedshiftSource, SnowflakeSource, MsSqlSource.\n Attempting to infer from {data_source}.\n \"\"\",\n )\n # for informing the type checker\n 
assert (\n isinstance(data_source, FileSource)\n or isinstance(data_source, BigQuerySource)\n or isinstance(data_source, RedshiftSource)\n or isinstance(data_source, SnowflakeSource)\n or isinstance(data_source, MsSqlServerSource)\n or \"SparkSource\" == data_source.__class__.__name__\n )\n\n # loop through table columns to find singular match\n timestamp_fields = []\n for (\n col_name,\n col_datatype,\n ) in data_source.get_table_column_names_and_types(config):\n if re.match(ts_column_type_regex_pattern, col_datatype):\n timestamp_fields.append(col_name)\n\n if len(timestamp_fields) > 1:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"{ERROR_MSG_PREFIX}; found multiple possible columns of timestamp type.\n Data source type: {data_source.__class__.__name__},\n Timestamp regex: `{ts_column_type_regex_pattern}`, columns: {timestamp_fields}\"\"\",\n )\n elif len(timestamp_fields) == 1:\n data_source.timestamp_field = timestamp_fields[0]\n else:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"\n {ERROR_MSG_PREFIX}; Found no columns of timestamp type.\n Data source type: {data_source.__class__.__name__},\n Timestamp regex: `{ts_column_type_regex_pattern}`.\n \"\"\",\n )\n\n\ndef update_feature_views_with_inferred_features_and_entities(\n fvs: Union[List[FeatureView], List[StreamFeatureView]],\n entities: List[Entity],\n config: RepoConfig,\n) -> None:\n \"\"\"\n Infers the features and entities associated with each feature view and updates it in place.\n\n Columns whose names match a join key of an entity are considered to be entity columns; all\n other columns except designated timestamp columns are considered to be feature columns. If\n the feature view already has features, feature inference is skipped.\n\n Note that this inference logic currently does not take any transformations (either a UDF or\n aggregations) into account. 
For example, even if a stream feature view has a transformation,\n this method assumes that the batch source contains transformed data with the correct final schema.\n\n Args:\n fvs: The feature views to be updated.\n entities: A list containing entities associated with the feature views.\n config: The config for the current feature store.\n \"\"\"\n entity_name_to_entity_map = {e.name: e for e in entities}\n entity_name_to_join_keys_map = {e.name: e.join_keys for e in entities}\n\n for fv in fvs:\n join_keys = set(\n [\n join_key\n for entity_name in fv.entities\n for join_key in entity_name_to_join_keys_map[entity_name]\n ]\n )\n\n # Fields whose names match a join key are considered to be entity columns; all\n # other fields are considered to be feature columns.\n for field in fv.schema:\n if field.name in join_keys:\n # Do not override a preexisting field with the same name.\n if field.name not in [\n entity_column.name for entity_column in fv.entity_columns\n ]:\n fv.entity_columns.append(field)\n else:\n if field.name not in [feature.name for feature in fv.features]:\n fv.features.append(field)\n\n # Since the `value_type` parameter has not yet been fully deprecated for\n # entities, we respect the `value_type` attribute if it still exists.\n for entity_name in fv.entities:\n entity = entity_name_to_entity_map[entity_name]\n if (\n entity.join_key\n not in [entity_column.name for entity_column in fv.entity_columns]\n and entity.value_type != ValueType.UNKNOWN\n ):\n fv.entity_columns.append(\n Field(\n name=entity.join_key,\n dtype=from_value_type(entity.value_type),\n )\n )\n\n # Infer a dummy entity column for entityless feature views.\n if len(fv.entities) == 1 and fv.entities[0] == DUMMY_ENTITY_NAME:\n fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String))\n\n # Run inference for entity columns if there are fewer entity fields than expected.\n num_expected_join_keys = sum(\n [\n len(entity_name_to_join_keys_map[entity_name])\n for entity_name in fv.entities\n ]\n )\n run_inference_for_entities = len(fv.entity_columns) < num_expected_join_keys\n\n # Run inference for feature columns if there are no feature fields.\n run_inference_for_features = len(fv.features) == 0\n\n if run_inference_for_entities or run_inference_for_features:\n _infer_features_and_entities(\n fv,\n join_keys,\n run_inference_for_features,\n config,\n )\n\n if not fv.features:\n raise RegistryInferenceFailure(\n \"FeatureView\",\n f\"Could not infer Features for the FeatureView named {fv.name}.\",\n )\n\n\ndef _infer_features_and_entities(\n fv: FeatureView,\n join_keys: Set[str],\n run_inference_for_features,\n config,\n) -> None:\n \"\"\"\n Updates the specific feature in place with inferred features and entities.\n\n Args:\n fv: The feature view on which to run inference.\n join_keys: The set of join keys for the feature view's entities.\n run_inference_for_features: Whether to run inference for features.\n config: The config for the current feature store.\n \"\"\"\n columns_to_exclude = {\n fv.batch_source.timestamp_field,\n fv.batch_source.created_timestamp_column,\n }\n for original_col, mapped_col in fv.batch_source.field_mapping.items():\n if mapped_col in columns_to_exclude:\n columns_to_exclude.remove(mapped_col)\n columns_to_exclude.add(original_col)\n\n table_column_names_and_types = fv.batch_source.get_table_column_names_and_types(\n config\n )\n\n for col_name, col_datatype in table_column_names_and_types:\n if col_name in columns_to_exclude:\n continue\n elif col_name in join_keys:\n 
field = Field(\n name=col_name,\n dtype=from_value_type(\n fv.batch_source.source_datatype_to_feast_value_type()(col_datatype)\n ),\n )\n if field.name not in [\n entity_column.name for entity_column in fv.entity_columns\n ]:\n fv.entity_columns.append(field)\n elif not re.match(\n \"^__|__$\", col_name\n ): # double underscores often signal an internal-use column\n if run_inference_for_features:\n feature_name = (\n fv.batch_source.field_mapping[col_name]\n if col_name in fv.batch_source.field_mapping\n else col_name\n )\n field = Field(\n name=feature_name,\n dtype=from_value_type(\n fv.batch_source.source_datatype_to_feast_value_type()(\n col_datatype\n )\n ),\n )\n if field.name not in [feature.name for feature in fv.features]:\n fv.features.append(field)\n", "path": "sdk/python/feast/inference.py"}], "after_files": [{"content": "import re\nfrom typing import List, Set, Union\n\nfrom feast.data_source import DataSource, PushSource, RequestSource\nfrom feast.entity import Entity\nfrom feast.errors import RegistryInferenceFailure\nfrom feast.feature_view import DUMMY_ENTITY_ID, DUMMY_ENTITY_NAME, FeatureView\nfrom feast.field import Field, from_value_type\nfrom feast.infra.offline_stores.bigquery_source import BigQuerySource\nfrom feast.infra.offline_stores.contrib.mssql_offline_store.mssqlserver_source import (\n MsSqlServerSource,\n)\nfrom feast.infra.offline_stores.file_source import FileSource\nfrom feast.infra.offline_stores.redshift_source import RedshiftSource\nfrom feast.infra.offline_stores.snowflake_source import SnowflakeSource\nfrom feast.repo_config import RepoConfig\nfrom feast.stream_feature_view import StreamFeatureView\nfrom feast.types import String\nfrom feast.value_type import ValueType\n\n\ndef update_data_sources_with_inferred_event_timestamp_col(\n data_sources: List[DataSource], config: RepoConfig\n) -> None:\n ERROR_MSG_PREFIX = \"Unable to infer DataSource timestamp_field\"\n for data_source in data_sources:\n if isinstance(data_source, RequestSource):\n continue\n if isinstance(data_source, PushSource):\n data_source = data_source.batch_source\n if data_source.timestamp_field is None or data_source.timestamp_field == \"\":\n # prepare right match pattern for data source\n ts_column_type_regex_pattern: str\n # TODO(adchia): Move Spark source inference out of this logic\n if (\n isinstance(data_source, FileSource)\n or \"SparkSource\" == data_source.__class__.__name__\n ):\n ts_column_type_regex_pattern = r\"^timestamp\"\n elif isinstance(data_source, BigQuerySource):\n ts_column_type_regex_pattern = \"TIMESTAMP|DATETIME\"\n elif isinstance(data_source, RedshiftSource):\n ts_column_type_regex_pattern = \"TIMESTAMP[A-Z]*\"\n elif isinstance(data_source, SnowflakeSource):\n ts_column_type_regex_pattern = \"TIMESTAMP_[A-Z]*\"\n elif isinstance(data_source, MsSqlServerSource):\n ts_column_type_regex_pattern = \"TIMESTAMP|DATETIME\"\n else:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"\n DataSource inferencing of timestamp_field is currently only supported\n for FileSource, SparkSource, BigQuerySource, RedshiftSource, SnowflakeSource, MsSqlSource.\n Attempting to infer from {data_source}.\n \"\"\",\n )\n # for informing the type checker\n assert (\n isinstance(data_source, FileSource)\n or isinstance(data_source, BigQuerySource)\n or isinstance(data_source, RedshiftSource)\n or isinstance(data_source, SnowflakeSource)\n or isinstance(data_source, MsSqlServerSource)\n or \"SparkSource\" == data_source.__class__.__name__\n )\n\n # loop through table 
columns to find singular match\n timestamp_fields = []\n for (\n col_name,\n col_datatype,\n ) in data_source.get_table_column_names_and_types(config):\n if re.match(ts_column_type_regex_pattern, col_datatype):\n timestamp_fields.append(col_name)\n\n if len(timestamp_fields) > 1:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"{ERROR_MSG_PREFIX}; found multiple possible columns of timestamp type.\n Data source type: {data_source.__class__.__name__},\n Timestamp regex: `{ts_column_type_regex_pattern}`, columns: {timestamp_fields}\"\"\",\n )\n elif len(timestamp_fields) == 1:\n data_source.timestamp_field = timestamp_fields[0]\n else:\n raise RegistryInferenceFailure(\n \"DataSource\",\n f\"\"\"\n {ERROR_MSG_PREFIX}; Found no columns of timestamp type.\n Data source type: {data_source.__class__.__name__},\n Timestamp regex: `{ts_column_type_regex_pattern}`.\n \"\"\",\n )\n\n\ndef update_feature_views_with_inferred_features_and_entities(\n fvs: Union[List[FeatureView], List[StreamFeatureView]],\n entities: List[Entity],\n config: RepoConfig,\n) -> None:\n \"\"\"\n Infers the features and entities associated with each feature view and updates it in place.\n\n Columns whose names match a join key of an entity are considered to be entity columns; all\n other columns except designated timestamp columns are considered to be feature columns. If\n the feature view already has features, feature inference is skipped.\n\n Note that this inference logic currently does not take any transformations (either a UDF or\n aggregations) into account. For example, even if a stream feature view has a transformation,\n this method assumes that the batch source contains transformed data with the correct final schema.\n\n Args:\n fvs: The feature views to be updated.\n entities: A list containing entities associated with the feature views.\n config: The config for the current feature store.\n \"\"\"\n entity_name_to_entity_map = {e.name: e for e in entities}\n entity_name_to_join_keys_map = {e.name: e.join_keys for e in entities}\n\n for fv in fvs:\n join_keys = set(\n [\n join_key\n for entity_name in fv.entities\n for join_key in entity_name_to_join_keys_map[entity_name]\n ]\n )\n\n # Fields whose names match a join key are considered to be entity columns; all\n # other fields are considered to be feature columns.\n for field in fv.schema:\n if field.name in join_keys:\n # Do not override a preexisting field with the same name.\n if field.name not in [\n entity_column.name for entity_column in fv.entity_columns\n ]:\n fv.entity_columns.append(field)\n else:\n if field.name not in [feature.name for feature in fv.features]:\n fv.features.append(field)\n\n # Since the `value_type` parameter has not yet been fully deprecated for\n # entities, we respect the `value_type` attribute if it still exists.\n for entity_name in fv.entities:\n entity = entity_name_to_entity_map[entity_name]\n if (\n entity.join_key\n not in [entity_column.name for entity_column in fv.entity_columns]\n and entity.value_type != ValueType.UNKNOWN\n ):\n fv.entity_columns.append(\n Field(\n name=entity.join_key,\n dtype=from_value_type(entity.value_type),\n )\n )\n\n # Infer a dummy entity column for entityless feature views.\n if (\n len(fv.entities) == 1\n and fv.entities[0] == DUMMY_ENTITY_NAME\n and not fv.entity_columns\n ):\n fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String))\n\n # Run inference for entity columns if there are fewer entity fields than expected.\n num_expected_join_keys = sum(\n [\n 
len(entity_name_to_join_keys_map[entity_name])\n for entity_name in fv.entities\n ]\n )\n run_inference_for_entities = len(fv.entity_columns) < num_expected_join_keys\n\n # Run inference for feature columns if there are no feature fields.\n run_inference_for_features = len(fv.features) == 0\n\n if run_inference_for_entities or run_inference_for_features:\n _infer_features_and_entities(\n fv,\n join_keys,\n run_inference_for_features,\n config,\n )\n\n if not fv.features:\n raise RegistryInferenceFailure(\n \"FeatureView\",\n f\"Could not infer Features for the FeatureView named {fv.name}.\",\n )\n\n\ndef _infer_features_and_entities(\n fv: FeatureView,\n join_keys: Set[str],\n run_inference_for_features,\n config,\n) -> None:\n \"\"\"\n Updates the specific feature in place with inferred features and entities.\n\n Args:\n fv: The feature view on which to run inference.\n join_keys: The set of join keys for the feature view's entities.\n run_inference_for_features: Whether to run inference for features.\n config: The config for the current feature store.\n \"\"\"\n columns_to_exclude = {\n fv.batch_source.timestamp_field,\n fv.batch_source.created_timestamp_column,\n }\n for original_col, mapped_col in fv.batch_source.field_mapping.items():\n if mapped_col in columns_to_exclude:\n columns_to_exclude.remove(mapped_col)\n columns_to_exclude.add(original_col)\n\n table_column_names_and_types = fv.batch_source.get_table_column_names_and_types(\n config\n )\n\n for col_name, col_datatype in table_column_names_and_types:\n if col_name in columns_to_exclude:\n continue\n elif col_name in join_keys:\n field = Field(\n name=col_name,\n dtype=from_value_type(\n fv.batch_source.source_datatype_to_feast_value_type()(col_datatype)\n ),\n )\n if field.name not in [\n entity_column.name for entity_column in fv.entity_columns\n ]:\n fv.entity_columns.append(field)\n elif not re.match(\n \"^__|__$\", col_name\n ): # double underscores often signal an internal-use column\n if run_inference_for_features:\n feature_name = (\n fv.batch_source.field_mapping[col_name]\n if col_name in fv.batch_source.field_mapping\n else col_name\n )\n field = Field(\n name=feature_name,\n dtype=from_value_type(\n fv.batch_source.source_datatype_to_feast_value_type()(\n col_datatype\n )\n ),\n )\n if field.name not in [feature.name for feature in fv.features]:\n fv.features.append(field)\n", "path": "sdk/python/feast/inference.py"}]}
| 3,002 | 171 |
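As background for the inference logic in the entry above, here is a minimal, self-contained sketch of the timestamp-column inference idea: match each column's dtype against a per-source regex and accept exactly one match. The column list, dtypes, and the `infer_timestamp_field` helper are illustrative assumptions, not Feast's actual API.

```python
# Stand-alone sketch of timestamp-column inference (illustrative only).
import re

TS_PATTERNS = {
    "file": r"^timestamp",
    "bigquery": r"TIMESTAMP|DATETIME",
    "redshift": r"TIMESTAMP[A-Z]*",
}


def infer_timestamp_field(columns, source_type):
    """Return the single column whose dtype matches the source's pattern."""
    pattern = TS_PATTERNS[source_type]
    matches = [name for name, dtype in columns if re.match(pattern, dtype)]
    if len(matches) != 1:
        raise ValueError(f"expected exactly one timestamp column, got {matches}")
    return matches[0]


# Hypothetical schema, not taken from a real feature store.
print(infer_timestamp_field(
    [("driver_id", "INT64"), ("event_ts", "TIMESTAMP")], "bigquery"
))  # -> event_ts
```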
gh_patches_debug_25418
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-781
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Maximum batch size" and "Maximum batch time" are not overwritten through env variables
If the two variables `max_batch_time` and `max_batch_size` are defined in `model-settings.json`:
```json
{
"name": "node-1",
"implementation": "models.NodeOne",
"max_batch_size": 5,
"max_batch_time": 1,
"parameters": {
"uri": "./fakeuri"
}
}
```
Then they cannot be overwritten by the environment variables `MLSERVER_MODEL_MAX_BATCH_SIZE` and `MLSERVER_MODEL_MAX_BATCH_TIME`; MLServer always reverts to the values set in `model-settings.json`. However, the expected behaviour is to use the values in the JSON as defaults and the environment variables as user-supplied overrides.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hack/generate_dotenv.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 This script will read the `settings.json` and `model-settings.json` on a given
4 folder and will export them as environment variables.
5
6 This is used when building custom Docker images, which may have a default set
7 of settings that we want to source always (e.g. the default runtime to use).
8 """
9
10 import click
11 import json
12 from json import JSONDecodeError
13 import os
14
15 from typing import List, Tuple, Type
16 from pydantic import BaseSettings
17
18 from mlserver.settings import Settings, ModelSettings, ModelParameters
19 from mlserver.cli.serve import DEFAULT_SETTINGS_FILENAME
20 from mlserver.repository import DEFAULT_MODEL_SETTINGS_FILENAME
21
22
23 def load_default_settings(folder: str) -> List[Tuple[Type[BaseSettings], dict]]:
24 default_settings = []
25
26 settings_path = os.path.join(folder, DEFAULT_SETTINGS_FILENAME)
27 if os.path.isfile(settings_path):
28 default_settings.append((Settings, _read_json_file(settings_path)))
29
30 model_parameters = None
31 model_settings_path = os.path.join(folder, DEFAULT_MODEL_SETTINGS_FILENAME)
32 if os.path.isfile(model_settings_path):
33 raw_defaults = _read_json_file(model_settings_path)
34 model_parameters = raw_defaults.pop("parameters", None)
35 default_settings.append((ModelSettings, raw_defaults))
36
37 if model_parameters:
38 default_settings.append((ModelParameters, model_parameters))
39
40 return default_settings
41
42
43 def _read_json_file(file_path: str) -> dict:
44 with open(file_path, "r") as file:
45 return json.load(file)
46
47
48 def get_default_env(default_settings: List[Tuple[Type[BaseSettings], dict]]) -> dict:
49 env = {}
50 for settings_class, raw_defaults in default_settings:
51 env.update(_convert_to_env(settings_class, raw_defaults))
52
53 return env
54
55
56 def _convert_to_env(settings_class: Type[BaseSettings], raw_defaults: dict) -> dict:
57 env = {}
58
59 env_prefix = _get_env_prefix(settings_class)
60 for field_name, field_value in raw_defaults.items():
61 env_var_name = env_prefix + field_name.upper()
62 env[env_var_name] = str(field_value)
63
64 return env
65
66
67 def _get_env_prefix(settings_class: Type[BaseSettings]) -> str:
68 if not hasattr(settings_class, "Config"):
69 return ""
70
71 config = settings_class.Config
72 return getattr(config, "env_prefix", "")
73
74
75 def save_default_env(env: dict, output: str):
76 with open(output, "w") as file:
77 for name, value in env.items():
78 file.write(_parse_dict_values(name, value))
79
80
81 def _parse_dict_values(name: str, value: str) -> str:
82 try:
83 value = value.replace("'", '"')
84 json.loads(value)
85 return f"export {name}='{value}'\n"
86 except JSONDecodeError:
87 return f'export {name}="{value}"\n'
88
89
90 @click.command()
91 @click.argument("folder", nargs=1)
92 @click.argument("output", nargs=1)
93 def main(folder: str, output: str):
94 default_settings = load_default_settings(folder)
95 default_env = get_default_env(default_settings)
96 save_default_env(default_env, output)
97
98
99 if __name__ == "__main__":
100 main()
101
```
Path: `mlserver/cli/constants.py`
Content:
```
1 DockerfileName = "Dockerfile"
2 DockerfileTemplate = """
3 FROM continuumio/miniconda3:4.12.0 AS env-builder
4 SHELL ["/bin/bash", "-c"]
5
6 ARG MLSERVER_ENV_NAME="mlserver-custom-env" \\
7 MLSERVER_ENV_TARBALL="./envs/base.tar.gz"
8
9 RUN conda config --add channels conda-forge && \\
10 conda install conda-pack
11
12 # The `[]` character range will ensure that Docker doesn't complain if the
13 # files don't exist:
14 # https://stackoverflow.com/a/65138098/5015573
15 COPY \\
16 ./environment.ym[l] \\
17 ./environment.yam[l] \\
18 ./conda.ym[l] \\
19 ./conda.yam[l] \\
20 .
21 RUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\
22 for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\
23 if [[ -f $envFile ]]; then \\
24 conda env create \
25 --name $MLSERVER_ENV_NAME \\
26 --file $envFile; \\
27 conda-pack --ignore-missing-files \
28 -n $MLSERVER_ENV_NAME \\
29 -o $MLSERVER_ENV_TARBALL; \\
30 fi \\
31 done; \\
32 chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)
33
34 FROM seldonio/mlserver:{version}-slim
35 SHELL ["/bin/bash", "-c"]
36
37 # Copy all potential sources for custom environments
38 COPY \\
39 --chown=1000 \\
40 --from=env-builder \\
41 /envs/base.tar.g[z] \\
42 ./envs/base.tar.gz
43 COPY \\
44 ./settings.jso[n] \\
45 ./model-settings.jso[n] \\
46 ./requirements.tx[t] \\
47 .
48
49 USER root
50 # Install dependencies system-wide, to ensure that they are available for every
51 # user
52 RUN ./hack/build-env.sh . ./envs/base && \
53 chown -R 1000:0 ./envs/base && \\
54 chmod -R 776 ./envs/base
55 USER 1000
56
57 # Copy everything else
58 COPY . .
59
60 # Override MLServer's own `CMD` to activate the embedded environment
61 # (optionally activating the hot-loaded one as well).
62 CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\
63 mlserver start $MLSERVER_MODELS_DIR
64 """
65
66 DockerignoreName = ".dockerignore"
67 Dockerignore = """
68 # Binaries for programs and plugins
69 *.exe
70 *.exe~
71 *.dll
72 *.so
73 *.dylib
74 *.pyc
75 *.pyo
76 *.pyd
77 bin
78
79 # Mac file system
80 **/.DS_Store
81
82 # Python dev
83 __pycache__
84 .Python
85 env
86 pip-log.txt
87 pip-delete-this-directory.txt
88 .mypy_cache
89 eggs/
90 .eggs/
91 *.egg-info/
92 ./pytest_cache
93 .tox
94 build/
95 dist/
96
97 # Notebook Checkpoints
98 .ipynb_checkpoints
99
100 .coverage
101 .coverage.*
102 .cache
103 nosetests.xml
104 coverage.xml
105 *,cover
106 *.log
107 .git
108 """
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hack/generate_dotenv.py b/hack/generate_dotenv.py
--- a/hack/generate_dotenv.py
+++ b/hack/generate_dotenv.py
@@ -82,9 +82,10 @@
try:
value = value.replace("'", '"')
json.loads(value)
- return f"export {name}='{value}'\n"
+ return f"{name}='{value}'\n"
except JSONDecodeError:
- return f'export {name}="{value}"\n'
+ # If not JSON, then assume it's a plain string
+ return f'{name}="{value}"\n'
@click.command()
diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py
--- a/mlserver/cli/constants.py
+++ b/mlserver/cli/constants.py
@@ -48,8 +48,9 @@
USER root
# Install dependencies system-wide, to ensure that they are available for every
-# user
-RUN ./hack/build-env.sh . ./envs/base && \
+# user and give permissions to (future) environment folder.
+RUN ./hack/build-env.sh . && \\
+ mkdir -p ./envs/base && \\
chown -R 1000:0 ./envs/base && \\
chmod -R 776 ./envs/base
USER 1000
@@ -59,7 +60,7 @@
# Override MLServer's own `CMD` to activate the embedded environment
# (optionally activating the hot-loaded one as well).
-CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\
+CMD source ./hack/activate-env.sh ./envs/base.tar.gz && \\
mlserver start $MLSERVER_MODELS_DIR
"""
|
{"golden_diff": "diff --git a/hack/generate_dotenv.py b/hack/generate_dotenv.py\n--- a/hack/generate_dotenv.py\n+++ b/hack/generate_dotenv.py\n@@ -82,9 +82,10 @@\n try:\n value = value.replace(\"'\", '\"')\n json.loads(value)\n- return f\"export {name}='{value}'\\n\"\n+ return f\"{name}='{value}'\\n\"\n except JSONDecodeError:\n- return f'export {name}=\"{value}\"\\n'\n+ # If not JSON, then assume it's a plain string\n+ return f'{name}=\"{value}\"\\n'\n \n \n @click.command()\ndiff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py\n--- a/mlserver/cli/constants.py\n+++ b/mlserver/cli/constants.py\n@@ -48,8 +48,9 @@\n \n USER root\n # Install dependencies system-wide, to ensure that they are available for every\n-# user\n-RUN ./hack/build-env.sh . ./envs/base && \\\n+# user and give permissions to (future) environment folder.\n+RUN ./hack/build-env.sh . && \\\\\n+ mkdir -p ./envs/base && \\\\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\n USER 1000\n@@ -59,7 +60,7 @@\n \n # Override MLServer's own `CMD` to activate the embedded environment\n # (optionally activating the hot-loaded one as well).\n-CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n+CMD source ./hack/activate-env.sh ./envs/base.tar.gz && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n \"\"\"\n", "issue": "\"Maximum batch size\" and \"Maximum batch time\" are not overwritten through env variables\nif the two variable `max_batch_time` and `max_batch_size` are defined in the `model-settings.json`:\r\n```json\r\n{\r\n \"name\": \"node-1\",\r\n \"implementation\": \"models.NodeOne\",\r\n \"max_batch_size\": 5,\r\n \"max_batch_time\": 1,\r\n \"parameters\": {\r\n \"uri\": \"./fakeuri\"\r\n }\r\n}\r\n```\r\nThen they couldn't be overwritten by environment variables `MLSERVER_MODEL_MAX_BATCH_SIZE` and `MLSERVER_MODEL_MAX_BATCH_TIME` and mlserver will always revert back to the values set in the `model-settings.json`. However, it is expected to use the values in the json as defaults and env variable as user arguments for overwriting them.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis script will read the `settings.json` and `model-settings.json` on a given\nfolder and will export them as environment variables.\n\nThis is used when building custom Docker images, which may have a default set\nof settings that we want to source always (e.g. 
the default runtime to use).\n\"\"\"\n\nimport click\nimport json\nfrom json import JSONDecodeError\nimport os\n\nfrom typing import List, Tuple, Type\nfrom pydantic import BaseSettings\n\nfrom mlserver.settings import Settings, ModelSettings, ModelParameters\nfrom mlserver.cli.serve import DEFAULT_SETTINGS_FILENAME\nfrom mlserver.repository import DEFAULT_MODEL_SETTINGS_FILENAME\n\n\ndef load_default_settings(folder: str) -> List[Tuple[Type[BaseSettings], dict]]:\n default_settings = []\n\n settings_path = os.path.join(folder, DEFAULT_SETTINGS_FILENAME)\n if os.path.isfile(settings_path):\n default_settings.append((Settings, _read_json_file(settings_path)))\n\n model_parameters = None\n model_settings_path = os.path.join(folder, DEFAULT_MODEL_SETTINGS_FILENAME)\n if os.path.isfile(model_settings_path):\n raw_defaults = _read_json_file(model_settings_path)\n model_parameters = raw_defaults.pop(\"parameters\", None)\n default_settings.append((ModelSettings, raw_defaults))\n\n if model_parameters:\n default_settings.append((ModelParameters, model_parameters))\n\n return default_settings\n\n\ndef _read_json_file(file_path: str) -> dict:\n with open(file_path, \"r\") as file:\n return json.load(file)\n\n\ndef get_default_env(default_settings: List[Tuple[Type[BaseSettings], dict]]) -> dict:\n env = {}\n for settings_class, raw_defaults in default_settings:\n env.update(_convert_to_env(settings_class, raw_defaults))\n\n return env\n\n\ndef _convert_to_env(settings_class: Type[BaseSettings], raw_defaults: dict) -> dict:\n env = {}\n\n env_prefix = _get_env_prefix(settings_class)\n for field_name, field_value in raw_defaults.items():\n env_var_name = env_prefix + field_name.upper()\n env[env_var_name] = str(field_value)\n\n return env\n\n\ndef _get_env_prefix(settings_class: Type[BaseSettings]) -> str:\n if not hasattr(settings_class, \"Config\"):\n return \"\"\n\n config = settings_class.Config\n return getattr(config, \"env_prefix\", \"\")\n\n\ndef save_default_env(env: dict, output: str):\n with open(output, \"w\") as file:\n for name, value in env.items():\n file.write(_parse_dict_values(name, value))\n\n\ndef _parse_dict_values(name: str, value: str) -> str:\n try:\n value = value.replace(\"'\", '\"')\n json.loads(value)\n return f\"export {name}='{value}'\\n\"\n except JSONDecodeError:\n return f'export {name}=\"{value}\"\\n'\n\n\[email protected]()\[email protected](\"folder\", nargs=1)\[email protected](\"output\", nargs=1)\ndef main(folder: str, output: str):\n default_settings = load_default_settings(folder)\n default_env = get_default_env(default_settings)\n save_default_env(default_env, output)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "hack/generate_dotenv.py"}, {"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.12.0 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n 
--file $envFile; \\\\\n conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . .\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis script will read the `settings.json` and `model-settings.json` on a given\nfolder and will export them as environment variables.\n\nThis is used when building custom Docker images, which may have a default set\nof settings that we want to source always (e.g. 
the default runtime to use).\n\"\"\"\n\nimport click\nimport json\nfrom json import JSONDecodeError\nimport os\n\nfrom typing import List, Tuple, Type\nfrom pydantic import BaseSettings\n\nfrom mlserver.settings import Settings, ModelSettings, ModelParameters\nfrom mlserver.cli.serve import DEFAULT_SETTINGS_FILENAME\nfrom mlserver.repository import DEFAULT_MODEL_SETTINGS_FILENAME\n\n\ndef load_default_settings(folder: str) -> List[Tuple[Type[BaseSettings], dict]]:\n default_settings = []\n\n settings_path = os.path.join(folder, DEFAULT_SETTINGS_FILENAME)\n if os.path.isfile(settings_path):\n default_settings.append((Settings, _read_json_file(settings_path)))\n\n model_parameters = None\n model_settings_path = os.path.join(folder, DEFAULT_MODEL_SETTINGS_FILENAME)\n if os.path.isfile(model_settings_path):\n raw_defaults = _read_json_file(model_settings_path)\n model_parameters = raw_defaults.pop(\"parameters\", None)\n default_settings.append((ModelSettings, raw_defaults))\n\n if model_parameters:\n default_settings.append((ModelParameters, model_parameters))\n\n return default_settings\n\n\ndef _read_json_file(file_path: str) -> dict:\n with open(file_path, \"r\") as file:\n return json.load(file)\n\n\ndef get_default_env(default_settings: List[Tuple[Type[BaseSettings], dict]]) -> dict:\n env = {}\n for settings_class, raw_defaults in default_settings:\n env.update(_convert_to_env(settings_class, raw_defaults))\n\n return env\n\n\ndef _convert_to_env(settings_class: Type[BaseSettings], raw_defaults: dict) -> dict:\n env = {}\n\n env_prefix = _get_env_prefix(settings_class)\n for field_name, field_value in raw_defaults.items():\n env_var_name = env_prefix + field_name.upper()\n env[env_var_name] = str(field_value)\n\n return env\n\n\ndef _get_env_prefix(settings_class: Type[BaseSettings]) -> str:\n if not hasattr(settings_class, \"Config\"):\n return \"\"\n\n config = settings_class.Config\n return getattr(config, \"env_prefix\", \"\")\n\n\ndef save_default_env(env: dict, output: str):\n with open(output, \"w\") as file:\n for name, value in env.items():\n file.write(_parse_dict_values(name, value))\n\n\ndef _parse_dict_values(name: str, value: str) -> str:\n try:\n value = value.replace(\"'\", '\"')\n json.loads(value)\n return f\"{name}='{value}'\\n\"\n except JSONDecodeError:\n # If not JSON, then assume it's a plain string\n return f'{name}=\"{value}\"\\n'\n\n\[email protected]()\[email protected](\"folder\", nargs=1)\[email protected](\"output\", nargs=1)\ndef main(folder: str, output: str):\n default_settings = load_default_settings(folder)\n default_env = get_default_env(default_settings)\n save_default_env(default_env, output)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "hack/generate_dotenv.py"}, {"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.12.0 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n 
--name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user and give permissions to (future) environment folder.\nRUN ./hack/build-env.sh . && \\\\\n mkdir -p ./envs/base && \\\\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . .\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}]}
| 2,253 | 393 |
gh_patches_debug_22242
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-2856
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to Reproduce Human Evaluation of Controllable Dialogue with Pretrained Model
**Bug description**
I failed to reproduce the human evaluation of the controllable dialogue with pre-trained model. The dialogue log in `evaluation_logs/` folder.
**Reproduction steps**
For the first dialogue in `evaluation_logs/baseline_model.jsonl`
*MetaData*
- model_persona: "i am terrified of clowns due to childhood trauma.\ni enjoy working with my hands and love gardening.\ni just got a job working in a gas station.\ni was adopted at a young age to an older couple.\nmy father keeps trying to set me up on blind dates."
- dialog:[{"speaker":"model","text":"Hello, how are you today?"},{"speaker":"human_evaluator","text":"hello! im doing well. how are you"},{"speaker":"model","text":"I am well, thank you. What do you do for a living?"},{"speaker":"human_evaluator","text":"i work part time at burger king making minimum wage. what about you"},{"speaker":"model","text":"I work in a gas station."},{"speaker":"human_evaluator","text":"what do you do for fun"},{"speaker":"model","text":"I work in a gas station. What about you?"},{"speaker":"human_evaluator","text":"i work at burger king"},{"speaker":"model","text":"What do you do for a living?"},{"speaker":"human_evaluator","text":"i live with my parents but i work part time in fast food"},{"speaker":"model","text":"What city are you from?"},{"speaker":"human_evaluator","text":"i am from boston. where are you"}]
Step 1. Open interactive chat with baseline agent.
```
python projects/controllable_dialogue/interactive.py \
-mf models:controllable_dialogue/convai2_finetuned_baseline
```
Step 2.
Enter your Message: `your persona: i am terrified of clowns due to childhood trauma.\nyour persona: i enjoy working with my hands and love gardening.\nyour persona: i just got a job working in a gas station.\nyour persona: i was adopted at a young age to an older couple.\nyour persona: my father keeps trying to set me up on blind dates.\n__SILENCE__`
Step 3.
Get agent response: `Hi how are you doing?`, which does not match the evaluation log `Hello, how are you today?`.
Of the 111 dialogues in `evaluation_logs/baseline_model.jsonl`, I can reproduce only 44.
**Expected behavior**
When the pre-trained model is given the same model persona and human response, I expect to reproduce the dialogue in `evaluation_logs`.
**Logs**
Please paste the command line output:
The command line output is slightly different from the original script, where I compare the model output with the `evaluation_logs/baseline_model.jsonl`.
The first dialogue in `evaluation_logs/baseline_model.jsonl`:
```
{'text': 'your persona: i am terrified of clowns due to childhood trauma.\nyour persona: i enjoy workingwith my hands and love gardening.\nyour persona: i just got a job working in a gas station.\nyour persona: i was adopted at a young age to an older couple.\nyour persona: my father keeps trying to set me up onblind dates.\n__SILENCE__', 'episode_done': False}
[PERSON_2]: Hi how are you doing?
[REFDATA]: Hello, how are you today?
Warning! Mismatched
[Human]: hello! im doing well. how are you
[PERSON_2]: I am good thanks for asking
[REFDATA]: I am well, thank you. What do you do for a living?
Warning! Mismatched
[Human]: i work part time at burger king making minimum wage. what about you
[PERSON_2]: I work in a gas station
[REFDATA]: I work in a gas station.
Warning! Mismatched
[Human]: what do you do for fun
[PERSON_2]: I work at a gas station
[REFDATA]: I work in a gas station. What about you?
Warning! Mismatched
[Human]: i work at burger king
[PERSON_2]: What do you do for a living
[REFDATA]: What do you do for a living?
Warning! Mismatched
[Human]: i live with my parents but i work part time in fast food
[PERSON_2]: What city are you from?
[REFDATA]: What city are you from?
```
I can reproduce the second dialogue.
The third dialogue in `evaluation_logs/baseline_model.jsonl`:
```
{'text': 'your persona: i am a professional wrestler.\nyour persona: i enjoy being around people.\nyour persona: i like to workout a a times a week.\nyour persona: i volunteer in a homeless shelter.\nyour persona: in my spare time i do volunteer work.\n__SILENCE__', 'episode_done': False}
[PERSON_2]: Hello, how are you today?
[REFDATA]: Hi how are you today
Warning! Mismatched
[Human]: im good and how are you
[PERSON_2]: I am doing well. Just got back from a long workout.
[REFDATA]: I am good do you work
Warning! Mismatched
[Human]: yes i do work and what about you
[PERSON_2]: I am a professional wrestler.
[REFDATA]: I am a professional wrestler
Warning! Mismatched
[Human]: thats cool i work online
[PERSON_2]: What do you do online?
[REFDATA]: What do you do online
Warning! Mismatched
[Human]: i work for a company to answer phones
[PERSON_2]: That sounds like a lot of fun.
[REFDATA]: What do you do online
Warning! Mismatched
[Human]: i just told you
[PERSON_2]: What do you do online?
[REFDATA]: What do you do for a living
Warning! Mismatched
```
**Additional context**
I tried rolling back to different code-base versions but had no luck.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `projects/controllable_dialogue/tasks/build.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import os
8 import parlai.core.params as params
9 import parlai.core.build_data as build_data
10
11
12 URL_ROOT = 'https://parl.ai/downloads/controllable_dialogue/'
13 FOLDER_NAME = 'controllable_dialogue'
14
15
16 def build(opt):
17 dpath = os.path.join(opt['datapath'], FOLDER_NAME)
18 # version 1.0: initial release
19 # version 1.1: add evaluation logs
20 version = '1.1'
21
22 if not build_data.built(dpath, version_string=version):
23 if build_data.built(dpath):
24 # older version exists, so remove the outdated files.
25 build_data.remove_dir(dpath)
26 build_data.make_dir(dpath)
27
28 # first download the data files
29 fname_data = 'data_v1.tar.gz'
30 build_data.download(URL_ROOT + fname_data, dpath, fname_data)
31 build_data.untar(dpath, fname_data)
32
33 # next download the wordstats files
34 fname_wordstats = 'wordstats_v1.tar.gz'
35 build_data.download(URL_ROOT + fname_wordstats, dpath, fname_wordstats)
36 build_data.untar(dpath, fname_wordstats)
37
38 # next download the evaluation logs
39 fname_evallogs = 'evaluationlogs_v1.tar.gz'
40 build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)
41 build_data.untar(dpath, fname_evallogs)
42
43 print("Data has been placed in " + dpath)
44
45 build_data.mark_done(dpath, version)
46
47
48 def make_path(opt, fname):
49 return os.path.join(opt['datapath'], FOLDER_NAME, fname)
50
51
52 if __name__ == '__main__':
53 opt = params.ParlaiParser().parse_args(print_args=False)
54 build(opt)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/projects/controllable_dialogue/tasks/build.py b/projects/controllable_dialogue/tasks/build.py
--- a/projects/controllable_dialogue/tasks/build.py
+++ b/projects/controllable_dialogue/tasks/build.py
@@ -17,7 +17,8 @@
dpath = os.path.join(opt['datapath'], FOLDER_NAME)
# version 1.0: initial release
# version 1.1: add evaluation logs
- version = '1.1'
+ # version 1.2: add reproducible evaluation logs
+ version = '1.2'
if not build_data.built(dpath, version_string=version):
if build_data.built(dpath):
@@ -40,6 +41,12 @@
build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)
build_data.untar(dpath, fname_evallogs)
+ # and the reproducible logs.
+ # for more info see https://github.com/facebookresearch/ParlAI/issues/2855
+ fname_evallogs = 'evaluation_logs_reproducible_v1.tar.gz'
+ build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)
+ build_data.untar(dpath, fname_evallogs)
+
print("Data has been placed in " + dpath)
build_data.mark_done(dpath, version)
|
{"golden_diff": "diff --git a/projects/controllable_dialogue/tasks/build.py b/projects/controllable_dialogue/tasks/build.py\n--- a/projects/controllable_dialogue/tasks/build.py\n+++ b/projects/controllable_dialogue/tasks/build.py\n@@ -17,7 +17,8 @@\n dpath = os.path.join(opt['datapath'], FOLDER_NAME)\n # version 1.0: initial release\n # version 1.1: add evaluation logs\n- version = '1.1'\n+ # version 1.2: add reproducible evaluation logs\n+ version = '1.2'\n \n if not build_data.built(dpath, version_string=version):\n if build_data.built(dpath):\n@@ -40,6 +41,12 @@\n build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)\n build_data.untar(dpath, fname_evallogs)\n \n+ # and the reproducible logs.\n+ # for more info see https://github.com/facebookresearch/ParlAI/issues/2855\n+ fname_evallogs = 'evaluation_logs_reproducible_v1.tar.gz'\n+ build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)\n+ build_data.untar(dpath, fname_evallogs)\n+\n print(\"Data has been placed in \" + dpath)\n \n build_data.mark_done(dpath, version)\n", "issue": "Failed to Reproduce Human Evaluation of Controllable Dialogue with Pretrained Model\n**Bug description**\r\nI failed to reproduce the human evaluation of the controllable dialogue with pre-trained model. The dialogue log in `evaluation_logs/` folder. \r\n\r\n**Reproduction steps**\r\nFor the first dialogue in `evaluation_logs/baseline_model.jsonl`\r\n*MetaData*\r\n- model_persona: \"i am terrified of clowns due to childhood trauma.\\ni enjoy working with my hands and love gardening.\\ni just got a job working in a gas station.\\ni was adopted at a young age to an older couple.\\nmy father keeps trying to set me up on blind dates.\"\r\n- dialog:[{\"speaker\":\"model\",\"text\":\"Hello, how are you today?\"},{\"speaker\":\"human_evaluator\",\"text\":\"hello! im doing well. how are you\"},{\"speaker\":\"model\",\"text\":\"I am well, thank you. What do you do for a living?\"},{\"speaker\":\"human_evaluator\",\"text\":\"i work part time at burger king making minimum wage. what about you\"},{\"speaker\":\"model\",\"text\":\"I work in a gas station.\"},{\"speaker\":\"human_evaluator\",\"text\":\"what do you do for fun\"},{\"speaker\":\"model\",\"text\":\"I work in a gas station. What about you?\"},{\"speaker\":\"human_evaluator\",\"text\":\"i work at burger king\"},{\"speaker\":\"model\",\"text\":\"What do you do for a living?\"},{\"speaker\":\"human_evaluator\",\"text\":\"i live with my parents but i work part time in fast food\"},{\"speaker\":\"model\",\"text\":\"What city are you from?\"},{\"speaker\":\"human_evaluator\",\"text\":\"i am from boston. where are you\"}]\r\n\r\nStep 1. Open interactive chat with baseline agent.\r\n```\r\npython projects/controllable_dialogue/interactive.py \\\r\n-mf models:controllable_dialogue/convai2_finetuned_baseline\r\n```\r\nStep 2.\r\nEnter your Message: `your persona: i am terrified of clowns due to childhood trauma.\\nyour persona: i enjoy working with my hands and love gardening.\\nyour persona: i just got a job working in a gas station.\\nyour persona: i was adopted at a young age to an older couple.\\nyour persona: my father keeps trying to set me up on blind dates.\\n__SILENCE__`\r\nStep 3.\r\nGet agent response: `Hi how are you doing?`. Which does not match the evaluation log `Hello, how are you today?`\r\n\r\nFor all the 111 dialogues in `evaluation_logs/baseline_model.jsonl`, I can only reproduce 44 of them. 
\r\n\r\n**Expected behavior**\r\nWhen the pre-trained model is given the same model persona and human response, I expect to reproduce the dialogue in `evaluation_logs`. \r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\nThe command line output is slightly different from the original script, where I compare the model output with the `evaluation_logs/baseline_model.jsonl`. \r\n\r\nThe first dialogue in `evaluation_logs/baseline_model.jsonl`:\r\n```\r\n{'text': 'your persona: i am terrified of clowns due to childhood trauma.\\nyour persona: i enjoy workingwith my hands and love gardening.\\nyour persona: i just got a job working in a gas station.\\nyour persona: i was adopted at a young age to an older couple.\\nyour persona: my father keeps trying to set me up onblind dates.\\n__SILENCE__', 'episode_done': False}\r\n[PERSON_2]: Hi how are you doing?\r\n[REFDATA]: Hello, how are you today? \r\nWarning! Mismatched\r\n[Human]: hello! im doing well. how are you \r\n[PERSON_2]: I am good thanks for asking\r\n[REFDATA]: I am well, thank you. What do you do for a living? \r\nWarning! Mismatched\r\n[Human]: i work part time at burger king making minimum wage. what about you \r\n[PERSON_2]: I work in a gas station\r\n[REFDATA]: I work in a gas station. \r\nWarning! Mismatched\r\n[Human]: what do you do for fun\r\n[PERSON_2]: I work at a gas station\r\n[REFDATA]: I work in a gas station. What about you? \r\nWarning! Mismatched\r\n[Human]: i work at burger king \r\n[PERSON_2]: What do you do for a living\r\n[REFDATA]: What do you do for a living? \r\nWarning! Mismatched\r\n[Human]: i live with my parents but i work part time in fast food \r\n[PERSON_2]: What city are you from?\r\n[REFDATA]: What city are you from?\r\n```\r\n\r\nI can reproduce the second dialogue.\r\n\r\nThe third dialogue in `evaluation_logs/baseline_model.jsonl`:\r\n```\r\n{'text': 'your persona: i am a professional wrestler.\\nyour persona: i enjoy being around people.\\nyour persona: i like to workout a a times a week.\\nyour persona: i volunteer in a homeless shelter.\\nyour persona: in my spare time i do volunteer work.\\n__SILENCE__', 'episode_done': False}\r\n[PERSON_2]: Hello, how are you today?\r\n[REFDATA]: Hi how are you today \r\nWarning! Mismatched\r\n[Human]: im good and how are you \r\n[PERSON_2]: I am doing well. Just got back from a long workout.\r\n[REFDATA]: I am good do you work \r\nWarning! Mismatched\r\n[Human]: yes i do work and what about you \r\n[PERSON_2]: I am a professional wrestler.\r\n[REFDATA]: I am a professional wrestler \r\nWarning! Mismatched\r\n[Human]: thats cool i work online \r\n[PERSON_2]: What do you do online?\r\n[REFDATA]: What do you do online \r\nWarning! Mismatched\r\n[Human]: i work for a company to answer phones \r\n[PERSON_2]: That sounds like a lot of fun.\r\n[REFDATA]: What do you do online \r\nWarning! Mismatched\r\n[Human]: i just told you \r\n[PERSON_2]: What do you do online?\r\n[REFDATA]: What do you do for a living \r\nWarning! Mismatched\r\n```\r\n\r\n**Additional context**\r\nI tried to roll back to different code-base versions and got no luck. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport parlai.core.params as params\nimport parlai.core.build_data as build_data\n\n\nURL_ROOT = 'https://parl.ai/downloads/controllable_dialogue/'\nFOLDER_NAME = 'controllable_dialogue'\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], FOLDER_NAME)\n # version 1.0: initial release\n # version 1.1: add evaluation logs\n version = '1.1'\n\n if not build_data.built(dpath, version_string=version):\n if build_data.built(dpath):\n # older version exists, so remove the outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # first download the data files\n fname_data = 'data_v1.tar.gz'\n build_data.download(URL_ROOT + fname_data, dpath, fname_data)\n build_data.untar(dpath, fname_data)\n\n # next download the wordstats files\n fname_wordstats = 'wordstats_v1.tar.gz'\n build_data.download(URL_ROOT + fname_wordstats, dpath, fname_wordstats)\n build_data.untar(dpath, fname_wordstats)\n\n # next download the evaluation logs\n fname_evallogs = 'evaluationlogs_v1.tar.gz'\n build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)\n build_data.untar(dpath, fname_evallogs)\n\n print(\"Data has been placed in \" + dpath)\n\n build_data.mark_done(dpath, version)\n\n\ndef make_path(opt, fname):\n return os.path.join(opt['datapath'], FOLDER_NAME, fname)\n\n\nif __name__ == '__main__':\n opt = params.ParlaiParser().parse_args(print_args=False)\n build(opt)\n", "path": "projects/controllable_dialogue/tasks/build.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport parlai.core.params as params\nimport parlai.core.build_data as build_data\n\n\nURL_ROOT = 'https://parl.ai/downloads/controllable_dialogue/'\nFOLDER_NAME = 'controllable_dialogue'\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], FOLDER_NAME)\n # version 1.0: initial release\n # version 1.1: add evaluation logs\n # version 1.2: add reproducible evaluation logs\n version = '1.2'\n\n if not build_data.built(dpath, version_string=version):\n if build_data.built(dpath):\n # older version exists, so remove the outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # first download the data files\n fname_data = 'data_v1.tar.gz'\n build_data.download(URL_ROOT + fname_data, dpath, fname_data)\n build_data.untar(dpath, fname_data)\n\n # next download the wordstats files\n fname_wordstats = 'wordstats_v1.tar.gz'\n build_data.download(URL_ROOT + fname_wordstats, dpath, fname_wordstats)\n build_data.untar(dpath, fname_wordstats)\n\n # next download the evaluation logs\n fname_evallogs = 'evaluationlogs_v1.tar.gz'\n build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)\n build_data.untar(dpath, fname_evallogs)\n\n # and the reproducible logs.\n # for more info see https://github.com/facebookresearch/ParlAI/issues/2855\n fname_evallogs = 'evaluation_logs_reproducible_v1.tar.gz'\n build_data.download(URL_ROOT + fname_evallogs, dpath, fname_evallogs)\n build_data.untar(dpath, fname_evallogs)\n\n print(\"Data has been placed in \" + dpath)\n\n build_data.mark_done(dpath, version)\n\n\ndef make_path(opt, fname):\n return os.path.join(opt['datapath'], FOLDER_NAME, fname)\n\n\nif __name__ == '__main__':\n opt = 
params.ParlaiParser().parse_args(print_args=False)\n build(opt)\n", "path": "projects/controllable_dialogue/tasks/build.py"}]}
| 2,122 | 310 |
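As a side illustration of the versioned-download pattern used in the fix above, here is a simplified, stand-alone re-implementation of the `built`/`mark_done` marker idea. It is not ParlAI's actual `parlai.core.build_data` module — only a sketch of why bumping the version string from `1.1` to `1.2` forces existing installs to rebuild and pick up the new evaluation-log tarball.

```python
# Illustrative re-implementation of a version-marker build check (not ParlAI code).
import os

MARKER = ".built"


def built(dpath: str, version_string: str) -> bool:
    """Return True only if the folder was built with this exact version."""
    marker = os.path.join(dpath, MARKER)
    if not os.path.isfile(marker):
        return False
    with open(marker) as f:
        return f.read().strip() == version_string


def mark_done(dpath: str, version_string: str) -> None:
    """Record the version that was just built."""
    with open(os.path.join(dpath, MARKER), "w") as f:
        f.write(version_string)


if __name__ == "__main__":
    path = "/tmp/controllable_dialogue"  # assumed scratch path for the demo
    os.makedirs(path, exist_ok=True)
    mark_done(path, "1.1")
    # After the version bump, the check fails and the data is re-downloaded.
    print(built(path, "1.2"))  # -> False
```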
gh_patches_debug_2104
|
rasdani/github-patches
|
git_diff
|
shuup__shuup-1574
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin: Main menu won't stay hidden
Two issues (at least):
Desktop: If I close (minimize) the main menu on desktop and click any link, the menu appears again.
Desktop to mobile: If I minimize the menu on a bigger desktop and then drag the window smaller, the menu appears again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shuup/admin/views/menu.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from django.http import JsonResponse
9 from django.views.generic import TemplateView, View
10
11
12 class MenuView(TemplateView):
13 template_name = "shuup/admin/base/_main_menu.jinja"
14
15
16 class MenuToggleView(View):
17 def post(self, request, *args, **kwargs):
18 request.session["menu_open"] = int(request.POST.get("menu_open", 0))
19 return JsonResponse({"success": True})
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shuup/admin/views/menu.py b/shuup/admin/views/menu.py
--- a/shuup/admin/views/menu.py
+++ b/shuup/admin/views/menu.py
@@ -15,5 +15,5 @@
class MenuToggleView(View):
def post(self, request, *args, **kwargs):
- request.session["menu_open"] = int(request.POST.get("menu_open", 0))
+ request.session["menu_open"] = not bool(request.session.get("menu_open", True))
return JsonResponse({"success": True})
|
{"golden_diff": "diff --git a/shuup/admin/views/menu.py b/shuup/admin/views/menu.py\n--- a/shuup/admin/views/menu.py\n+++ b/shuup/admin/views/menu.py\n@@ -15,5 +15,5 @@\n \n class MenuToggleView(View):\n def post(self, request, *args, **kwargs):\n- request.session[\"menu_open\"] = int(request.POST.get(\"menu_open\", 0))\n+ request.session[\"menu_open\"] = not bool(request.session.get(\"menu_open\", True))\n return JsonResponse({\"success\": True})\n", "issue": "Admin: Main menu won't stay hidden\nTwo issues (at least):\r\nDesktop: If I close (minimize, desktop) main-menu and click any link, the menu appears again.\r\nDesktop to mobile: If I minimize the menu on a bigger desktop and then drag window smaller the menu appears again. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, View\n\n\nclass MenuView(TemplateView):\n template_name = \"shuup/admin/base/_main_menu.jinja\"\n\n\nclass MenuToggleView(View):\n def post(self, request, *args, **kwargs):\n request.session[\"menu_open\"] = int(request.POST.get(\"menu_open\", 0))\n return JsonResponse({\"success\": True})\n", "path": "shuup/admin/views/menu.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.http import JsonResponse\nfrom django.views.generic import TemplateView, View\n\n\nclass MenuView(TemplateView):\n template_name = \"shuup/admin/base/_main_menu.jinja\"\n\n\nclass MenuToggleView(View):\n def post(self, request, *args, **kwargs):\n request.session[\"menu_open\"] = not bool(request.session.get(\"menu_open\", True))\n return JsonResponse({\"success\": True})\n", "path": "shuup/admin/views/menu.py"}]}
| 520 | 120 |
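To show how the patched toggle behaves, here is a hypothetical regression test (not from the shuup test suite). It assumes the patched `MenuToggleView` is routed at `/sa/menu/toggle/` and reachable without extra authentication in this sketch, and checks that the server-side flag simply flips on every POST, ignoring any client-supplied `menu_open` value.

```python
# Hypothetical test for the session-backed menu toggle (assumed URL and no auth).
from django.test import TestCase


class MenuToggleTest(TestCase):
    url = "/sa/menu/toggle/"  # assumed route; adjust to the project's URL conf

    def test_toggle_flips_and_persists(self):
        # First POST: session default True -> False, so the menu stays hidden.
        self.client.post(self.url)
        self.assertFalse(self.client.session["menu_open"])

        # Second POST: False -> True, regardless of any client-sent value.
        self.client.post(self.url, {"menu_open": 0})
        self.assertTrue(self.client.session["menu_open"])
```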
gh_patches_debug_4398
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-1114
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Investigate console errors from CA State Template
We get a ton of JavaScript console errors coming from the CA State Template. Why? Can we avoid and/or fix some of them?
## To Reproduce
Steps to reproduce the behavior:
1. Go to the dev site
2. Open the browser console / dev tools with `F12`
3. Scroll on the page, view another page, etc.
4. See all kinds of console errors originating from `cagov.core.min.js`
## Expected behavior
We shouldn't have any console errors, especially for features we are not using.
## Screenshots

## Additional context
We upgraded the State Template in #952
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5 import benefits.logging
6
7
8 def _filter_empty(ls):
9 return [s for s in ls if s]
10
11
12 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
13 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
14
15 # SECURITY WARNING: keep the secret key used in production secret!
16 SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "secret")
17
18 # SECURITY WARNING: don't run with debug turned on in production!
19 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
20
21 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
22
23 ALLOWED_HOSTS = _filter_empty(os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost,127.0.0.1").split(","))
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.messages",
29 "django.contrib.sessions",
30 "django.contrib.staticfiles",
31 "benefits.core",
32 "benefits.enrollment",
33 "benefits.eligibility",
34 "benefits.oauth",
35 ]
36
37 if ADMIN:
38 INSTALLED_APPS.extend(
39 [
40 "django.contrib.admin",
41 "django.contrib.auth",
42 "django.contrib.contenttypes",
43 ]
44 )
45
46 MIDDLEWARE = [
47 "django.middleware.security.SecurityMiddleware",
48 "django.contrib.sessions.middleware.SessionMiddleware",
49 "django.contrib.messages.middleware.MessageMiddleware",
50 "django.middleware.locale.LocaleMiddleware",
51 "benefits.core.middleware.Healthcheck",
52 "django.middleware.common.CommonMiddleware",
53 "django.middleware.csrf.CsrfViewMiddleware",
54 "django.middleware.clickjacking.XFrameOptionsMiddleware",
55 "csp.middleware.CSPMiddleware",
56 "benefits.core.middleware.ChangedLanguageEvent",
57 ]
58
59 if ADMIN:
60 MIDDLEWARE.extend(
61 [
62 "django.contrib.auth.middleware.AuthenticationMiddleware",
63 "django.contrib.messages.middleware.MessageMiddleware",
64 ]
65 )
66
67 if DEBUG:
68 MIDDLEWARE.append("benefits.core.middleware.DebugSession")
69
70
71 # Azure Insights
72 # https://docs.microsoft.com/en-us/azure/azure-monitor/app/opencensus-python-request#tracking-django-applications
73
74 ENABLE_AZURE_INSIGHTS = "APPLICATIONINSIGHTS_CONNECTION_STRING" in os.environ
75 print("ENABLE_AZURE_INSIGHTS: ", ENABLE_AZURE_INSIGHTS)
76 if ENABLE_AZURE_INSIGHTS:
77 MIDDLEWARE.extend(
78 [
79 "opencensus.ext.django.middleware.OpencensusMiddleware",
80 "benefits.core.middleware.LogErrorToAzure",
81 ]
82 )
83
84 # only used if enabled above
85 OPENCENSUS = {
86 "TRACE": {
87 "SAMPLER": "opencensus.trace.samplers.ProbabilitySampler(rate=1)",
88 "EXPORTER": "opencensus.ext.azure.trace_exporter.AzureExporter()",
89 }
90 }
91
92
93 CSRF_COOKIE_AGE = None
94 CSRF_COOKIE_SAMESITE = "Strict"
95 CSRF_COOKIE_HTTPONLY = True
96 CSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get("DJANGO_TRUSTED_ORIGINS", "http://localhost,http://127.0.0.1").split(","))
97
98 # With `Strict`, the user loses their Django session between leaving our app to
99 # sign in with OAuth, and coming back into our app from the OAuth redirect.
100 # This is because `Strict` disallows our cookie being sent from an external
101 # domain and so the session cookie is lost.
102 #
103 # `Lax` allows the cookie to travel with the user and be sent back to us by the
104 # OAuth server, as long as the request is "safe" i.e. GET
105 SESSION_COOKIE_SAMESITE = "Lax"
106 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
107 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
108 SESSION_COOKIE_NAME = "_benefitssessionid"
109
110 if not DEBUG:
111 CSRF_COOKIE_SECURE = True
112 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
113 SESSION_COOKIE_SECURE = True
114
115 SECURE_BROWSER_XSS_FILTER = True
116
117 # required so that cross-origin pop-ups (like the enrollment overlay) have access to parent window context
118 # https://github.com/cal-itp/benefits/pull/793
119 SECURE_CROSS_ORIGIN_OPENER_POLICY = "same-origin-allow-popups"
120
121 # the NGINX reverse proxy sits in front of the application in deployed environments
122 # SSL terminates before getting to Django, and NGINX adds this header to indicate
123 # if the original request was secure or not
124 #
125 # See https://docs.djangoproject.com/en/4.0/ref/settings/#secure-proxy-ssl-header
126 if not DEBUG:
127 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
128
129 ROOT_URLCONF = "benefits.urls"
130
131 template_ctx_processors = [
132 "django.template.context_processors.request",
133 "django.contrib.messages.context_processors.messages",
134 "benefits.core.context_processors.analytics",
135 "benefits.core.context_processors.authentication",
136 ]
137
138 if DEBUG:
139 template_ctx_processors.extend(
140 [
141 "django.template.context_processors.debug",
142 "benefits.core.context_processors.debug",
143 ]
144 )
145
146 if ADMIN:
147 template_ctx_processors.extend(
148 [
149 "django.contrib.auth.context_processors.auth",
150 "django.contrib.messages.context_processors.messages",
151 ]
152 )
153
154 TEMPLATES = [
155 {
156 "BACKEND": "django.template.backends.django.DjangoTemplates",
157 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
158 "APP_DIRS": True,
159 "OPTIONS": {
160 "context_processors": template_ctx_processors,
161 },
162 },
163 ]
164
165 WSGI_APPLICATION = "benefits.wsgi.application"
166
167 DATABASES = {
168 "default": {
169 "ENGINE": "django.db.backends.sqlite3",
170 "NAME": "django.db",
171 }
172 }
173
174 LOAD_SAMPLE_DATA = os.environ.get("DJANGO_LOAD_SAMPLE_DATA", "true").lower() != "false"
175
176 # Password validation
177
178 AUTH_PASSWORD_VALIDATORS = []
179
180 if ADMIN:
181 AUTH_PASSWORD_VALIDATORS.extend(
182 [
183 {
184 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
185 },
186 {
187 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
188 },
189 {
190 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
191 },
192 {
193 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
194 },
195 ]
196 )
197
198 # Internationalization
199
200 LANGUAGE_CODE = "en"
201
202 LANGUAGE_COOKIE_HTTPONLY = True
203 LANGUAGE_COOKIE_SAMESITE = "Strict"
204 LANGUAGE_COOKIE_SECURE = True
205
206 LANGUAGES = [("en", "English"), ("es", "Español")]
207
208 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
209
210 USE_I18N = True
211 USE_L10N = True
212
213 TIME_ZONE = "UTC"
214 USE_TZ = True
215
216 # Static files (CSS, JavaScript, Images)
217
218 STATIC_URL = "/static/"
219 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
220 # use Manifest Static Files Storage by default
221 STATICFILES_STORAGE = os.environ.get(
222 "DJANGO_STATICFILES_STORAGE", "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
223 )
224 STATIC_ROOT = os.path.join(BASE_DIR, "static")
225
226 # Logging configuration
227 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
228 LOGGING = benefits.logging.get_config(LOG_LEVEL, enable_azure=ENABLE_AZURE_INSIGHTS)
229
230 # Analytics configuration
231
232 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
233
234 # rate limit configuration
235
236 # number of requests allowed in the given period
237 RATE_LIMIT = int(os.environ.get("DJANGO_RATE_LIMIT", 5))
238
239 # HTTP request methods to rate limit
240 RATE_LIMIT_METHODS = os.environ.get("DJANGO_RATE_LIMIT_METHODS", "POST").upper().split(",")
241
242 # number of seconds before additional requests are denied
243 RATE_LIMIT_PERIOD = int(os.environ.get("DJANGO_RATE_LIMIT_PERIOD", 60))
244
245 # Rate Limit feature flag
246 RATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))
247
248 # reCAPTCHA configuration
249
250 RECAPTCHA_API_URL = os.environ.get("DJANGO_RECAPTCHA_API_URL", "https://www.google.com/recaptcha/api.js")
251 RECAPTCHA_SITE_KEY = os.environ.get("DJANGO_RECAPTCHA_SITE_KEY")
252 RECAPTCHA_API_KEY_URL = f"{RECAPTCHA_API_URL}?render={RECAPTCHA_SITE_KEY}"
253 RECAPTCHA_SECRET_KEY = os.environ.get("DJANGO_RECAPTCHA_SECRET_KEY")
254 RECAPTCHA_VERIFY_URL = os.environ.get("DJANGO_RECAPTCHA_VERIFY_URL", "https://www.google.com/recaptcha/api/siteverify")
255 RECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))
256
257 # Content Security Policy
258 # Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
259
260 # In particular, note that the inner single-quotes are required!
261 # https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
262
263 CSP_DEFAULT_SRC = ["'self'"]
264
265 CSP_CONNECT_SRC = ["'self'", "https://api.amplitude.com/"]
266 env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
267 CSP_CONNECT_SRC.extend(env_connect_src)
268
269 CSP_FONT_SRC = ["'self'", "https://california.azureedge.net/", "https://fonts.gstatic.com/"]
270 env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
271 CSP_FONT_SRC.extend(env_font_src)
272
273 CSP_FRAME_ANCESTORS = ["'none'"]
274
275 CSP_FRAME_SRC = ["'none'"]
276 env_frame_src = _filter_empty(os.environ.get("DJANGO_CSP_FRAME_SRC", "").split(","))
277 if RECAPTCHA_ENABLED:
278 env_frame_src.append("https://www.google.com")
279 if len(env_frame_src) > 0:
280 CSP_FRAME_SRC = env_frame_src
281
282 CSP_SCRIPT_SRC = [
283 "'unsafe-inline'",
284 "https://california.azureedge.net/",
285 "https://cdn.amplitude.com/libs/",
286 "https://code.jquery.com/",
287 "*.littlepay.com",
288 ]
289 env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
290 CSP_SCRIPT_SRC.extend(env_script_src)
291 if RECAPTCHA_ENABLED:
292 CSP_SCRIPT_SRC.extend(["https://www.google.com/recaptcha/", "https://www.gstatic.com/recaptcha/releases/"])
293
294 CSP_STYLE_SRC = [
295 "'self'",
296 "'unsafe-inline'",
297 "https://california.azureedge.net/",
298 "https://fonts.googleapis.com/css",
299 ]
300 env_style_src = _filter_empty(os.environ.get("DJANGO_CSP_STYLE_SRC", "").split(","))
301 CSP_STYLE_SRC.extend(env_style_src)
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -281,9 +281,8 @@
CSP_SCRIPT_SRC = [
"'unsafe-inline'",
- "https://california.azureedge.net/",
"https://cdn.amplitude.com/libs/",
- "https://code.jquery.com/",
+ "https://cdn.jsdelivr.net/",
"*.littlepay.com",
]
env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -281,9 +281,8 @@\n \n CSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n- \"https://california.azureedge.net/\",\n \"https://cdn.amplitude.com/libs/\",\n- \"https://code.jquery.com/\",\n+ \"https://cdn.jsdelivr.net/\",\n \"*.littlepay.com\",\n ]\n env_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\n", "issue": "Investigate console errors from CA State Template\nWe get a ton of Javascript console errors coming from the CA State Template. Why? Can we avoid and/or fix some of them?\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to the dev site\r\n2. Open the browser console / dev tools with `F12`\r\n3. Scroll on the page, view another page, etc.\r\n4. See all kinds of console errors originating from `cagov.core.min.js`\r\n\r\n## Expected behavior\r\n\r\nWe shouldn't have any console errors, especially for features we are not using.\r\n\r\n## Screenshots\r\n\r\n\r\n\r\n## Additional context\r\n\r\nWe upgraded the State Template in #952\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\nimport benefits.logging\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"DJANGO_SECRET_KEY\", \"secret\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ.get(\"DJANGO_ALLOWED_HOSTS\", \"localhost,127.0.0.1\").split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.append(\"benefits.core.middleware.DebugSession\")\n\n\n# Azure Insights\n# https://docs.microsoft.com/en-us/azure/azure-monitor/app/opencensus-python-request#tracking-django-applications\n\nENABLE_AZURE_INSIGHTS = \"APPLICATIONINSIGHTS_CONNECTION_STRING\" in os.environ\nprint(\"ENABLE_AZURE_INSIGHTS: \", ENABLE_AZURE_INSIGHTS)\nif ENABLE_AZURE_INSIGHTS:\n MIDDLEWARE.extend(\n [\n \"opencensus.ext.django.middleware.OpencensusMiddleware\",\n \"benefits.core.middleware.LogErrorToAzure\",\n ]\n 
)\n\n# only used if enabled above\nOPENCENSUS = {\n \"TRACE\": {\n \"SAMPLER\": \"opencensus.trace.samplers.ProbabilitySampler(rate=1)\",\n \"EXPORTER\": \"opencensus.ext.azure.trace_exporter.AzureExporter()\",\n }\n}\n\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get(\"DJANGO_TRUSTED_ORIGINS\", \"http://localhost,http://127.0.0.1\").split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# required so that cross-origin pop-ups (like the enrollment overlay) have access to parent window context\n# https://github.com/cal-itp/benefits/pull/793\nSECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin-allow-popups\"\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/4.0/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.authentication\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"django.db\",\n }\n}\n\nLOAD_SAMPLE_DATA = os.environ.get(\"DJANGO_LOAD_SAMPLE_DATA\", \"true\").lower() != \"false\"\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = 
True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\n# use Manifest Static Files Storage by default\nSTATICFILES_STORAGE = os.environ.get(\n \"DJANGO_STATICFILES_STORAGE\", \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = benefits.logging.get_config(LOG_LEVEL, enable_azure=ENABLE_AZURE_INSIGHTS)\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 5))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"POST\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 60))\n\n# Rate Limit feature flag\nRATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\", \"https://www.google.com/recaptcha/api.js\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_API_KEY_URL = f\"{RECAPTCHA_API_URL}?render={RECAPTCHA_SITE_KEY}\"\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\", \"https://www.google.com/recaptcha/api/siteverify\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nCSP_FONT_SRC = [\"'self'\", \"https://california.azureedge.net/\", \"https://fonts.gstatic.com/\"]\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC.extend(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\n\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nif RECAPTCHA_ENABLED:\n env_frame_src.append(\"https://www.google.com\")\nif len(env_frame_src) > 0:\n CSP_FRAME_SRC = env_frame_src\n\nCSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://code.jquery.com/\",\n \"*.littlepay.com\",\n]\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC.extend(env_script_src)\nif RECAPTCHA_ENABLED:\n CSP_SCRIPT_SRC.extend([\"https://www.google.com/recaptcha/\", 
\"https://www.gstatic.com/recaptcha/releases/\"])\n\nCSP_STYLE_SRC = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/\",\n \"https://fonts.googleapis.com/css\",\n]\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC.extend(env_style_src)\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\nimport benefits.logging\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"DJANGO_SECRET_KEY\", \"secret\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ.get(\"DJANGO_ALLOWED_HOSTS\", \"localhost,127.0.0.1\").split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.append(\"benefits.core.middleware.DebugSession\")\n\n\n# Azure Insights\n# https://docs.microsoft.com/en-us/azure/azure-monitor/app/opencensus-python-request#tracking-django-applications\n\nENABLE_AZURE_INSIGHTS = \"APPLICATIONINSIGHTS_CONNECTION_STRING\" in os.environ\nprint(\"ENABLE_AZURE_INSIGHTS: \", ENABLE_AZURE_INSIGHTS)\nif ENABLE_AZURE_INSIGHTS:\n MIDDLEWARE.extend(\n [\n \"opencensus.ext.django.middleware.OpencensusMiddleware\",\n \"benefits.core.middleware.LogErrorToAzure\",\n ]\n )\n\n# only used if enabled above\nOPENCENSUS = {\n \"TRACE\": {\n \"SAMPLER\": \"opencensus.trace.samplers.ProbabilitySampler(rate=1)\",\n \"EXPORTER\": \"opencensus.ext.azure.trace_exporter.AzureExporter()\",\n }\n}\n\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get(\"DJANGO_TRUSTED_ORIGINS\", \"http://localhost,http://127.0.0.1\").split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent 
back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# required so that cross-origin pop-ups (like the enrollment overlay) have access to parent window context\n# https://github.com/cal-itp/benefits/pull/793\nSECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin-allow-popups\"\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/4.0/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.authentication\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"django.db\",\n }\n}\n\nLOAD_SAMPLE_DATA = os.environ.get(\"DJANGO_LOAD_SAMPLE_DATA\", \"true\").lower() != \"false\"\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\n# use Manifest Static Files Storage by default\nSTATICFILES_STORAGE = os.environ.get(\n \"DJANGO_STATICFILES_STORAGE\", \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = 
benefits.logging.get_config(LOG_LEVEL, enable_azure=ENABLE_AZURE_INSIGHTS)\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 5))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"POST\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 60))\n\n# Rate Limit feature flag\nRATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\", \"https://www.google.com/recaptcha/api.js\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_API_KEY_URL = f\"{RECAPTCHA_API_URL}?render={RECAPTCHA_SITE_KEY}\"\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\", \"https://www.google.com/recaptcha/api/siteverify\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nCSP_FONT_SRC = [\"'self'\", \"https://california.azureedge.net/\", \"https://fonts.gstatic.com/\"]\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC.extend(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\n\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nif RECAPTCHA_ENABLED:\n env_frame_src.append(\"https://www.google.com\")\nif len(env_frame_src) > 0:\n CSP_FRAME_SRC = env_frame_src\n\nCSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://cdn.jsdelivr.net/\",\n \"*.littlepay.com\",\n]\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC.extend(env_script_src)\nif RECAPTCHA_ENABLED:\n CSP_SCRIPT_SRC.extend([\"https://www.google.com/recaptcha/\", \"https://www.gstatic.com/recaptcha/releases/\"])\n\nCSP_STYLE_SRC = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/\",\n \"https://fonts.googleapis.com/css\",\n]\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC.extend(env_style_src)\n", "path": "benefits/settings.py"}]}
| 3,602 | 125 |
gh_patches_debug_34226 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-268 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
importing djstripe within setup.py causes race condition when installing from repo
Trying to install dj-stripe from a repo runs into a race condition at setup.py:
``` bash
pip install -e git://github.com/pydanny/dj-stripe.git#egg=djstripe
Obtaining djstripe from git+git://github.com/pydanny/dj-stripe.git#egg=djstripe
Cloning git://github.com/pydanny/dj-stripe.git to ./v/test_djstripe/src/djstripe
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 20, in <module>
File "/home/dave/v/test_djstripe/src/djstripe/setup.py", line 6, in <module>
import djstripe
File "/home/dave/v/test_djstripe/src/djstripe/djstripe/__init__.py", line 4, in <module>
from django import get_version as get_django_version
ImportError: No module named 'django'
----------------------------------------
```
There are a few ways to fix this. I would suggest, for example, the get_version(package) approach used in https://github.com/pydanny/django-admin2/blob/master/setup.py
This is a trivial fix, I'll get a patch together soon.
--- END ISSUE ---
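For context, the fix direction the reporter suggests — reading `__version__` out of `djstripe/__init__.py` without importing the package, so `setup.py` no longer needs Django at install time — could look roughly like the sketch below. The helper name and the regex are illustrative assumptions, not the repository's actual code.
```python
import os
import re


def get_version(package):
    # Parse __version__ from <package>/__init__.py without importing it,
    # so setup.py never triggers the package's runtime imports (e.g. django).
    with open(os.path.join(package, '__init__.py')) as init_py:
        return re.search(
            r"__version__\s*=\s*['\"]([^'\"]+)['\"]", init_py.read()).group(1)


version = get_version('djstripe')
```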
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 import djstripe
7
8 version = djstripe.__version__
9
10 try:
11 from setuptools import setup
12 except ImportError:
13 from distutils.core import setup
14
15 if sys.argv[-1] == 'publish':
16 os.system('python setup.py sdist upload')
17 os.system('python setup.py bdist_wheel upload')
18 sys.exit()
19
20 if sys.argv[-1] == 'tag':
21 print("Tagging the version on github:")
22 os.system("git tag -a %s -m 'version %s'" % (version, version))
23 os.system("git push --tags")
24 sys.exit()
25
26 readme = open('README.rst').read()
27 history = open('HISTORY.rst').read().replace('.. :changelog:', '')
28
29 INSTALL_REQUIRES = [
30 'django>=1.7',
31 'stripe>=1.22.2',
32 'django-model-utils>=2.2',
33 'django-braces>=1.8.0',
34 'jsonfield>=1.0.3',
35 'pytz>=2015.4'
36 ]
37
38 setup(
39 name='dj-stripe',
40 version=version,
41 description=djstripe.__summary__,
42 long_description=readme + '\n\n' + history,
43 author=djstripe.__author__,
44 author_email=djstripe.__email__,
45 url=djstripe.__uri__,
46 packages=[
47 'djstripe',
48 ],
49 package_dir={'djstripe': 'djstripe'},
50 include_package_data=True,
51 install_requires=INSTALL_REQUIRES,
52 license=djstripe.__license__,
53 zip_safe=False,
54 keywords='stripe django',
55 classifiers=[
56 'Development Status :: 4 - Beta',
57 'Environment :: Web Environment',
58 'Framework :: Django',
59 'Framework :: Django :: 1.7',
60 'Framework :: Django :: 1.8',
61 'Intended Audience :: Developers',
62 'License :: OSI Approved :: BSD License',
63 'Natural Language :: English',
64 "Programming Language :: Python :: 2",
65 'Programming Language :: Python :: 2.7',
66 'Programming Language :: Python :: 3',
67 'Programming Language :: Python :: 3.3',
68 'Programming Language :: Python :: 3.4',
69 'Programming Language :: Python :: 3.5'
70 ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,37 @@
#!/usr/bin/env python
+import ast
import os
import sys
-import djstripe
-version = djstripe.__version__
+class MetadataFinder(ast.NodeVisitor):
+ def __init__(self):
+ self.version = None
+ self.summary = None
+ self.author = None
+ self.email = None
+ self.uri = None
+ self.licence = None
+
+ def visit_Assign(self, node):
+ if node.targets[0].id == '__version__':
+ self.version = node.value.s
+ elif node.targets[0].id == '__summary__':
+ self.summary = node.value.s
+ elif node.targets[0].id == '__author__':
+ self.author = node.value.s
+ elif node.targets[0].id == '__email__':
+ self.email = node.value.s
+ elif node.targets[0].id == '__uri__':
+ self.uri = node.value.s
+ elif node.targets[0].id == '__license__':
+ self.license = node.value.s
+
+
+with open(os.path.join('djstripe', '__init__.py')) as open_file:
+ finder = MetadataFinder()
+ finder.visit(ast.parse(open_file.read()))
try:
from setuptools import setup
@@ -19,7 +45,8 @@
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
- os.system("git tag -a %s -m 'version %s'" % (version, version))
+ os.system("git tag -a %s -m 'version %s'" % (finder.version,
+ finder.version))
os.system("git push --tags")
sys.exit()
@@ -37,19 +64,19 @@
setup(
name='dj-stripe',
- version=version,
- description=djstripe.__summary__,
+ version=finder.version,
+ description=finder.summary,
long_description=readme + '\n\n' + history,
- author=djstripe.__author__,
- author_email=djstripe.__email__,
- url=djstripe.__uri__,
+ author=finder.author,
+ author_email=finder.email,
+ url=finder.uri,
packages=[
'djstripe',
],
package_dir={'djstripe': 'djstripe'},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
- license=djstripe.__license__,
+ license=finder.license,
zip_safe=False,
keywords='stripe django',
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,11 +1,37 @@\n #!/usr/bin/env python\n \n+import ast\n import os\n import sys\n \n-import djstripe\n \n-version = djstripe.__version__\n+class MetadataFinder(ast.NodeVisitor):\n+ def __init__(self):\n+ self.version = None\n+ self.summary = None\n+ self.author = None\n+ self.email = None\n+ self.uri = None\n+ self.licence = None\n+\n+ def visit_Assign(self, node):\n+ if node.targets[0].id == '__version__':\n+ self.version = node.value.s\n+ elif node.targets[0].id == '__summary__':\n+ self.summary = node.value.s\n+ elif node.targets[0].id == '__author__':\n+ self.author = node.value.s\n+ elif node.targets[0].id == '__email__':\n+ self.email = node.value.s\n+ elif node.targets[0].id == '__uri__':\n+ self.uri = node.value.s\n+ elif node.targets[0].id == '__license__':\n+ self.license = node.value.s\n+\n+\n+with open(os.path.join('djstripe', '__init__.py')) as open_file:\n+ finder = MetadataFinder()\n+ finder.visit(ast.parse(open_file.read()))\n \n try:\n from setuptools import setup\n@@ -19,7 +45,8 @@\n \n if sys.argv[-1] == 'tag':\n print(\"Tagging the version on github:\")\n- os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n+ os.system(\"git tag -a %s -m 'version %s'\" % (finder.version,\n+ finder.version))\n os.system(\"git push --tags\")\n sys.exit()\n \n@@ -37,19 +64,19 @@\n \n setup(\n name='dj-stripe',\n- version=version,\n- description=djstripe.__summary__,\n+ version=finder.version,\n+ description=finder.summary,\n long_description=readme + '\\n\\n' + history,\n- author=djstripe.__author__,\n- author_email=djstripe.__email__,\n- url=djstripe.__uri__,\n+ author=finder.author,\n+ author_email=finder.email,\n+ url=finder.uri,\n packages=[\n 'djstripe',\n ],\n package_dir={'djstripe': 'djstripe'},\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n- license=djstripe.__license__,\n+ license=finder.license,\n zip_safe=False,\n keywords='stripe django',\n classifiers=[\n", "issue": "importing djstripe within setup.py causes race condition when installing from repo\nTrying to install dj-stripe from a repo runs into a race condition at setup.py:\n\n``` bash\npip install -e git://github.com/pydanny/dj-stripe.git#egg=djstripe \nObtaining djstripe from git+git://github.com/pydanny/dj-stripe.git#egg=djstripe\n Cloning git://github.com/pydanny/dj-stripe.git to ./v/test_djstripe/src/djstripe\n Complete output from command python setup.py egg_info:\n Traceback (most recent call last):\n File \"<string>\", line 20, in <module>\n File \"/home/dave/v/test_djstripe/src/djstripe/setup.py\", line 6, in <module>\n import djstripe\n File \"/home/dave/v/test_djstripe/src/djstripe/djstripe/__init__.py\", line 4, in <module>\n from django import get_version as get_django_version\n ImportError: No module named 'django'\n\n ----------------------------------------\n```\n\nThere are a few ways to fix this. I would suggest the, for example, get_version(package) methods used in https://github.com/pydanny/django-admin2/blob/master/setup.py\n\nThis is a trivial fix, I'll get a patch together soon. 
\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\nimport djstripe\n\nversion = djstripe.__version__\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n print(\"Tagging the version on github:\")\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nreadme = open('README.rst').read()\nhistory = open('HISTORY.rst').read().replace('.. :changelog:', '')\n\nINSTALL_REQUIRES = [\n 'django>=1.7',\n 'stripe>=1.22.2',\n 'django-model-utils>=2.2',\n 'django-braces>=1.8.0',\n 'jsonfield>=1.0.3',\n 'pytz>=2015.4'\n]\n\nsetup(\n name='dj-stripe',\n version=version,\n description=djstripe.__summary__,\n long_description=readme + '\\n\\n' + history,\n author=djstripe.__author__,\n author_email=djstripe.__email__,\n url=djstripe.__uri__,\n packages=[\n 'djstripe',\n ],\n package_dir={'djstripe': 'djstripe'},\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n license=djstripe.__license__,\n zip_safe=False,\n keywords='stripe django',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.7',\n 'Framework :: Django :: 1.8',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport ast\nimport os\nimport sys\n\n\nclass MetadataFinder(ast.NodeVisitor):\n def __init__(self):\n self.version = None\n self.summary = None\n self.author = None\n self.email = None\n self.uri = None\n self.licence = None\n\n def visit_Assign(self, node):\n if node.targets[0].id == '__version__':\n self.version = node.value.s\n elif node.targets[0].id == '__summary__':\n self.summary = node.value.s\n elif node.targets[0].id == '__author__':\n self.author = node.value.s\n elif node.targets[0].id == '__email__':\n self.email = node.value.s\n elif node.targets[0].id == '__uri__':\n self.uri = node.value.s\n elif node.targets[0].id == '__license__':\n self.license = node.value.s\n\n\nwith open(os.path.join('djstripe', '__init__.py')) as open_file:\n finder = MetadataFinder()\n finder.visit(ast.parse(open_file.read()))\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n print(\"Tagging the version on github:\")\n os.system(\"git tag -a %s -m 'version %s'\" % (finder.version,\n finder.version))\n os.system(\"git push --tags\")\n sys.exit()\n\nreadme = open('README.rst').read()\nhistory = open('HISTORY.rst').read().replace('.. 
:changelog:', '')\n\nINSTALL_REQUIRES = [\n 'django>=1.7',\n 'stripe>=1.22.2',\n 'django-model-utils>=2.2',\n 'django-braces>=1.8.0',\n 'jsonfield>=1.0.3',\n 'pytz>=2015.4'\n]\n\nsetup(\n name='dj-stripe',\n version=finder.version,\n description=finder.summary,\n long_description=readme + '\\n\\n' + history,\n author=finder.author,\n author_email=finder.email,\n url=finder.uri,\n packages=[\n 'djstripe',\n ],\n package_dir={'djstripe': 'djstripe'},\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n license=finder.license,\n zip_safe=False,\n keywords='stripe django',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Framework :: Django :: 1.7',\n 'Framework :: Django :: 1.8',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'\n ],\n)\n", "path": "setup.py"}]}
| 1,182 | 591 |
gh_patches_debug_9495 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-231 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
psycopg2 cursor's __enter__ method is not patched to be traced
See behavior here:
```python
>>> import ddtrace
>>> ddtrace.patch_all()
>>> import psycopg2
>>> conn = psycopg2.connect('postgresql://localhost')
>>> print(type(conn.cursor()))
<class 'ddtrace.contrib.dbapi.TracedCursor'>
>>> with conn.cursor() as cur:
... print(type(cur))
<type 'psycopg2.extensions.cursor'>
```
--- END ISSUE ---
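For context, the usual fix with a `wrapt.ObjectProxy` wrapper is to implement `__enter__` on the proxy so the traced object, not the raw cursor, is what the `with` block receives. Below is a minimal sketch, assuming the wrapped cursor itself supports the context-manager protocol (psycopg2 cursors do); the `__exit__` shown here is part of the sketch only, not necessarily how the library handles it.
```python
import wrapt


class TracedCursor(wrapt.ObjectProxy):
    def __enter__(self):
        # Reference the wrapped cursor's __enter__ first so an AttributeError
        # reads the same as it would without the proxy in place.
        self.__wrapped__.__enter__
        # Return the proxy itself, so execute()/executemany() calls made
        # inside the with-block still pass through the tracing wrapper.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate cleanup to the wrapped cursor.
        return self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)
```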
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/dbapi/__init__.py`
Content:
```
1 """
2 Generic dbapi tracing code.
3 """
4
5 # stdlib
6 import logging
7
8 # 3p
9 import wrapt
10
11 # project
12 from ddtrace import Pin
13 from ddtrace.ext import sql
14
15
16 log = logging.getLogger(__name__)
17
18
19 class TracedCursor(wrapt.ObjectProxy):
20 """ TracedCursor wraps a psql cursor and traces it's queries. """
21
22 _datadog_pin = None
23 _datadog_name = None
24
25 def __init__(self, cursor, pin):
26 super(TracedCursor, self).__init__(cursor)
27 self._datadog_pin = pin
28 name = pin.app or 'sql'
29 self._datadog_name = '%s.query' % name
30
31 def executemany(self, query, *args, **kwargs):
32 pin = self._datadog_pin
33 if not pin or not pin.enabled():
34 return self.__wrapped__.executemany(query, *args, **kwargs)
35 service = pin.service
36
37 # FIXME[matt] properly handle kwargs here. arg names can be different
38 # with different libs.
39 with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:
40 s.span_type = sql.TYPE
41 s.set_tag(sql.QUERY, query)
42 s.set_tags(pin.tags)
43 s.set_tag("sql.executemany", "true")
44 try:
45 return self.__wrapped__.executemany(query, *args, **kwargs)
46 finally:
47 s.set_metric("db.rowcount", self.rowcount)
48
49 def execute(self, query, *args, **kwargs):
50 pin = self._datadog_pin
51 if not pin or not pin.enabled():
52 return self.__wrapped__.execute(query, *args, **kwargs)
53
54 service = pin.service
55 with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:
56 s.span_type = sql.TYPE
57 s.set_tag(sql.QUERY, query)
58 s.set_tags(pin.tags)
59 try:
60 return self.__wrapped__.execute(query, *args, **kwargs)
61 finally:
62 s.set_metric("db.rowcount", self.rowcount)
63
64 def callproc(self, proc, args):
65 pin = self._datadog_pin
66 if not pin or not pin.enabled():
67 return self.__wrapped__.callproc(proc, args)
68
69 with pin.tracer.trace(self._datadog_name, service=pin.service, resource=proc) as s:
70 s.span_type = sql.TYPE
71 s.set_tag(sql.QUERY, proc)
72 s.set_tags(pin.tags)
73 try:
74 return self.__wrapped__.callproc(proc, args)
75 finally:
76 s.set_metric("db.rowcount", self.rowcount)
77
78
79 class TracedConnection(wrapt.ObjectProxy):
80 """ TracedConnection wraps a Connection with tracing code. """
81
82 _datadog_pin = None
83
84 def __init__(self, conn):
85 super(TracedConnection, self).__init__(conn)
86 name = _get_vendor(conn)
87 Pin(service=name, app=name).onto(self)
88
89 def cursor(self, *args, **kwargs):
90 cursor = self.__wrapped__.cursor(*args, **kwargs)
91 pin = self._datadog_pin
92 if not pin:
93 return cursor
94 return TracedCursor(cursor, pin)
95
96
97 def _get_vendor(conn):
98 """ Return the vendor (e.g postgres, mysql) of the given
99 database.
100 """
101 try:
102 name = _get_module_name(conn)
103 except Exception:
104 log.debug("couldnt parse module name", exc_info=True)
105 name = "sql"
106 return sql.normalize_vendor(name)
107
108 def _get_module_name(conn):
109 return conn.__class__.__module__.split('.')[0]
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/dbapi/__init__.py b/ddtrace/contrib/dbapi/__init__.py
--- a/ddtrace/contrib/dbapi/__init__.py
+++ b/ddtrace/contrib/dbapi/__init__.py
@@ -75,6 +75,15 @@
finally:
s.set_metric("db.rowcount", self.rowcount)
+ def __enter__(self):
+ # previous versions of the dbapi didn't support context managers. let's
+ # reference the func that would be called to ensure that errors
+ # messages will be the same.
+ self.__wrapped__.__enter__
+
+ # and finally, yield the traced cursor.
+ return self
+
class TracedConnection(wrapt.ObjectProxy):
""" TracedConnection wraps a Connection with tracing code. """
|
{"golden_diff": "diff --git a/ddtrace/contrib/dbapi/__init__.py b/ddtrace/contrib/dbapi/__init__.py\n--- a/ddtrace/contrib/dbapi/__init__.py\n+++ b/ddtrace/contrib/dbapi/__init__.py\n@@ -75,6 +75,15 @@\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n \n+ def __enter__(self):\n+ # previous versions of the dbapi didn't support context managers. let's\n+ # reference the func that would be called to ensure that errors\n+ # messages will be the same.\n+ self.__wrapped__.__enter__\n+\n+ # and finally, yield the traced cursor.\n+ return self\n+\n \n class TracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n", "issue": "psycopg2 cursor's __enter__ method is not patched to be traced\nSee behavior here:\r\n\r\n```python\r\n>>> import ddtrace\r\n>>> ddtrace.patch_all()\r\n>>> import psycopg2\r\n>>> conn = psycopg2.connect('postgresql://localhost')\r\n>>> print(type(conn.cursor()))\r\n<class 'ddtrace.contrib.dbapi.TracedCursor'>\r\n>>> with conn.cursor() as cur:\r\n... print(type(cur))\r\n<type 'psycopg2.extensions.cursor'>\r\n```\n", "before_files": [{"content": "\"\"\"\nGeneric dbapi tracing code.\n\"\"\"\n\n# stdlib\nimport logging\n\n# 3p\nimport wrapt\n\n# project\nfrom ddtrace import Pin\nfrom ddtrace.ext import sql\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces it's queries. \"\"\"\n\n _datadog_pin = None\n _datadog_name = None\n\n def __init__(self, cursor, pin):\n super(TracedCursor, self).__init__(cursor)\n self._datadog_pin = pin\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n def executemany(self, query, *args, **kwargs):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.executemany(query, *args, **kwargs)\n service = pin.service\n\n # FIXME[matt] properly handle kwargs here. arg names can be different\n # with different libs.\n with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, query)\n s.set_tags(pin.tags)\n s.set_tag(\"sql.executemany\", \"true\")\n try:\n return self.__wrapped__.executemany(query, *args, **kwargs)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n def execute(self, query, *args, **kwargs):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.execute(query, *args, **kwargs)\n\n service = pin.service\n with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, query)\n s.set_tags(pin.tags)\n try:\n return self.__wrapped__.execute(query, *args, **kwargs)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n def callproc(self, proc, args):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.callproc(proc, args)\n\n with pin.tracer.trace(self._datadog_name, service=pin.service, resource=proc) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, proc)\n s.set_tags(pin.tags)\n try:\n return self.__wrapped__.callproc(proc, args)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n\nclass TracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. 
\"\"\"\n\n _datadog_pin = None\n\n def __init__(self, conn):\n super(TracedConnection, self).__init__(conn)\n name = _get_vendor(conn)\n Pin(service=name, app=name).onto(self)\n\n def cursor(self, *args, **kwargs):\n cursor = self.__wrapped__.cursor(*args, **kwargs)\n pin = self._datadog_pin\n if not pin:\n return cursor\n return TracedCursor(cursor, pin)\n\n\ndef _get_vendor(conn):\n \"\"\" Return the vendor (e.g postgres, mysql) of the given\n database.\n \"\"\"\n try:\n name = _get_module_name(conn)\n except Exception:\n log.debug(\"couldnt parse module name\", exc_info=True)\n name = \"sql\"\n return sql.normalize_vendor(name)\n\ndef _get_module_name(conn):\n return conn.__class__.__module__.split('.')[0]\n", "path": "ddtrace/contrib/dbapi/__init__.py"}], "after_files": [{"content": "\"\"\"\nGeneric dbapi tracing code.\n\"\"\"\n\n# stdlib\nimport logging\n\n# 3p\nimport wrapt\n\n# project\nfrom ddtrace import Pin\nfrom ddtrace.ext import sql\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces it's queries. \"\"\"\n\n _datadog_pin = None\n _datadog_name = None\n\n def __init__(self, cursor, pin):\n super(TracedCursor, self).__init__(cursor)\n self._datadog_pin = pin\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n def executemany(self, query, *args, **kwargs):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.executemany(query, *args, **kwargs)\n service = pin.service\n\n # FIXME[matt] properly handle kwargs here. arg names can be different\n # with different libs.\n with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, query)\n s.set_tags(pin.tags)\n s.set_tag(\"sql.executemany\", \"true\")\n try:\n return self.__wrapped__.executemany(query, *args, **kwargs)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n def execute(self, query, *args, **kwargs):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.execute(query, *args, **kwargs)\n\n service = pin.service\n with pin.tracer.trace(self._datadog_name, service=service, resource=query) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, query)\n s.set_tags(pin.tags)\n try:\n return self.__wrapped__.execute(query, *args, **kwargs)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n def callproc(self, proc, args):\n pin = self._datadog_pin\n if not pin or not pin.enabled():\n return self.__wrapped__.callproc(proc, args)\n\n with pin.tracer.trace(self._datadog_name, service=pin.service, resource=proc) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, proc)\n s.set_tags(pin.tags)\n try:\n return self.__wrapped__.callproc(proc, args)\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n def __enter__(self):\n # previous versions of the dbapi didn't support context managers. let's\n # reference the func that would be called to ensure that errors\n # messages will be the same.\n self.__wrapped__.__enter__\n\n # and finally, yield the traced cursor.\n return self\n\n\nclass TracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. 
\"\"\"\n\n _datadog_pin = None\n\n def __init__(self, conn):\n super(TracedConnection, self).__init__(conn)\n name = _get_vendor(conn)\n Pin(service=name, app=name).onto(self)\n\n def cursor(self, *args, **kwargs):\n cursor = self.__wrapped__.cursor(*args, **kwargs)\n pin = self._datadog_pin\n if not pin:\n return cursor\n return TracedCursor(cursor, pin)\n\n\ndef _get_vendor(conn):\n \"\"\" Return the vendor (e.g postgres, mysql) of the given\n database.\n \"\"\"\n try:\n name = _get_module_name(conn)\n except Exception:\n log.debug(\"couldnt parse module name\", exc_info=True)\n name = \"sql\"\n return sql.normalize_vendor(name)\n\ndef _get_module_name(conn):\n return conn.__class__.__module__.split('.')[0]\n", "path": "ddtrace/contrib/dbapi/__init__.py"}]}
| 1,403 | 182 |
gh_patches_debug_17283 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-14406 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ PornFlip ] Regex Invalid
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.01**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
```
youtube-dl --verbose -J https://www.pornflip.com/v/EkRD6-vS2-s
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'--verbose', u'-J', u'https://www.pornflip.com/v/EkRD6-vS2-s']
[debug] Encodings: locale UTF-8, fs UTF-8, out None, pref UTF-8
[debug] youtube-dl version 2017.10.01
[debug] Python version 2.7.12 - Linux-4.4.0-96-generic-x86_64-with-Ubuntu-16.04-xenial
[debug] exe versions: none
[debug] Proxy map: {}
WARNING: Falling back on generic information extractor.
WARNING: Failed to download MPD manifest: HTTP Error 404: Not Found
ERROR: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 2156, in _real_extract
doc = compat_etree_fromstring(webpage.encode('utf-8'))
File "/usr/local/bin/youtube-dl/youtube_dl/compat.py", line 2539, in compat_etree_fromstring
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
File "/usr/local/bin/youtube-dl/youtube_dl/compat.py", line 2528, in _XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1653, in feed
self._raiseerror(v)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1517, in _raiseerror
raise err
ParseError: mismatched tag: line 21, column 2
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 783, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 434, in extract
ie_result = self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 3051, in _real_extract
raise UnsupportedError(url)
UnsupportedError: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s
```
### Description of your *issue*, suggested solution and other information
Regex Valid:
```
_VALID_URL = r'https?://(?:www\.)?pornflip\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/pornflip.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 from ..compat import (
6 compat_parse_qs,
7 compat_str,
8 )
9 from ..utils import (
10 int_or_none,
11 try_get,
12 unified_timestamp,
13 )
14
15
16 class PornFlipIE(InfoExtractor):
17 _VALID_URL = r'https?://(?:www\.)?pornflip\.com/(?:v|embed)/(?P<id>[0-9A-Za-z]{11})'
18 _TESTS = [{
19 'url': 'https://www.pornflip.com/v/wz7DfNhMmep',
20 'md5': '98c46639849145ae1fd77af532a9278c',
21 'info_dict': {
22 'id': 'wz7DfNhMmep',
23 'ext': 'mp4',
24 'title': '2 Amateurs swallow make his dream cumshots true',
25 'thumbnail': r're:^https?://.*\.jpg$',
26 'duration': 112,
27 'timestamp': 1481655502,
28 'upload_date': '20161213',
29 'uploader_id': '106786',
30 'uploader': 'figifoto',
31 'view_count': int,
32 'age_limit': 18,
33 }
34 }, {
35 'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',
36 'only_matching': True,
37 }]
38
39 def _real_extract(self, url):
40 video_id = self._match_id(url)
41
42 webpage = self._download_webpage(
43 'https://www.pornflip.com/v/%s' % video_id, video_id)
44
45 flashvars = compat_parse_qs(self._search_regex(
46 r'<embed[^>]+flashvars=(["\'])(?P<flashvars>(?:(?!\1).)+)\1',
47 webpage, 'flashvars', group='flashvars'))
48
49 title = flashvars['video_vars[title]'][0]
50
51 def flashvar(kind):
52 return try_get(
53 flashvars, lambda x: x['video_vars[%s]' % kind][0], compat_str)
54
55 formats = []
56 for key, value in flashvars.items():
57 if not (value and isinstance(value, list)):
58 continue
59 format_url = value[0]
60 if key == 'video_vars[hds_manifest]':
61 formats.extend(self._extract_mpd_formats(
62 format_url, video_id, mpd_id='dash', fatal=False))
63 continue
64 height = self._search_regex(
65 r'video_vars\[video_urls\]\[(\d+)', key, 'height', default=None)
66 if not height:
67 continue
68 formats.append({
69 'url': format_url,
70 'format_id': 'http-%s' % height,
71 'height': int_or_none(height),
72 })
73 self._sort_formats(formats)
74
75 uploader = self._html_search_regex(
76 (r'<span[^>]+class="name"[^>]*>\s*<a[^>]+>\s*<strong>(?P<uploader>[^<]+)',
77 r'<meta[^>]+content=(["\'])[^>]*\buploaded by (?P<uploader>.+?)\1'),
78 webpage, 'uploader', fatal=False, group='uploader')
79
80 return {
81 'id': video_id,
82 'formats': formats,
83 'title': title,
84 'thumbnail': flashvar('big_thumb'),
85 'duration': int_or_none(flashvar('duration')),
86 'timestamp': unified_timestamp(self._html_search_meta(
87 'uploadDate', webpage, 'timestamp')),
88 'uploader_id': flashvar('author_id'),
89 'uploader': uploader,
90 'view_count': int_or_none(flashvar('views')),
91 'age_limit': 18,
92 }
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/pornflip.py b/youtube_dl/extractor/pornflip.py
--- a/youtube_dl/extractor/pornflip.py
+++ b/youtube_dl/extractor/pornflip.py
@@ -14,7 +14,7 @@
class PornFlipIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?pornflip\.com/(?:v|embed)/(?P<id>[0-9A-Za-z]{11})'
+ _VALID_URL = r'https?://(?:www\.)?pornflip\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'
_TESTS = [{
'url': 'https://www.pornflip.com/v/wz7DfNhMmep',
'md5': '98c46639849145ae1fd77af532a9278c',
@@ -34,6 +34,12 @@
}, {
'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',
'only_matching': True,
+ }, {
+ 'url': 'https://www.pornflip.com/v/EkRD6-vS2-s',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.pornflip.com/embed/EkRD6-vS2-s',
+ 'only_matching': True,
}]
def _real_extract(self, url):
|
{"golden_diff": "diff --git a/youtube_dl/extractor/pornflip.py b/youtube_dl/extractor/pornflip.py\n--- a/youtube_dl/extractor/pornflip.py\n+++ b/youtube_dl/extractor/pornflip.py\n@@ -14,7 +14,7 @@\n \n \n class PornFlipIE(InfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z]{11})'\n+ _VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'\n _TESTS = [{\n 'url': 'https://www.pornflip.com/v/wz7DfNhMmep',\n 'md5': '98c46639849145ae1fd77af532a9278c',\n@@ -34,6 +34,12 @@\n }, {\n 'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://www.pornflip.com/v/EkRD6-vS2-s',\n+ 'only_matching': True,\n+ }, {\n+ 'url': 'https://www.pornflip.com/embed/EkRD6-vS2-s',\n+ 'only_matching': True,\n }]\n \n def _real_extract(self, url):\n", "issue": "[ PornFlip ] Regex Invalid\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.01**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. 
It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n\r\n```\r\nyoutube-dl --verbose -J https://www.pornflip.com/v/EkRD6-vS2-s\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'-J', u'https://www.pornflip.com/v/EkRD6-vS2-s']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out None, pref UTF-8\r\n[debug] youtube-dl version 2017.10.01\r\n[debug] Python version 2.7.12 - Linux-4.4.0-96-generic-x86_64-with-Ubuntu-16.04-xenial\r\n[debug] exe versions: none\r\n[debug] Proxy map: {}\r\nWARNING: Falling back on generic information extractor.\r\nWARNING: Failed to download MPD manifest: HTTP Error 404: Not Found\r\nERROR: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 2156, in _real_extract\r\n doc = compat_etree_fromstring(webpage.encode('utf-8'))\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/compat.py\", line 2539, in compat_etree_fromstring\r\n doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/compat.py\", line 2528, in _XML\r\n parser.feed(text)\r\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1653, in feed\r\n self._raiseerror(v)\r\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1517, in _raiseerror\r\n raise err\r\nParseError: mismatched tag: line 21, column 2\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 783, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 434, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 3051, in _real_extract\r\n raise UnsupportedError(url)\r\nUnsupportedError: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s\r\n```\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nRegex Valid:\r\n```\r\n_VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'\r\n```\r\n\n[ PornFlip ] Regex Invalid\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. 
Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.01**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n\r\n```\r\nyoutube-dl --verbose -J https://www.pornflip.com/v/EkRD6-vS2-s\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'-J', u'https://www.pornflip.com/v/EkRD6-vS2-s']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out None, pref UTF-8\r\n[debug] youtube-dl version 2017.10.01\r\n[debug] Python version 2.7.12 - Linux-4.4.0-96-generic-x86_64-with-Ubuntu-16.04-xenial\r\n[debug] exe versions: none\r\n[debug] Proxy map: {}\r\nWARNING: Falling back on generic information extractor.\r\nWARNING: Failed to download MPD manifest: HTTP Error 404: Not Found\r\nERROR: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 2156, in _real_extract\r\n doc = compat_etree_fromstring(webpage.encode('utf-8'))\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/compat.py\", line 2539, in compat_etree_fromstring\r\n doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/compat.py\", line 2528, in _XML\r\n parser.feed(text)\r\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1653, in feed\r\n self._raiseerror(v)\r\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1517, in _raiseerror\r\n raise err\r\nParseError: mismatched tag: line 21, column 2\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 783, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 434, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 3051, in _real_extract\r\n raise UnsupportedError(url)\r\nUnsupportedError: Unsupported URL: https://www.pornflip.com/v/EkRD6-vS2-s\r\n```\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nRegex Valid:\r\n```\r\n_VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'\r\n```\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..compat 
import (\n compat_parse_qs,\n compat_str,\n)\nfrom ..utils import (\n int_or_none,\n try_get,\n unified_timestamp,\n)\n\n\nclass PornFlipIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z]{11})'\n _TESTS = [{\n 'url': 'https://www.pornflip.com/v/wz7DfNhMmep',\n 'md5': '98c46639849145ae1fd77af532a9278c',\n 'info_dict': {\n 'id': 'wz7DfNhMmep',\n 'ext': 'mp4',\n 'title': '2 Amateurs swallow make his dream cumshots true',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'duration': 112,\n 'timestamp': 1481655502,\n 'upload_date': '20161213',\n 'uploader_id': '106786',\n 'uploader': 'figifoto',\n 'view_count': int,\n 'age_limit': 18,\n }\n }, {\n 'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'https://www.pornflip.com/v/%s' % video_id, video_id)\n\n flashvars = compat_parse_qs(self._search_regex(\n r'<embed[^>]+flashvars=([\"\\'])(?P<flashvars>(?:(?!\\1).)+)\\1',\n webpage, 'flashvars', group='flashvars'))\n\n title = flashvars['video_vars[title]'][0]\n\n def flashvar(kind):\n return try_get(\n flashvars, lambda x: x['video_vars[%s]' % kind][0], compat_str)\n\n formats = []\n for key, value in flashvars.items():\n if not (value and isinstance(value, list)):\n continue\n format_url = value[0]\n if key == 'video_vars[hds_manifest]':\n formats.extend(self._extract_mpd_formats(\n format_url, video_id, mpd_id='dash', fatal=False))\n continue\n height = self._search_regex(\n r'video_vars\\[video_urls\\]\\[(\\d+)', key, 'height', default=None)\n if not height:\n continue\n formats.append({\n 'url': format_url,\n 'format_id': 'http-%s' % height,\n 'height': int_or_none(height),\n })\n self._sort_formats(formats)\n\n uploader = self._html_search_regex(\n (r'<span[^>]+class=\"name\"[^>]*>\\s*<a[^>]+>\\s*<strong>(?P<uploader>[^<]+)',\n r'<meta[^>]+content=([\"\\'])[^>]*\\buploaded by (?P<uploader>.+?)\\1'),\n webpage, 'uploader', fatal=False, group='uploader')\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': title,\n 'thumbnail': flashvar('big_thumb'),\n 'duration': int_or_none(flashvar('duration')),\n 'timestamp': unified_timestamp(self._html_search_meta(\n 'uploadDate', webpage, 'timestamp')),\n 'uploader_id': flashvar('author_id'),\n 'uploader': uploader,\n 'view_count': int_or_none(flashvar('views')),\n 'age_limit': 18,\n }\n", "path": "youtube_dl/extractor/pornflip.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_parse_qs,\n compat_str,\n)\nfrom ..utils import (\n int_or_none,\n try_get,\n unified_timestamp,\n)\n\n\nclass PornFlipIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?pornflip\\.com/(?:v|embed)/(?P<id>[0-9A-Za-z-]{11})'\n _TESTS = [{\n 'url': 'https://www.pornflip.com/v/wz7DfNhMmep',\n 'md5': '98c46639849145ae1fd77af532a9278c',\n 'info_dict': {\n 'id': 'wz7DfNhMmep',\n 'ext': 'mp4',\n 'title': '2 Amateurs swallow make his dream cumshots true',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'duration': 112,\n 'timestamp': 1481655502,\n 'upload_date': '20161213',\n 'uploader_id': '106786',\n 'uploader': 'figifoto',\n 'view_count': int,\n 'age_limit': 18,\n }\n }, {\n 'url': 'https://www.pornflip.com/embed/wz7DfNhMmep',\n 'only_matching': True,\n }, {\n 'url': 'https://www.pornflip.com/v/EkRD6-vS2-s',\n 'only_matching': True,\n }, {\n 'url': 
'https://www.pornflip.com/embed/EkRD6-vS2-s',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'https://www.pornflip.com/v/%s' % video_id, video_id)\n\n flashvars = compat_parse_qs(self._search_regex(\n r'<embed[^>]+flashvars=([\"\\'])(?P<flashvars>(?:(?!\\1).)+)\\1',\n webpage, 'flashvars', group='flashvars'))\n\n title = flashvars['video_vars[title]'][0]\n\n def flashvar(kind):\n return try_get(\n flashvars, lambda x: x['video_vars[%s]' % kind][0], compat_str)\n\n formats = []\n for key, value in flashvars.items():\n if not (value and isinstance(value, list)):\n continue\n format_url = value[0]\n if key == 'video_vars[hds_manifest]':\n formats.extend(self._extract_mpd_formats(\n format_url, video_id, mpd_id='dash', fatal=False))\n continue\n height = self._search_regex(\n r'video_vars\\[video_urls\\]\\[(\\d+)', key, 'height', default=None)\n if not height:\n continue\n formats.append({\n 'url': format_url,\n 'format_id': 'http-%s' % height,\n 'height': int_or_none(height),\n })\n self._sort_formats(formats)\n\n uploader = self._html_search_regex(\n (r'<span[^>]+class=\"name\"[^>]*>\\s*<a[^>]+>\\s*<strong>(?P<uploader>[^<]+)',\n r'<meta[^>]+content=([\"\\'])[^>]*\\buploaded by (?P<uploader>.+?)\\1'),\n webpage, 'uploader', fatal=False, group='uploader')\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': title,\n 'thumbnail': flashvar('big_thumb'),\n 'duration': int_or_none(flashvar('duration')),\n 'timestamp': unified_timestamp(self._html_search_meta(\n 'uploadDate', webpage, 'timestamp')),\n 'uploader_id': flashvar('author_id'),\n 'uploader': uploader,\n 'view_count': int_or_none(flashvar('views')),\n 'age_limit': 18,\n }\n", "path": "youtube_dl/extractor/pornflip.py"}]}
| 3,602 | 349 |
gh_patches_debug_6033
|
rasdani/github-patches
|
git_diff
|
encode__starlette-1504
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate `WSGIMiddleware` in favor of `a2wsgi`
### Checklist
- [X] There are no similar issues or pull requests for this yet.
- [X] I discussed this idea on the [community chat](https://gitter.im/encode/community) and feedback is positive.
### Is your feature related to a problem? Please describe.
I want to deprecate `WSGIMiddleware` and recommend [a2wsgi](https://github.com/abersheeran/a2wsgi) on the documentation.
Right now, the `WSGIMiddleware` is not documented, so not that harmful to deprecate. I expect the deprecation message to inform about `a2wsgi` or recommend the specific page on the docs so users using the middleware can fix the warning easily.
### Describe the solution you would like.
_No response_
### Describe alternatives you considered
_No response_
### Additional context
Gitter conversation about the topic:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/wsgi.py`
Content:
```
1 import io
2 import math
3 import sys
4 import typing
5
6 import anyio
7
8 from starlette.types import Receive, Scope, Send
9
10
11 def build_environ(scope: Scope, body: bytes) -> dict:
12 """
13 Builds a scope and request body into a WSGI environ object.
14 """
15 environ = {
16 "REQUEST_METHOD": scope["method"],
17 "SCRIPT_NAME": scope.get("root_path", "").encode("utf8").decode("latin1"),
18 "PATH_INFO": scope["path"].encode("utf8").decode("latin1"),
19 "QUERY_STRING": scope["query_string"].decode("ascii"),
20 "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
21 "wsgi.version": (1, 0),
22 "wsgi.url_scheme": scope.get("scheme", "http"),
23 "wsgi.input": io.BytesIO(body),
24 "wsgi.errors": sys.stdout,
25 "wsgi.multithread": True,
26 "wsgi.multiprocess": True,
27 "wsgi.run_once": False,
28 }
29
30 # Get server name and port - required in WSGI, not in ASGI
31 server = scope.get("server") or ("localhost", 80)
32 environ["SERVER_NAME"] = server[0]
33 environ["SERVER_PORT"] = server[1]
34
35 # Get client IP address
36 if scope.get("client"):
37 environ["REMOTE_ADDR"] = scope["client"][0]
38
39 # Go through headers and make them into environ entries
40 for name, value in scope.get("headers", []):
41 name = name.decode("latin1")
42 if name == "content-length":
43 corrected_name = "CONTENT_LENGTH"
44 elif name == "content-type":
45 corrected_name = "CONTENT_TYPE"
46 else:
47 corrected_name = f"HTTP_{name}".upper().replace("-", "_")
48 # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in
49 # case
50 value = value.decode("latin1")
51 if corrected_name in environ:
52 value = environ[corrected_name] + "," + value
53 environ[corrected_name] = value
54 return environ
55
56
57 class WSGIMiddleware:
58 def __init__(self, app: typing.Callable) -> None:
59 self.app = app
60
61 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
62 assert scope["type"] == "http"
63 responder = WSGIResponder(self.app, scope)
64 await responder(receive, send)
65
66
67 class WSGIResponder:
68 def __init__(self, app: typing.Callable, scope: Scope) -> None:
69 self.app = app
70 self.scope = scope
71 self.status = None
72 self.response_headers = None
73 self.stream_send, self.stream_receive = anyio.create_memory_object_stream(
74 math.inf
75 )
76 self.response_started = False
77 self.exc_info: typing.Any = None
78
79 async def __call__(self, receive: Receive, send: Send) -> None:
80 body = b""
81 more_body = True
82 while more_body:
83 message = await receive()
84 body += message.get("body", b"")
85 more_body = message.get("more_body", False)
86 environ = build_environ(self.scope, body)
87
88 async with anyio.create_task_group() as task_group:
89 task_group.start_soon(self.sender, send)
90 async with self.stream_send:
91 await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response)
92 if self.exc_info is not None:
93 raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])
94
95 async def sender(self, send: Send) -> None:
96 async with self.stream_receive:
97 async for message in self.stream_receive:
98 await send(message)
99
100 def start_response(
101 self,
102 status: str,
103 response_headers: typing.List[typing.Tuple[str, str]],
104 exc_info: typing.Any = None,
105 ) -> None:
106 self.exc_info = exc_info
107 if not self.response_started:
108 self.response_started = True
109 status_code_string, _ = status.split(" ", 1)
110 status_code = int(status_code_string)
111 headers = [
112 (name.strip().encode("ascii").lower(), value.strip().encode("ascii"))
113 for name, value in response_headers
114 ]
115 anyio.from_thread.run(
116 self.stream_send.send,
117 {
118 "type": "http.response.start",
119 "status": status_code,
120 "headers": headers,
121 },
122 )
123
124 def wsgi(self, environ: dict, start_response: typing.Callable) -> None:
125 for chunk in self.app(environ, start_response):
126 anyio.from_thread.run(
127 self.stream_send.send,
128 {"type": "http.response.body", "body": chunk, "more_body": True},
129 )
130
131 anyio.from_thread.run(
132 self.stream_send.send, {"type": "http.response.body", "body": b""}
133 )
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py
--- a/starlette/middleware/wsgi.py
+++ b/starlette/middleware/wsgi.py
@@ -2,11 +2,18 @@
import math
import sys
import typing
+import warnings
import anyio
from starlette.types import Receive, Scope, Send
+warnings.warn(
+ "starlette.middleware.wsgi is deprecated and will be removed in a future release. "
+ "Please refer to https://github.com/abersheeran/a2wsgi as a replacement.",
+ DeprecationWarning,
+)
+
def build_environ(scope: Scope, body: bytes) -> dict:
"""
|
{"golden_diff": "diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py\n--- a/starlette/middleware/wsgi.py\n+++ b/starlette/middleware/wsgi.py\n@@ -2,11 +2,18 @@\n import math\n import sys\n import typing\n+import warnings\n \n import anyio\n \n from starlette.types import Receive, Scope, Send\n \n+warnings.warn(\n+ \"starlette.middleware.wsgi is deprecated and will be removed in a future release. \"\n+ \"Please refer to https://github.com/abersheeran/a2wsgi as a replacement.\",\n+ DeprecationWarning,\n+)\n+\n \n def build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n", "issue": "Deprecate `WSGIMiddleware` in favor of `a2wsgi`\n### Checklist\n\n- [X] There are no similar issues or pull requests for this yet.\n- [X] I discussed this idea on the [community chat](https://gitter.im/encode/community) and feedback is positive.\n\n### Is your feature related to a problem? Please describe.\n\nI want to deprecate `WSGIMiddleware` and recommend [a2wsgi](https://github.com/abersheeran/a2wsgi) on the documentation.\r\n\r\nRight now, the `WSGIMiddleware` is not documented, so not that harmful to deprecate. I expect the deprecation message to inform about `a2wsgi` or recommend the specific page on the docs so users using the middleware can fix the warning easily.\n\n### Describe the solution you would like.\n\n_No response_\n\n### Describe alternatives you considered\n\n_No response_\n\n### Additional context\n\nGitter conversation about the topic:\r\n\r\n\r\n\n", "before_files": [{"content": "import io\nimport math\nimport sys\nimport typing\n\nimport anyio\n\nfrom starlette.types import Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": scope.get(\"root_path\", \"\").encode(\"utf8\").decode(\"latin1\"),\n \"PATH_INFO\": scope[\"path\"].encode(\"utf8\").decode(\"latin1\"),\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in\n # case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable) -> None:\n self.app = app\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n assert scope[\"type\"] == \"http\"\n responder = WSGIResponder(self.app, scope)\n await 
responder(receive, send)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n self.scope = scope\n self.status = None\n self.response_headers = None\n self.stream_send, self.stream_receive = anyio.create_memory_object_stream(\n math.inf\n )\n self.response_started = False\n self.exc_info: typing.Any = None\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n\n async with anyio.create_task_group() as task_group:\n task_group.start_soon(self.sender, send)\n async with self.stream_send:\n await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])\n\n async def sender(self, send: Send) -> None:\n async with self.stream_receive:\n async for message in self.stream_receive:\n await send(message)\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.strip().encode(\"ascii\").lower(), value.strip().encode(\"ascii\"))\n for name, value in response_headers\n ]\n anyio.from_thread.run(\n self.stream_send.send,\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n },\n )\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n anyio.from_thread.run(\n self.stream_send.send,\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True},\n )\n\n anyio.from_thread.run(\n self.stream_send.send, {\"type\": \"http.response.body\", \"body\": b\"\"}\n )\n", "path": "starlette/middleware/wsgi.py"}], "after_files": [{"content": "import io\nimport math\nimport sys\nimport typing\nimport warnings\n\nimport anyio\n\nfrom starlette.types import Receive, Scope, Send\n\nwarnings.warn(\n \"starlette.middleware.wsgi is deprecated and will be removed in a future release. 
\"\n \"Please refer to https://github.com/abersheeran/a2wsgi as a replacement.\",\n DeprecationWarning,\n)\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": scope.get(\"root_path\", \"\").encode(\"utf8\").decode(\"latin1\"),\n \"PATH_INFO\": scope[\"path\"].encode(\"utf8\").decode(\"latin1\"),\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in\n # case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable) -> None:\n self.app = app\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n assert scope[\"type\"] == \"http\"\n responder = WSGIResponder(self.app, scope)\n await responder(receive, send)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n self.scope = scope\n self.status = None\n self.response_headers = None\n self.stream_send, self.stream_receive = anyio.create_memory_object_stream(\n math.inf\n )\n self.response_started = False\n self.exc_info: typing.Any = None\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n\n async with anyio.create_task_group() as task_group:\n task_group.start_soon(self.sender, send)\n async with self.stream_send:\n await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])\n\n async def sender(self, send: Send) -> None:\n async with self.stream_receive:\n async for message in self.stream_receive:\n await send(message)\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.strip().encode(\"ascii\").lower(), value.strip().encode(\"ascii\"))\n for name, value in 
response_headers\n ]\n anyio.from_thread.run(\n self.stream_send.send,\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n },\n )\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n anyio.from_thread.run(\n self.stream_send.send,\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True},\n )\n\n anyio.from_thread.run(\n self.stream_send.send, {\"type\": \"http.response.body\", \"body\": b\"\"}\n )\n", "path": "starlette/middleware/wsgi.py"}]}
| 1,913 | 156 |
gh_patches_debug_4362
|
rasdani/github-patches
|
git_diff
|
psf__black-2836
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ignore __pypackages__ directory contents
**Describe the bug**
When using [PDM](https://pdm.fming.dev/), `black` does not ignore `__pypackages__` directory contents.
**To Reproduce**
Run `pdm run black .`
**Expected behavior**
`black` should reformat only project files.
**Environment**
- Black's version: 22.1.0
- PDM version: 1.12.6
- OS and Python version: Ubuntu 21.10 with Python 3.10.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/black/const.py`
Content:
```
1 DEFAULT_LINE_LENGTH = 88
2 DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950
3 DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$"
4 STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/black/const.py b/src/black/const.py
--- a/src/black/const.py
+++ b/src/black/const.py
@@ -1,4 +1,4 @@
DEFAULT_LINE_LENGTH = 88
-DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950
+DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist|__pypackages__)/" # noqa: B950
DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$"
STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"
|
{"golden_diff": "diff --git a/src/black/const.py b/src/black/const.py\n--- a/src/black/const.py\n+++ b/src/black/const.py\n@@ -1,4 +1,4 @@\n DEFAULT_LINE_LENGTH = 88\n-DEFAULT_EXCLUDES = r\"/(\\.direnv|\\.eggs|\\.git|\\.hg|\\.mypy_cache|\\.nox|\\.tox|\\.venv|venv|\\.svn|_build|buck-out|build|dist)/\" # noqa: B950\n+DEFAULT_EXCLUDES = r\"/(\\.direnv|\\.eggs|\\.git|\\.hg|\\.mypy_cache|\\.nox|\\.tox|\\.venv|venv|\\.svn|_build|buck-out|build|dist|__pypackages__)/\" # noqa: B950\n DEFAULT_INCLUDES = r\"(\\.pyi?|\\.ipynb)$\"\n STDIN_PLACEHOLDER = \"__BLACK_STDIN_FILENAME__\"\n", "issue": "Ignore __pypackages__ directory contents\n**Describe the bug**\r\n\r\nWhen using [PDM](https://pdm.fming.dev/), `black` does not ignore `__pypackages__` directory contents.\r\n\r\n**To Reproduce**\r\n\r\nRun `pdm run black .`\r\n\r\n**Expected behavior**\r\n\r\n`black` should reformat only project files.\r\n\r\n**Environment**\r\n\r\n- Black's version: 22.1.0\r\n- PDM version: 1.12.6\r\n- OS and Python version: Ubuntu 21.10 with Python 3.10.1\r\n\n", "before_files": [{"content": "DEFAULT_LINE_LENGTH = 88\nDEFAULT_EXCLUDES = r\"/(\\.direnv|\\.eggs|\\.git|\\.hg|\\.mypy_cache|\\.nox|\\.tox|\\.venv|venv|\\.svn|_build|buck-out|build|dist)/\" # noqa: B950\nDEFAULT_INCLUDES = r\"(\\.pyi?|\\.ipynb)$\"\nSTDIN_PLACEHOLDER = \"__BLACK_STDIN_FILENAME__\"\n", "path": "src/black/const.py"}], "after_files": [{"content": "DEFAULT_LINE_LENGTH = 88\nDEFAULT_EXCLUDES = r\"/(\\.direnv|\\.eggs|\\.git|\\.hg|\\.mypy_cache|\\.nox|\\.tox|\\.venv|venv|\\.svn|_build|buck-out|build|dist|__pypackages__)/\" # noqa: B950\nDEFAULT_INCLUDES = r\"(\\.pyi?|\\.ipynb)$\"\nSTDIN_PLACEHOLDER = \"__BLACK_STDIN_FILENAME__\"\n", "path": "src/black/const.py"}]}
| 497 | 218 |
gh_patches_debug_25971
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-214
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add torch.no_grad() to LanguageModel.generate_text()
The autograd engine is not required when using an LM to generate text.
So, as pointed out in #167, `torch.no_grad()` needs to be added to `LanguageModel.generate_text()` for better performance and to avoid out of memory issues.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/language_model.py`
Content:
```
1 import torch.nn as nn
2 import torch
3 import math
4 from torch.autograd import Variable
5 from typing import List
6 from flair.data import Dictionary
7
8
9 class LanguageModel(nn.Module):
10 """Container module with an encoder, a recurrent module, and a decoder."""
11
12 def __init__(self,
13 dictionary: Dictionary,
14 is_forward_lm: bool,
15 hidden_size: int,
16 nlayers: int,
17 embedding_size: int = 100,
18 nout=None,
19 dropout=0.5):
20
21 super(LanguageModel, self).__init__()
22
23 self.dictionary = dictionary
24 self.is_forward_lm: bool = is_forward_lm
25
26 self.dropout = dropout
27 self.hidden_size = hidden_size
28 self.embedding_size = embedding_size
29 self.nlayers = nlayers
30
31 self.drop = nn.Dropout(dropout)
32 self.encoder = nn.Embedding(len(dictionary), embedding_size)
33
34 if nlayers == 1:
35 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)
36 else:
37 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)
38
39 self.hidden = None
40
41 self.nout = nout
42 if nout is not None:
43 self.proj = nn.Linear(hidden_size, nout)
44 self.initialize(self.proj.weight)
45 self.decoder = nn.Linear(nout, len(dictionary))
46 else:
47 self.proj = None
48 self.decoder = nn.Linear(hidden_size, len(dictionary))
49
50 self.init_weights()
51
52 # auto-spawn on GPU if available
53 if torch.cuda.is_available():
54 self.cuda()
55
56 def init_weights(self):
57 initrange = 0.1
58 self.encoder.weight.data.uniform_(-initrange, initrange)
59 self.decoder.bias.data.fill_(0)
60 self.decoder.weight.data.uniform_(-initrange, initrange)
61
62 def set_hidden(self, hidden):
63 self.hidden = hidden
64
65 def forward(self, input, hidden, ordered_sequence_lengths=None):
66 encoded = self.encoder(input)
67 emb = self.drop(encoded)
68
69 self.rnn.flatten_parameters()
70
71 output, hidden = self.rnn(emb, hidden)
72
73 if self.proj is not None:
74 output = self.proj(output)
75
76 output = self.drop(output)
77
78 decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
79
80 return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden
81
82 def init_hidden(self, bsz):
83 weight = next(self.parameters()).data
84 return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),
85 Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))
86
87 def get_representation(self, strings: List[str], detach_from_lm=True):
88
89 sequences_as_char_indices: List[List[int]] = []
90 for string in strings:
91 char_indices = [self.dictionary.get_idx_for_item(char) for char in string]
92 sequences_as_char_indices.append(char_indices)
93
94 batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))
95
96 if torch.cuda.is_available():
97 batch = batch.cuda()
98
99 hidden = self.init_hidden(len(strings))
100 prediction, rnn_output, hidden = self.forward(batch, hidden)
101
102 if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)
103
104 return rnn_output
105
106 def repackage_hidden(self, h):
107 """Wraps hidden states in new Variables, to detach them from their history."""
108 if type(h) == torch.Tensor:
109 return Variable(h.data)
110 else:
111 return tuple(self.repackage_hidden(v) for v in h)
112
113 def initialize(self, matrix):
114 in_, out_ = matrix.size()
115 stdv = math.sqrt(3. / (in_ + out_))
116 matrix.data.uniform_(-stdv, stdv)
117
118 @classmethod
119 def load_language_model(cls, model_file):
120
121 if not torch.cuda.is_available():
122 state = torch.load(model_file, map_location='cpu')
123 else:
124 state = torch.load(model_file)
125
126 model = LanguageModel(state['dictionary'],
127 state['is_forward_lm'],
128 state['hidden_size'],
129 state['nlayers'],
130 state['embedding_size'],
131 state['nout'],
132 state['dropout'])
133 model.load_state_dict(state['state_dict'])
134 model.eval()
135 if torch.cuda.is_available():
136 model.cuda()
137 return model
138
139 def save(self, file):
140 model_state = {
141 'state_dict': self.state_dict(),
142 'dictionary': self.dictionary,
143 'is_forward_lm': self.is_forward_lm,
144 'hidden_size': self.hidden_size,
145 'nlayers': self.nlayers,
146 'embedding_size': self.embedding_size,
147 'nout': self.nout,
148 'dropout': self.dropout
149 }
150 torch.save(model_state, file, pickle_protocol=4)
151
152 def generate_text(self, number_of_characters=1000) -> str:
153 characters = []
154
155 idx2item = self.dictionary.idx2item
156
157 # initial hidden state
158 hidden = self.init_hidden(1)
159 input = torch.rand(1, 1).mul(len(idx2item)).long()
160 if torch.cuda.is_available():
161 input = input.cuda()
162
163 for i in range(number_of_characters):
164 prediction, rnn_output, hidden = self.forward(input, hidden)
165 word_weights = prediction.squeeze().data.div(1.0).exp().cpu()
166 word_idx = torch.multinomial(word_weights, 1)[0]
167 input.data.fill_(word_idx)
168 word = idx2item[word_idx].decode('UTF-8')
169 characters.append(word)
170
171 return ''.join(characters)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/models/language_model.py b/flair/models/language_model.py
--- a/flair/models/language_model.py
+++ b/flair/models/language_model.py
@@ -150,22 +150,23 @@
torch.save(model_state, file, pickle_protocol=4)
def generate_text(self, number_of_characters=1000) -> str:
- characters = []
-
- idx2item = self.dictionary.idx2item
-
- # initial hidden state
- hidden = self.init_hidden(1)
- input = torch.rand(1, 1).mul(len(idx2item)).long()
- if torch.cuda.is_available():
- input = input.cuda()
-
- for i in range(number_of_characters):
- prediction, rnn_output, hidden = self.forward(input, hidden)
- word_weights = prediction.squeeze().data.div(1.0).exp().cpu()
- word_idx = torch.multinomial(word_weights, 1)[0]
- input.data.fill_(word_idx)
- word = idx2item[word_idx].decode('UTF-8')
- characters.append(word)
-
- return ''.join(characters)
+ with torch.no_grad():
+ characters = []
+
+ idx2item = self.dictionary.idx2item
+
+ # initial hidden state
+ hidden = self.init_hidden(1)
+ input = torch.rand(1, 1).mul(len(idx2item)).long()
+ if torch.cuda.is_available():
+ input = input.cuda()
+
+ for i in range(number_of_characters):
+ prediction, rnn_output, hidden = self.forward(input, hidden)
+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()
+ word_idx = torch.multinomial(word_weights, 1)[0]
+ input.data.fill_(word_idx)
+ word = idx2item[word_idx].decode('UTF-8')
+ characters.append(word)
+
+ return ''.join(characters)
|
{"golden_diff": "diff --git a/flair/models/language_model.py b/flair/models/language_model.py\n--- a/flair/models/language_model.py\n+++ b/flair/models/language_model.py\n@@ -150,22 +150,23 @@\n torch.save(model_state, file, pickle_protocol=4)\n \n def generate_text(self, number_of_characters=1000) -> str:\n- characters = []\n-\n- idx2item = self.dictionary.idx2item\n-\n- # initial hidden state\n- hidden = self.init_hidden(1)\n- input = torch.rand(1, 1).mul(len(idx2item)).long()\n- if torch.cuda.is_available():\n- input = input.cuda()\n-\n- for i in range(number_of_characters):\n- prediction, rnn_output, hidden = self.forward(input, hidden)\n- word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n- word_idx = torch.multinomial(word_weights, 1)[0]\n- input.data.fill_(word_idx)\n- word = idx2item[word_idx].decode('UTF-8')\n- characters.append(word)\n-\n- return ''.join(characters)\n+ with torch.no_grad():\n+ characters = []\n+\n+ idx2item = self.dictionary.idx2item\n+\n+ # initial hidden state\n+ hidden = self.init_hidden(1)\n+ input = torch.rand(1, 1).mul(len(idx2item)).long()\n+ if torch.cuda.is_available():\n+ input = input.cuda()\n+\n+ for i in range(number_of_characters):\n+ prediction, rnn_output, hidden = self.forward(input, hidden)\n+ word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n+ word_idx = torch.multinomial(word_weights, 1)[0]\n+ input.data.fill_(word_idx)\n+ word = idx2item[word_idx].decode('UTF-8')\n+ characters.append(word)\n+\n+ return ''.join(characters)\n", "issue": "Add torch.no_grad() to LanguageModel.generate_text()\nThe autograd engine is not required when using an LM to generate text.\r\n\r\nSo, as pointed out in #167, `torch.no_grad()` needs to be added to `LanguageModel.generate_text()` for better performance and to avoid out of memory issues.\n", "before_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = 
self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n\n def generate_text(self, number_of_characters=1000) -> str:\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n input = torch.rand(1, 1).mul(len(idx2item)).long()\n if torch.cuda.is_available():\n input = input.cuda()\n\n for i in range(number_of_characters):\n prediction, rnn_output, hidden = self.forward(input, hidden)\n word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.data.fill_(word_idx)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n return ''.join(characters)\n", "path": "flair/models/language_model.py"}], "after_files": [{"content": "import torch.nn as nn\nimport torch\nimport math\nfrom torch.autograd import Variable\nfrom typing import List\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n 
nout=None,\n dropout=0.5):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()),\n Variable(weight.new(self.nlayers, bsz, self.hidden_size).zero_()))\n\n def get_representation(self, strings: List[str], detach_from_lm=True):\n\n sequences_as_char_indices: List[List[int]] = []\n for string in strings:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = Variable(torch.LongTensor(sequences_as_char_indices).transpose(0, 1))\n\n if torch.cuda.is_available():\n batch = batch.cuda()\n\n hidden = self.init_hidden(len(strings))\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n\n if detach_from_lm: rnn_output = self.repackage_hidden(rnn_output)\n\n return rnn_output\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return Variable(h.data)\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. 
/ (in_ + out_))\n matrix.data.uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file):\n\n if not torch.cuda.is_available():\n state = torch.load(model_file, map_location='cpu')\n else:\n state = torch.load(model_file)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n if torch.cuda.is_available():\n model.cuda()\n return model\n\n def save(self, file):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n torch.save(model_state, file, pickle_protocol=4)\n\n def generate_text(self, number_of_characters=1000) -> str:\n with torch.no_grad():\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n input = torch.rand(1, 1).mul(len(idx2item)).long()\n if torch.cuda.is_available():\n input = input.cuda()\n\n for i in range(number_of_characters):\n prediction, rnn_output, hidden = self.forward(input, hidden)\n word_weights = prediction.squeeze().data.div(1.0).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.data.fill_(word_idx)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n return ''.join(characters)\n", "path": "flair/models/language_model.py"}]}
| 1,973 | 437 |
gh_patches_debug_23756
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-1398
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log_name of LogReport with keys causes AttributeError
In MNIST example you change the code
https://github.com/pfnet/chainer/blob/master/examples/mnist/train_mnist.py#L84
```
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
```
to
```
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(log_name='log_{.iteration}'))
```
run train_mnist.py and you'll get `AttributeError: 'dict' object has no attribute 'iteration'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/training/extensions/log_report.py`
Content:
```
1 import json
2 import os
3 import tempfile
4
5 import six
6
7 from chainer import reporter
8 import chainer.serializer as serializer_module
9 from chainer.training import extension
10 import chainer.training.trigger as trigger_module
11
12
13 class LogReport(extension.Extension):
14
15 """Trainer extension to output the accumulated results to a log file.
16
17 This extension accumulates the observations of the trainer to
18 :class:`~chainer.DictSummary` at a regular interval specified by a supplied
19 trigger, and writes them into a log file in JSON format.
20
21 There are two triggers to handle this extension. One is the trigger to
22 invoke this extension, which is used to handle the timing of accumulating
23 the results. It is set to ``1, 'iteration'`` by default. The other is the
24 trigger to determine when to emit the result. When this trigger returns
25 True, this extension appends the summary of accumulated values to the list
26 of past summaries, and writes the list to the log file. Then, this
27 extension makes a new fresh summary object which is used until the next
28 time that the trigger fires.
29
30 It also adds ``'epoch'`` and ``'iteration'`` entries to each result
31 dictionary, which are the epoch and iteration counts at the output.
32
33 Args:
34 keys (iterable of strs): Keys of values to accumulate. If this is None,
35 all the values are accumulated and output to the log file.
36 trigger: Trigger that decides when to aggregate the result and output
37 the values. This is distinct from the trigger of this extension
38 itself. If it is a tuple in the form ``<int>, 'epoch'`` or
39 ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.
40 postprocess: Callback to postprocess the result dictionaries. Each
41 result dictionary is passed to this callback on the output. This
42 callback can modify the result dictionaries, which are used to
43 output to the log file.
44 log_name (str): Name of the log file under the output directory. It can
45 be a format string: the last result dictionary is passed for the
46 formatting. For example, users can use '{.iteration}' to separate
47 the log files for different iterations. If the log name is None, it
48 does not output the log to any file.
49
50 """
51 def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,
52 log_name='log'):
53 self._keys = keys
54 self._trigger = trigger_module.get_trigger(trigger)
55 self._postprocess = postprocess
56 self._log_name = log_name
57 self._log = []
58
59 self._init_summary()
60
61 def __call__(self, trainer):
62 # accumulate the observations
63 keys = self._keys
64 observation = trainer.observation
65 summary = self._summary
66
67 if keys is None:
68 summary.add(observation)
69 else:
70 summary.add({k: observation[k] for k in keys if k in observation})
71
72 if self._trigger(trainer):
73 # output the result
74 stats = self._summary.compute_mean()
75 stats_cpu = {}
76 for name, value in six.iteritems(stats):
77 stats_cpu[name] = float(value) # copy to CPU
78
79 updater = trainer.updater
80 stats_cpu['epoch'] = updater.epoch
81 stats_cpu['iteration'] = updater.iteration
82
83 if self._postprocess is not None:
84 self._postprocess(stats_cpu)
85
86 self._log.append(stats_cpu)
87
88 # write to the log file
89 if self._log_name is not None:
90 log_name = self._log_name.format(stats_cpu)
91 fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out)
92 with os.fdopen(fd, 'w') as f:
93 json.dump(self._log, f, indent=4)
94 os.rename(path, os.path.join(trainer.out, log_name))
95
96 # reset the summary for the next output
97 self._init_summary()
98
99 @property
100 def log(self):
101 """The current list of observation dictionaries."""
102 return self._log
103
104 def serialize(self, serializer):
105 # Note that this serialization may lose some information of small
106 # numerical differences.
107 if isinstance(serializer, serializer_module.Serializer):
108 log = json.dumps(self._log)
109 serializer('_log', log)
110 else:
111 log = serializer('_log', '')
112 self._log = json.loads(log)
113
114 def _init_summary(self):
115 self._summary = reporter.DictSummary()
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/training/extensions/log_report.py b/chainer/training/extensions/log_report.py
--- a/chainer/training/extensions/log_report.py
+++ b/chainer/training/extensions/log_report.py
@@ -43,7 +43,7 @@
output to the log file.
log_name (str): Name of the log file under the output directory. It can
be a format string: the last result dictionary is passed for the
- formatting. For example, users can use '{.iteration}' to separate
+ formatting. For example, users can use '{iteration}' to separate
the log files for different iterations. If the log name is None, it
does not output the log to any file.
@@ -87,7 +87,7 @@
# write to the log file
if self._log_name is not None:
- log_name = self._log_name.format(stats_cpu)
+ log_name = self._log_name.format(**stats_cpu)
fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out)
with os.fdopen(fd, 'w') as f:
json.dump(self._log, f, indent=4)
|
{"golden_diff": "diff --git a/chainer/training/extensions/log_report.py b/chainer/training/extensions/log_report.py\n--- a/chainer/training/extensions/log_report.py\n+++ b/chainer/training/extensions/log_report.py\n@@ -43,7 +43,7 @@\n output to the log file.\n log_name (str): Name of the log file under the output directory. It can\n be a format string: the last result dictionary is passed for the\n- formatting. For example, users can use '{.iteration}' to separate\n+ formatting. For example, users can use '{iteration}' to separate\n the log files for different iterations. If the log name is None, it\n does not output the log to any file.\n \n@@ -87,7 +87,7 @@\n \n # write to the log file\n if self._log_name is not None:\n- log_name = self._log_name.format(stats_cpu)\n+ log_name = self._log_name.format(**stats_cpu)\n fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out)\n with os.fdopen(fd, 'w') as f:\n json.dump(self._log, f, indent=4)\n", "issue": "log_name of LogReport with keys causes AttributeError\nIn MNIST example you change the code\n\nhttps://github.com/pfnet/chainer/blob/master/examples/mnist/train_mnist.py#L84\n\n```\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport())\n```\n\nto \n\n```\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport(log_name='log_{.iteration}'))\n```\n\nrun train_mnist.py and you'll get `AttributeError: 'dict' object has no attribute 'iteration'`\n\n", "before_files": [{"content": "import json\nimport os\nimport tempfile\n\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\n\nclass LogReport(extension.Extension):\n\n \"\"\"Trainer extension to output the accumulated results to a log file.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and writes them into a log file in JSON format.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n Args:\n keys (iterable of strs): Keys of values to accumulate. If this is None,\n all the values are accumulated and output to the log file.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or\n ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Each\n result dictionary is passed to this callback on the output. This\n callback can modify the result dictionaries, which are used to\n output to the log file.\n log_name (str): Name of the log file under the output directory. 
It can\n be a format string: the last result dictionary is passed for the\n formatting. For example, users can use '{.iteration}' to separate\n the log files for different iterations. If the log name is None, it\n does not output the log to any file.\n\n \"\"\"\n def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,\n log_name='log'):\n self._keys = keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._postprocess = postprocess\n self._log_name = log_name\n self._log = []\n\n self._init_summary()\n\n def __call__(self, trainer):\n # accumulate the observations\n keys = self._keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n # output the result\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n\n if self._postprocess is not None:\n self._postprocess(stats_cpu)\n\n self._log.append(stats_cpu)\n\n # write to the log file\n if self._log_name is not None:\n log_name = self._log_name.format(stats_cpu)\n fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out)\n with os.fdopen(fd, 'w') as f:\n json.dump(self._log, f, indent=4)\n os.rename(path, os.path.join(trainer.out, log_name))\n\n # reset the summary for the next output\n self._init_summary()\n\n @property\n def log(self):\n \"\"\"The current list of observation dictionaries.\"\"\"\n return self._log\n\n def serialize(self, serializer):\n # Note that this serialization may lose some information of small\n # numerical differences.\n if isinstance(serializer, serializer_module.Serializer):\n log = json.dumps(self._log)\n serializer('_log', log)\n else:\n log = serializer('_log', '')\n self._log = json.loads(log)\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/log_report.py"}], "after_files": [{"content": "import json\nimport os\nimport tempfile\n\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\n\nclass LogReport(extension.Extension):\n\n \"\"\"Trainer extension to output the accumulated results to a log file.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and writes them into a log file in JSON format.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n Args:\n keys (iterable of strs): Keys of values to accumulate. 
If this is None,\n all the values are accumulated and output to the log file.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or\n ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Each\n result dictionary is passed to this callback on the output. This\n callback can modify the result dictionaries, which are used to\n output to the log file.\n log_name (str): Name of the log file under the output directory. It can\n be a format string: the last result dictionary is passed for the\n formatting. For example, users can use '{iteration}' to separate\n the log files for different iterations. If the log name is None, it\n does not output the log to any file.\n\n \"\"\"\n def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,\n log_name='log'):\n self._keys = keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._postprocess = postprocess\n self._log_name = log_name\n self._log = []\n\n self._init_summary()\n\n def __call__(self, trainer):\n # accumulate the observations\n keys = self._keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n # output the result\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n\n if self._postprocess is not None:\n self._postprocess(stats_cpu)\n\n self._log.append(stats_cpu)\n\n # write to the log file\n if self._log_name is not None:\n log_name = self._log_name.format(**stats_cpu)\n fd, path = tempfile.mkstemp(prefix=log_name, dir=trainer.out)\n with os.fdopen(fd, 'w') as f:\n json.dump(self._log, f, indent=4)\n os.rename(path, os.path.join(trainer.out, log_name))\n\n # reset the summary for the next output\n self._init_summary()\n\n @property\n def log(self):\n \"\"\"The current list of observation dictionaries.\"\"\"\n return self._log\n\n def serialize(self, serializer):\n # Note that this serialization may lose some information of small\n # numerical differences.\n if isinstance(serializer, serializer_module.Serializer):\n log = json.dumps(self._log)\n serializer('_log', log)\n else:\n log = serializer('_log', '')\n self._log = json.loads(log)\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/log_report.py"}]}
| 1,611 | 260 |
gh_patches_debug_42503
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-12122
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nvda does not automatically read messages received on skype for business
### Steps to reproduce:
I'm sure I've seen this working for some time.
I have not used skype for business for a long time but in my current work I'm making use of it again, and when I received a message on skype for business, nvda did not automatically read the received message.
Open a conversation on skype for business type something and wait inside the conversation for your partner to respond.
### Actual behavior:
The nvda is mute.
### Expected behavior:
Nvda should automatically announce the response received.
### System configuration
#### NVDA installed/portable/running from source:
install
#### NVDA version:
2018.4.1
#### Windows version:
10 17134.556
#### Name and version of other software in use when reproducing the issue:
office 16.0.4266.1001
#### Other information about your system:
### Other questions
#### Does the issue still occur after restarting your PC?
yes
#### Have you tried any other versions of NVDA?
no
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/appModules/lync.py`
Content:
```
1 #A part of NonVisual Desktop Access (NVDA)
2 #This file is covered by the GNU General Public License.
3 #See the file COPYING for more details.
4 #Copyright (C) 2017 NV Access Limited
5
6 """appModule for Microsoft Skype for business. """
7
8 import ui
9 from NVDAObjects.UIA import UIA
10 import appModuleHandler
11
12 class NetUIRicherLabel(UIA):
13 """A label sometimes found within list items that can fire live region changes, such as for chat messages."""
14
15 def event_liveRegionChange(self):
16 # The base liveRegionChange event is not enough as Skype for Business concatinates recent chat messages from the same person within the same minute
17 # Therefore, specifically strip out the chat content and only report the most recent part added.
18 # The object's name contains the full message (I.e. person: content, timestamp) loosely separated by commas.
19 # Example string: "Michael Curran : , , Hello\r\n\r\nThis is a test , 10:45 am."
20 # Where person is "Michael Curran", content is "Hello\nThis is a test" and timestamp is "10:45 am"
21 # The object's value just contains the content.
22 # Example: "Hello\rThis is a test"
23 # We are only interested in person and content
24 # Therefore use value (content) to locate and split off the person from the name (fullText)
25 # Normalize the usage of end-of-line characters (name and value seem to expose them differently, which would break comparison)
26 content=self.value.replace('\r','\n').strip()
27 fullText=self.name.replace('\r\n\r\n','\n')
28 contentLines=content.split('\n')
29 contentStartIndex=fullText.find(content)
30 pretext=fullText[:contentStartIndex]
31 # There are some annoying comma characters after the person's name
32 pretext=pretext.replace(' ,','')
33 # If the objects are the same, the person is the same, and the new content is the old content but with more appended, report the appended content
34 # Otherwise, report the person and the initial content
35 runtimeID=self.UIAElement.getRuntimeId()
36 lastRuntimeID,lastPretext,lastContentLines=self.appModule._lastLiveChatMessageData
37 contentLinesLen=len(contentLines)
38 lastContentLinesLen=len(lastContentLines)
39 if runtimeID==lastRuntimeID and pretext==lastPretext and contentLinesLen>lastContentLinesLen and contentLines[:lastContentLinesLen]==lastContentLines:
40 message="\n".join(contentLines[lastContentLinesLen:])
41 else:
42 message=pretext+content
43 ui.message(message)
44 # Cache the message data for later possible comparisons
45 self.appModule._lastLiveChatMessageData=runtimeID,pretext,contentLines
46
47 class AppModule(appModuleHandler.AppModule):
48
49 # data to store the last chat message (runtime ID,person,content lines)
50 _lastLiveChatMessageData=[],"",[]
51
52 def chooseNVDAObjectOverlayClasses(self,obj,clsList):
53 if isinstance(obj,UIA) and obj.UIAElement.cachedClassName=='NetUIRicherLabel':
54 clsList.insert(0,NetUIRicherLabel)
55 return clsList
56
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/appModules/lync.py b/source/appModules/lync.py
--- a/source/appModules/lync.py
+++ b/source/appModules/lync.py
@@ -4,10 +4,14 @@
#Copyright (C) 2017 NV Access Limited
"""appModule for Microsoft Skype for business. """
-
+
import ui
from NVDAObjects.UIA import UIA
import appModuleHandler
+from logHandler import log
+
+import re
+
class NetUIRicherLabel(UIA):
"""A label sometimes found within list items that can fire live region changes, such as for chat messages."""
@@ -15,19 +19,45 @@
def event_liveRegionChange(self):
# The base liveRegionChange event is not enough as Skype for Business concatinates recent chat messages from the same person within the same minute
# Therefore, specifically strip out the chat content and only report the most recent part added.
- # The object's name contains the full message (I.e. person: content, timestamp) loosely separated by commas.
+ # When not empty, the object's name contains the full message (I.e. person: content, timestamp)
+ # loosely separated by commas.
# Example string: "Michael Curran : , , Hello\r\n\r\nThis is a test , 10:45 am."
# Where person is "Michael Curran", content is "Hello\nThis is a test" and timestamp is "10:45 am"
- # The object's value just contains the content.
- # Example: "Hello\rThis is a test"
- # We are only interested in person and content
- # Therefore use value (content) to locate and split off the person from the name (fullText)
+
# Normalize the usage of end-of-line characters (name and value seem to expose them differently, which would break comparison)
- content=self.value.replace('\r','\n').strip()
fullText=self.name.replace('\r\n\r\n','\n')
+
+ # At the object's creation, an unuseful liveRegionChange event is triggered with an empty name,
+ # so we discard it.
+ if not self.name.strip():
+ return
+
+ if self.value is not None:
+ # For some versions of Lync / Skype for Business, the object's value contains just the content.
+ # Example: "Hello\rThis is a test"
+ # We are only interested in person and content
+ # Therefore use value (content) to locate and split off the person from the name (fullText)
+ content = self.value.replace('\r', '\n').strip()
+ contentStartIndex = fullText.find(content)
+ pretext = fullText[:contentStartIndex]
+ else:
+ # For other versions of Lync / Skype for Business, self.value is just None.
+ # So we just look at self.name formatting to split content from person and timestamp (less robust).
+ pattern = r'^(?P<name>.+?): (?P<priority>.*?), , (?P<content>.+),(?!, , ) , (?P<timestamp>.+)'
+ match = re.match(pattern, self.name, flags=re.DOTALL)
+ if match:
+ pretext = match['name']
+ priority = match['priority']
+ content = match['content']
+ if priority:
+ content = priority + ', ' + content
+ else:
+ # In case no match is found, log the unexpected message and return the whole message.
+ log.error(f'Unrecognized pattern in the following message: {self.name}')
+ pretext = ''
+ content = self.name
+ content = content.replace('\r', '\n').strip()
contentLines=content.split('\n')
- contentStartIndex=fullText.find(content)
- pretext=fullText[:contentStartIndex]
# There are some annoying comma characters after the person's name
pretext=pretext.replace(' ,','')
# If the objects are the same, the person is the same, and the new content is the old content but with more appended, report the appended content
|
{"golden_diff": "diff --git a/source/appModules/lync.py b/source/appModules/lync.py\n--- a/source/appModules/lync.py\n+++ b/source/appModules/lync.py\n@@ -4,10 +4,14 @@\n #Copyright (C) 2017 NV Access Limited\r\n \r\n \"\"\"appModule for Microsoft Skype for business. \"\"\"\r\n- \r\n+\r\n import ui\r\n from NVDAObjects.UIA import UIA\r\n import appModuleHandler\r\n+from logHandler import log\r\n+\r\n+import re\r\n+\r\n \r\n class NetUIRicherLabel(UIA):\r\n \t\"\"\"A label sometimes found within list items that can fire live region changes, such as for chat messages.\"\"\"\r\n@@ -15,19 +19,45 @@\n \tdef event_liveRegionChange(self):\r\n \t\t# The base liveRegionChange event is not enough as Skype for Business concatinates recent chat messages from the same person within the same minute\r\n \t\t# Therefore, specifically strip out the chat content and only report the most recent part added.\r\n-\t\t# The object's name contains the full message (I.e. person: content, timestamp) loosely separated by commas.\r\n+\t\t# When not empty, the object's name contains the full message (I.e. person: content, timestamp)\r\n+\t\t# loosely separated by commas.\r\n \t\t# Example string: \"Michael Curran : , , Hello\\r\\n\\r\\nThis is a test , 10:45 am.\"\r\n \t\t# Where person is \"Michael Curran\", content is \"Hello\\nThis is a test\" and timestamp is \"10:45 am\" \r\n-\t\t# The object's value just contains the content.\r\n-\t\t# Example: \"Hello\\rThis is a test\"\r\n-\t\t# We are only interested in person and content\r\n-\t\t# Therefore use value (content) to locate and split off the person from the name (fullText)\r\n+\t\t\r\n \t\t# Normalize the usage of end-of-line characters (name and value seem to expose them differently, which would break comparison)\r\n-\t\tcontent=self.value.replace('\\r','\\n').strip()\r\n \t\tfullText=self.name.replace('\\r\\n\\r\\n','\\n')\r\n+\t\t\r\n+\t\t# At the object's creation, an unuseful liveRegionChange event is triggered with an empty name,\r\n+\t\t# so we discard it.\r\n+\t\tif not self.name.strip():\r\n+\t\t\treturn\r\n+\t\t\r\n+\t\tif self.value is not None:\r\n+\t\t\t# For some versions of Lync / Skype for Business, the object's value contains just the content.\r\n+\t\t\t# Example: \"Hello\\rThis is a test\"\r\n+\t\t\t# We are only interested in person and content\r\n+\t\t\t# Therefore use value (content) to locate and split off the person from the name (fullText)\r\n+\t\t\tcontent = self.value.replace('\\r', '\\n').strip()\r\n+\t\t\tcontentStartIndex = fullText.find(content)\r\n+\t\t\tpretext = fullText[:contentStartIndex]\r\n+\t\telse:\r\n+\t\t\t# For other versions of Lync / Skype for Business, self.value is just None.\r\n+\t\t\t# So we just look at self.name formatting to split content from person and timestamp (less robust).\r\n+\t\t\tpattern = r'^(?P<name>.+?): (?P<priority>.*?), , (?P<content>.+),(?!, , ) , (?P<timestamp>.+)'\r\n+\t\t\tmatch = re.match(pattern, self.name, flags=re.DOTALL)\r\n+\t\t\tif match:\r\n+\t\t\t\tpretext = match['name']\r\n+\t\t\t\tpriority = match['priority']\r\n+\t\t\t\tcontent = match['content']\r\n+\t\t\t\tif priority:\r\n+\t\t\t\t\tcontent = priority + ', ' + content\r\n+\t\t\telse:\r\n+\t\t\t\t# In case no match is found, log the unexpected message and return the whole message.\r\n+\t\t\t\tlog.error(f'Unrecognized pattern in the following message: {self.name}')\r\n+\t\t\t\tpretext = ''\r\n+\t\t\t\tcontent = self.name\r\n+\t\t\tcontent = content.replace('\\r', '\\n').strip()\r\n 
\t\tcontentLines=content.split('\\n')\r\n-\t\tcontentStartIndex=fullText.find(content)\r\n-\t\tpretext=fullText[:contentStartIndex]\r\n \t\t# There are some annoying comma characters after the person's name \r\n \t\tpretext=pretext.replace(' ,','')\r\n \t\t# If the objects are the same, the person is the same, and the new content is the old content but with more appended, report the appended content\n", "issue": "nvda does not automatically read messages received on skype for business\n\r\n### Steps to reproduce:\r\nI'm sure I've seen this working for some time.\r\nI have not used skype for business for a long time but in my current work I'm making use of it again, and when I received a message on skype for business, nvda did not automatically read the received message.\r\nOpen a conversation on skype for business type something and wait inside the conversation for your partner to respond.\r\n### Actual behavior:\r\nThe nvda is mute.\r\n### Expected behavior:\r\nNvda should automatically announce the response received.\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\ninstall\r\n#### NVDA version:\r\n2018.4.1\r\n#### Windows version:\r\n10 17134.556\r\n#### Name and version of other software in use when reproducing the issue:\r\noffice 16.0.4266.1001\r\n\r\n#### Other information about your system:\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your PC?\r\nyes\r\n#### Have you tried any other versions of NVDA?\r\nno\n", "before_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n#Copyright (C) 2017 NV Access Limited\r\n\r\n\"\"\"appModule for Microsoft Skype for business. \"\"\"\r\n \r\nimport ui\r\nfrom NVDAObjects.UIA import UIA\r\nimport appModuleHandler\r\n\r\nclass NetUIRicherLabel(UIA):\r\n\t\"\"\"A label sometimes found within list items that can fire live region changes, such as for chat messages.\"\"\"\r\n\r\n\tdef event_liveRegionChange(self):\r\n\t\t# The base liveRegionChange event is not enough as Skype for Business concatinates recent chat messages from the same person within the same minute\r\n\t\t# Therefore, specifically strip out the chat content and only report the most recent part added.\r\n\t\t# The object's name contains the full message (I.e. 
person: content, timestamp) loosely separated by commas.\r\n\t\t# Example string: \"Michael Curran : , , Hello\\r\\n\\r\\nThis is a test , 10:45 am.\"\r\n\t\t# Where person is \"Michael Curran\", content is \"Hello\\nThis is a test\" and timestamp is \"10:45 am\" \r\n\t\t# The object's value just contains the content.\r\n\t\t# Example: \"Hello\\rThis is a test\"\r\n\t\t# We are only interested in person and content\r\n\t\t# Therefore use value (content) to locate and split off the person from the name (fullText)\r\n\t\t# Normalize the usage of end-of-line characters (name and value seem to expose them differently, which would break comparison)\r\n\t\tcontent=self.value.replace('\\r','\\n').strip()\r\n\t\tfullText=self.name.replace('\\r\\n\\r\\n','\\n')\r\n\t\tcontentLines=content.split('\\n')\r\n\t\tcontentStartIndex=fullText.find(content)\r\n\t\tpretext=fullText[:contentStartIndex]\r\n\t\t# There are some annoying comma characters after the person's name \r\n\t\tpretext=pretext.replace(' ,','')\r\n\t\t# If the objects are the same, the person is the same, and the new content is the old content but with more appended, report the appended content\r\n\t\t# Otherwise, report the person and the initial content\r\n\t\truntimeID=self.UIAElement.getRuntimeId()\r\n\t\tlastRuntimeID,lastPretext,lastContentLines=self.appModule._lastLiveChatMessageData\r\n\t\tcontentLinesLen=len(contentLines)\r\n\t\tlastContentLinesLen=len(lastContentLines)\r\n\t\tif runtimeID==lastRuntimeID and pretext==lastPretext and contentLinesLen>lastContentLinesLen and contentLines[:lastContentLinesLen]==lastContentLines:\r\n\t\t\tmessage=\"\\n\".join(contentLines[lastContentLinesLen:])\r\n\t\telse:\r\n\t\t\tmessage=pretext+content\r\n\t\tui.message(message)\r\n\t\t# Cache the message data for later possible comparisons \r\n\t\tself.appModule._lastLiveChatMessageData=runtimeID,pretext,contentLines\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\t# data to store the last chat message (runtime ID,person,content lines)\r\n\t_lastLiveChatMessageData=[],\"\",[]\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self,obj,clsList):\r\n\t\tif isinstance(obj,UIA) and obj.UIAElement.cachedClassName=='NetUIRicherLabel':\r\n\t\t\tclsList.insert(0,NetUIRicherLabel)\r\n\t\treturn clsList\r\n\r\n", "path": "source/appModules/lync.py"}], "after_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n#Copyright (C) 2017 NV Access Limited\r\n\r\n\"\"\"appModule for Microsoft Skype for business. \"\"\"\r\n\r\nimport ui\r\nfrom NVDAObjects.UIA import UIA\r\nimport appModuleHandler\r\nfrom logHandler import log\r\n\r\nimport re\r\n\r\n\r\nclass NetUIRicherLabel(UIA):\r\n\t\"\"\"A label sometimes found within list items that can fire live region changes, such as for chat messages.\"\"\"\r\n\r\n\tdef event_liveRegionChange(self):\r\n\t\t# The base liveRegionChange event is not enough as Skype for Business concatinates recent chat messages from the same person within the same minute\r\n\t\t# Therefore, specifically strip out the chat content and only report the most recent part added.\r\n\t\t# When not empty, the object's name contains the full message (I.e. 
person: content, timestamp)\r\n\t\t# loosely separated by commas.\r\n\t\t# Example string: \"Michael Curran : , , Hello\\r\\n\\r\\nThis is a test , 10:45 am.\"\r\n\t\t# Where person is \"Michael Curran\", content is \"Hello\\nThis is a test\" and timestamp is \"10:45 am\" \r\n\t\t\r\n\t\t# Normalize the usage of end-of-line characters (name and value seem to expose them differently, which would break comparison)\r\n\t\tfullText=self.name.replace('\\r\\n\\r\\n','\\n')\r\n\t\t\r\n\t\t# At the object's creation, an unuseful liveRegionChange event is triggered with an empty name,\r\n\t\t# so we discard it.\r\n\t\tif not self.name.strip():\r\n\t\t\treturn\r\n\t\t\r\n\t\tif self.value is not None:\r\n\t\t\t# For some versions of Lync / Skype for Business, the object's value contains just the content.\r\n\t\t\t# Example: \"Hello\\rThis is a test\"\r\n\t\t\t# We are only interested in person and content\r\n\t\t\t# Therefore use value (content) to locate and split off the person from the name (fullText)\r\n\t\t\tcontent = self.value.replace('\\r', '\\n').strip()\r\n\t\t\tcontentStartIndex = fullText.find(content)\r\n\t\t\tpretext = fullText[:contentStartIndex]\r\n\t\telse:\r\n\t\t\t# For other versions of Lync / Skype for Business, self.value is just None.\r\n\t\t\t# So we just look at self.name formatting to split content from person and timestamp (less robust).\r\n\t\t\tpattern = r'^(?P<name>.+?): (?P<priority>.*?), , (?P<content>.+),(?!, , ) , (?P<timestamp>.+)'\r\n\t\t\tmatch = re.match(pattern, self.name, flags=re.DOTALL)\r\n\t\t\tif match:\r\n\t\t\t\tpretext = match['name']\r\n\t\t\t\tpriority = match['priority']\r\n\t\t\t\tcontent = match['content']\r\n\t\t\t\tif priority:\r\n\t\t\t\t\tcontent = priority + ', ' + content\r\n\t\t\telse:\r\n\t\t\t\t# In case no match is found, log the unexpected message and return the whole message.\r\n\t\t\t\tlog.error(f'Unrecognized pattern in the following message: {self.name}')\r\n\t\t\t\tpretext = ''\r\n\t\t\t\tcontent = self.name\r\n\t\t\tcontent = content.replace('\\r', '\\n').strip()\r\n\t\tcontentLines=content.split('\\n')\r\n\t\t# There are some annoying comma characters after the person's name \r\n\t\tpretext=pretext.replace(' ,','')\r\n\t\t# If the objects are the same, the person is the same, and the new content is the old content but with more appended, report the appended content\r\n\t\t# Otherwise, report the person and the initial content\r\n\t\truntimeID=self.UIAElement.getRuntimeId()\r\n\t\tlastRuntimeID,lastPretext,lastContentLines=self.appModule._lastLiveChatMessageData\r\n\t\tcontentLinesLen=len(contentLines)\r\n\t\tlastContentLinesLen=len(lastContentLines)\r\n\t\tif runtimeID==lastRuntimeID and pretext==lastPretext and contentLinesLen>lastContentLinesLen and contentLines[:lastContentLinesLen]==lastContentLines:\r\n\t\t\tmessage=\"\\n\".join(contentLines[lastContentLinesLen:])\r\n\t\telse:\r\n\t\t\tmessage=pretext+content\r\n\t\tui.message(message)\r\n\t\t# Cache the message data for later possible comparisons \r\n\t\tself.appModule._lastLiveChatMessageData=runtimeID,pretext,contentLines\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\t# data to store the last chat message (runtime ID,person,content lines)\r\n\t_lastLiveChatMessageData=[],\"\",[]\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self,obj,clsList):\r\n\t\tif isinstance(obj,UIA) and obj.UIAElement.cachedClassName=='NetUIRicherLabel':\r\n\t\t\tclsList.insert(0,NetUIRicherLabel)\r\n\t\treturn clsList\r\n\r\n", "path": "source/appModules/lync.py"}]}
| 1,326 | 960 |
gh_patches_debug_28116
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-1743
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Posters assign person bug
```
Internal Server Error: /dashboard/posters/assign_person/
ValueError at /dashboard/posters/assign_person/
invalid literal for int() with base 10: '------'
```
I think this happens if you click on the "Assign person" on a poster order, in the available poster orders list, and not select a person.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/posters/dashboard/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2
3 import json
4 import logging
5
6 from django.conf import settings
7 from django.contrib import messages
8 from django.contrib.auth.decorators import login_required
9 from django.contrib.auth.models import Group
10 from django.core.exceptions import ImproperlyConfigured, PermissionDenied
11 from django.core.mail import EmailMessage
12 from django.shortcuts import HttpResponse, HttpResponseRedirect, get_object_or_404, redirect, render
13 from django.template.loader import render_to_string
14 from django.utils.translation import ugettext as _
15 from django.views.decorators.csrf import ensure_csrf_cookie
16 from guardian.decorators import permission_required
17 from guardian.models import GroupObjectPermission, UserObjectPermission
18
19 from apps.authentication.models import OnlineUser as User
20 from apps.companyprofile.models import Company
21 from apps.dashboard.tools import get_base_context
22 from apps.posters.forms import (AddBongForm, AddOtherForm, AddPosterForm, EditOtherForm,
23 EditPosterForm)
24 from apps.posters.models import Poster
25 from apps.posters.permissions import has_edit_perms, has_view_all_perms, has_view_perms
26
27
28 @login_required
29 @permission_required('posters.overview_poster_order', return_403=True)
30 def index(request):
31
32 # The group with members who should populate the dropdownlist
33 group = Group.objects.get(name='proKom')
34 users_to_populate = group.user_set.all()
35
36 context = get_base_context(request)
37
38 # View to show if user not in committee, but wanting to see own orders
39 if not has_view_all_perms(request.user):
40 context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user)
41 if request.user.has_perm('view_poster_order', x)]
42 return render(request, 'posters/dashboard/index.html', context)
43
44 orders = Poster.objects.all()
45
46 context['new_orders'] = orders.filter(assigned_to=None)
47 context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None)
48 context['old_orders'] = orders.filter(finished=True)
49
50 context['workers'] = users_to_populate
51
52 return render(request, 'posters/dashboard/index.html', context)
53
54
55 @login_required
56 @permission_required('posters.add_poster_order', return_403=True)
57 def add(request, order_type=0):
58 order_type = int(order_type)
59 context = get_base_context(request)
60 type_names = ("Plakat", "Bong", "Generell ")
61 type_name = type_names[order_type-1]
62
63 poster = Poster()
64 form = None
65
66 if request.method == 'POST':
67 if order_type == 1:
68 form = AddPosterForm(data=request.POST, instance=poster)
69 elif order_type == 2:
70 form = AddBongForm(data=request.POST, instance=poster)
71 elif order_type == 3:
72 # poster = GeneralOrder()
73 form = AddOtherForm(data=request.POST, instance=poster)
74
75 if form.is_valid():
76 _handle_poster_add(request, form, order_type)
77 return redirect(poster.get_absolute_url())
78 else:
79 context['form'] = form
80 context['form'].fields['ordered_committee'].queryset = request.user.groups.all()
81 return render(request, 'posters/dashboard/add.html', context)
82
83 context["order_type_name"] = type_name
84 context['order_type'] = order_type
85 context['can_edit'] = request.user.has_perm('posters.view_poster')
86
87 if order_type == 1:
88 AddPosterForm()
89 elif order_type == 2:
90 AddBongForm()
91 elif order_type == 3:
92 AddOtherForm()
93
94 forms = (AddPosterForm(), AddBongForm(), AddOtherForm())
95
96 context['form'] = forms[order_type-1]
97 context['form'].fields['ordered_committee'].queryset = request.user.groups.all()
98
99 return render(request, 'posters/dashboard/add.html', context)
100
101
102 @ensure_csrf_cookie
103 @login_required
104 def edit(request, order_id=None):
105 context = get_base_context(request)
106 context['add_poster_form'] = AddPosterForm()
107
108 if order_id:
109 poster = get_object_or_404(Poster, pk=order_id)
110 else:
111 poster = order_id
112
113 if order_id and request.user != poster.ordered_by and 'proKom' not in request.user.groups.all():
114 raise PermissionDenied
115
116 selected_form = EditPosterForm
117
118 if request.POST:
119 if poster.title:
120 selected_form = EditOtherForm
121 form = selected_form(request.POST, instance=poster)
122 if form.is_valid():
123 form.save()
124 return HttpResponseRedirect("../detail/"+str(poster.id))
125 else:
126 context["form"] = form
127 context["poster"] = poster
128
129 else:
130 selected_form = EditPosterForm
131 if poster.title:
132 selected_form = EditOtherForm
133
134 context["form"] = selected_form(instance=poster)
135 context["poster"] = poster
136
137 return render(request, 'posters/dashboard/add.html', context)
138
139
140 @ensure_csrf_cookie
141 @login_required
142 @permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True)
143 def detail(request, order_id=None):
144
145 if not order_id:
146 return HttpResponse(status=400)
147
148 context = get_base_context(request)
149 poster = get_object_or_404(Poster, pk=order_id)
150 context['poster'] = poster
151
152 if not has_view_perms(request.user, poster):
153 raise PermissionDenied
154
155 order_type = poster.order_type
156 type_names = ("Plakat", "Bong", "Generell ")
157 type_name = type_names[order_type-1]
158 context["order_type_name"] = type_name
159
160 if request.method == 'POST':
161 if not has_edit_perms(request.user, poster):
162 raise PermissionDenied
163 poster_status = request.POST.get('completed')
164 if poster_status == 'true' or poster_status == 'false':
165 poster.toggle_finished()
166
167 return render(request, 'posters/dashboard/details.html', context)
168
169
170 def _handle_poster_add(request, form, order_type):
171 logger = logging.getLogger(__name__)
172
173 poster = form.save(commit=False)
174 if request.POST.get('company'):
175 poster.company = Company.objects.get(pk=request.POST.get('company'))
176 poster.ordered_by = request.user
177 poster.order_type = order_type
178
179 poster.save()
180
181 # Let this user have permissions to show this order
182 UserObjectPermission.objects.assign_perm('view_poster_order', request.user, poster)
183 GroupObjectPermission.objects.assign_perm(
184 'view_poster_order',
185 Group.objects.get(name='proKom'),
186 poster
187 )
188
189 title = str(poster)
190
191 # The great sending of emails
192 subject = '[ProKom] Ny bestilling | %s' % title
193
194 poster.absolute_url = request.build_absolute_uri(poster.get_dashboard_url())
195 context = {}
196 context['poster'] = poster
197 message = render_to_string('posters/email/new_order_notification.txt', context)
198
199 from_email = settings.EMAIL_PROKOM
200 to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email]
201
202 try:
203 email_sent = EmailMessage(subject, message, from_email, to_emails, []).send()
204 except ImproperlyConfigured:
205 email_sent = False
206 logger.warn("Failed to send email for new order")
207 if email_sent:
208 messages.success(request, 'Opprettet bestilling')
209 else:
210 messages.error(request, 'Klarte ikke å sende epost, men bestillingen din ble fortsatt opprettet')
211
212
213 # Ajax
214 @login_required
215 def assign_person(request):
216 if request.is_ajax():
217 if request.method == 'POST':
218 order_id = request.POST.get('order_id')
219 orders = Poster.objects.filter(pk=order_id)
220 assign_to_id = request.POST.get('assign_to_id')
221 assign_to = User.objects.get(pk=assign_to_id)
222
223 if orders.count() == 0:
224 logging.debug("Trying to assign to non-existing order \"%s\" (user: %s)." % (order_id, request.user))
225 response_text = json.dumps({'message': _(
226 """Kan ikke finne en ordre med denne IDen (%s).
227 Om feilen vedvarer etter en refresh, kontakt [email protected].""") % order_id})
228 return HttpResponse(status=412, content=response_text)
229
230 order = orders[0]
231
232 if order.finished or order.assigned_to is not None:
233 response_text = json.dumps({'message': _("Denne ordren er allerede behandlet.")})
234 return HttpResponse(status=412, content=response_text)
235
236 order.assigned_to = assign_to
237 order.save()
238
239 return HttpResponse(status=200)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/posters/dashboard/views.py b/apps/posters/dashboard/views.py
--- a/apps/posters/dashboard/views.py
+++ b/apps/posters/dashboard/views.py
@@ -215,23 +215,15 @@
def assign_person(request):
if request.is_ajax():
if request.method == 'POST':
- order_id = request.POST.get('order_id')
- orders = Poster.objects.filter(pk=order_id)
- assign_to_id = request.POST.get('assign_to_id')
- assign_to = User.objects.get(pk=assign_to_id)
-
- if orders.count() == 0:
- logging.debug("Trying to assign to non-existing order \"%s\" (user: %s)." % (order_id, request.user))
- response_text = json.dumps({'message': _(
- """Kan ikke finne en ordre med denne IDen (%s).
-Om feilen vedvarer etter en refresh, kontakt [email protected].""") % order_id})
- return HttpResponse(status=412, content=response_text)
-
- order = orders[0]
+ order = get_object_or_404(Poster, pk=request.POST.get('order_id'))
+ if request.POST.get('assign_to_id') and not str(request.POST.get('assign_to_id')).isnumeric():
+ return HttpResponse(status=400, content=json.dumps(
+ {'message': 'Denne brukerprofilen kunne ikke tilordnes til ordren.'}))
+ assign_to = get_object_or_404(User, pk=request.POST.get('assign_to_id'))
if order.finished or order.assigned_to is not None:
response_text = json.dumps({'message': _("Denne ordren er allerede behandlet.")})
- return HttpResponse(status=412, content=response_text)
+ return HttpResponse(status=400, content=response_text)
order.assigned_to = assign_to
order.save()
|
{"golden_diff": "diff --git a/apps/posters/dashboard/views.py b/apps/posters/dashboard/views.py\n--- a/apps/posters/dashboard/views.py\n+++ b/apps/posters/dashboard/views.py\n@@ -215,23 +215,15 @@\n def assign_person(request):\n if request.is_ajax():\n if request.method == 'POST':\n- order_id = request.POST.get('order_id')\n- orders = Poster.objects.filter(pk=order_id)\n- assign_to_id = request.POST.get('assign_to_id')\n- assign_to = User.objects.get(pk=assign_to_id)\n-\n- if orders.count() == 0:\n- logging.debug(\"Trying to assign to non-existing order \\\"%s\\\" (user: %s).\" % (order_id, request.user))\n- response_text = json.dumps({'message': _(\n- \"\"\"Kan ikke finne en ordre med denne IDen (%s).\n-Om feilen vedvarer etter en refresh, kontakt [email protected].\"\"\") % order_id})\n- return HttpResponse(status=412, content=response_text)\n-\n- order = orders[0]\n+ order = get_object_or_404(Poster, pk=request.POST.get('order_id'))\n+ if request.POST.get('assign_to_id') and not str(request.POST.get('assign_to_id')).isnumeric():\n+ return HttpResponse(status=400, content=json.dumps(\n+ {'message': 'Denne brukerprofilen kunne ikke tilordnes til ordren.'}))\n+ assign_to = get_object_or_404(User, pk=request.POST.get('assign_to_id'))\n \n if order.finished or order.assigned_to is not None:\n response_text = json.dumps({'message': _(\"Denne ordren er allerede behandlet.\")})\n- return HttpResponse(status=412, content=response_text)\n+ return HttpResponse(status=400, content=response_text)\n \n order.assigned_to = assign_to\n order.save()\n", "issue": "Posters assign person bug\n```\nInternal Server Error: /dashboard/posters/assign_person/\n\nValueError at /dashboard/posters/assign_person/\ninvalid literal for int() with base 10: '------'\n```\n\nI think this happens if you click on the \"Assign person\" on a poster order, in the available poster orders list, and not select a person. 
\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ImproperlyConfigured, PermissionDenied\nfrom django.core.mail import EmailMessage\nfrom django.shortcuts import HttpResponse, HttpResponseRedirect, get_object_or_404, redirect, render\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom guardian.decorators import permission_required\nfrom guardian.models import GroupObjectPermission, UserObjectPermission\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.tools import get_base_context\nfrom apps.posters.forms import (AddBongForm, AddOtherForm, AddPosterForm, EditOtherForm,\n EditPosterForm)\nfrom apps.posters.models import Poster\nfrom apps.posters.permissions import has_edit_perms, has_view_all_perms, has_view_perms\n\n\n@login_required\n@permission_required('posters.overview_poster_order', return_403=True)\ndef index(request):\n\n # The group with members who should populate the dropdownlist\n group = Group.objects.get(name='proKom')\n users_to_populate = group.user_set.all()\n\n context = get_base_context(request)\n\n # View to show if user not in committee, but wanting to see own orders\n if not has_view_all_perms(request.user):\n context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user)\n if request.user.has_perm('view_poster_order', x)]\n return render(request, 'posters/dashboard/index.html', context)\n\n orders = Poster.objects.all()\n\n context['new_orders'] = orders.filter(assigned_to=None)\n context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None)\n context['old_orders'] = orders.filter(finished=True)\n\n context['workers'] = users_to_populate\n\n return render(request, 'posters/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('posters.add_poster_order', return_403=True)\ndef add(request, order_type=0):\n order_type = int(order_type)\n context = get_base_context(request)\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n\n poster = Poster()\n form = None\n\n if request.method == 'POST':\n if order_type == 1:\n form = AddPosterForm(data=request.POST, instance=poster)\n elif order_type == 2:\n form = AddBongForm(data=request.POST, instance=poster)\n elif order_type == 3:\n # poster = GeneralOrder()\n form = AddOtherForm(data=request.POST, instance=poster)\n\n if form.is_valid():\n _handle_poster_add(request, form, order_type)\n return redirect(poster.get_absolute_url())\n else:\n context['form'] = form\n context['form'].fields['ordered_committee'].queryset = request.user.groups.all()\n return render(request, 'posters/dashboard/add.html', context)\n\n context[\"order_type_name\"] = type_name\n context['order_type'] = order_type\n context['can_edit'] = request.user.has_perm('posters.view_poster')\n\n if order_type == 1:\n AddPosterForm()\n elif order_type == 2:\n AddBongForm()\n elif order_type == 3:\n AddOtherForm()\n\n forms = (AddPosterForm(), AddBongForm(), AddOtherForm())\n\n context['form'] = forms[order_type-1]\n context['form'].fields['ordered_committee'].queryset = request.user.groups.all()\n\n return 
render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\ndef edit(request, order_id=None):\n context = get_base_context(request)\n context['add_poster_form'] = AddPosterForm()\n\n if order_id:\n poster = get_object_or_404(Poster, pk=order_id)\n else:\n poster = order_id\n\n if order_id and request.user != poster.ordered_by and 'proKom' not in request.user.groups.all():\n raise PermissionDenied\n\n selected_form = EditPosterForm\n\n if request.POST:\n if poster.title:\n selected_form = EditOtherForm\n form = selected_form(request.POST, instance=poster)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(\"../detail/\"+str(poster.id))\n else:\n context[\"form\"] = form\n context[\"poster\"] = poster\n\n else:\n selected_form = EditPosterForm\n if poster.title:\n selected_form = EditOtherForm\n\n context[\"form\"] = selected_form(instance=poster)\n context[\"poster\"] = poster\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True)\ndef detail(request, order_id=None):\n\n if not order_id:\n return HttpResponse(status=400)\n\n context = get_base_context(request)\n poster = get_object_or_404(Poster, pk=order_id)\n context['poster'] = poster\n\n if not has_view_perms(request.user, poster):\n raise PermissionDenied\n\n order_type = poster.order_type\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n context[\"order_type_name\"] = type_name\n\n if request.method == 'POST':\n if not has_edit_perms(request.user, poster):\n raise PermissionDenied\n poster_status = request.POST.get('completed')\n if poster_status == 'true' or poster_status == 'false':\n poster.toggle_finished()\n\n return render(request, 'posters/dashboard/details.html', context)\n\n\ndef _handle_poster_add(request, form, order_type):\n logger = logging.getLogger(__name__)\n\n poster = form.save(commit=False)\n if request.POST.get('company'):\n poster.company = Company.objects.get(pk=request.POST.get('company'))\n poster.ordered_by = request.user\n poster.order_type = order_type\n\n poster.save()\n\n # Let this user have permissions to show this order\n UserObjectPermission.objects.assign_perm('view_poster_order', request.user, poster)\n GroupObjectPermission.objects.assign_perm(\n 'view_poster_order',\n Group.objects.get(name='proKom'),\n poster\n )\n\n title = str(poster)\n\n # The great sending of emails\n subject = '[ProKom] Ny bestilling | %s' % title\n\n poster.absolute_url = request.build_absolute_uri(poster.get_dashboard_url())\n context = {}\n context['poster'] = poster\n message = render_to_string('posters/email/new_order_notification.txt', context)\n\n from_email = settings.EMAIL_PROKOM\n to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email]\n\n try:\n email_sent = EmailMessage(subject, message, from_email, to_emails, []).send()\n except ImproperlyConfigured:\n email_sent = False\n logger.warn(\"Failed to send email for new order\")\n if email_sent:\n messages.success(request, 'Opprettet bestilling')\n else:\n messages.error(request, 'Klarte ikke \u00e5 sende epost, men bestillingen din ble fortsatt opprettet')\n\n\n# Ajax\n@login_required\ndef assign_person(request):\n if request.is_ajax():\n if request.method == 'POST':\n order_id = request.POST.get('order_id')\n orders = Poster.objects.filter(pk=order_id)\n assign_to_id = request.POST.get('assign_to_id')\n assign_to = 
User.objects.get(pk=assign_to_id)\n\n if orders.count() == 0:\n logging.debug(\"Trying to assign to non-existing order \\\"%s\\\" (user: %s).\" % (order_id, request.user))\n response_text = json.dumps({'message': _(\n \"\"\"Kan ikke finne en ordre med denne IDen (%s).\nOm feilen vedvarer etter en refresh, kontakt [email protected].\"\"\") % order_id})\n return HttpResponse(status=412, content=response_text)\n\n order = orders[0]\n\n if order.finished or order.assigned_to is not None:\n response_text = json.dumps({'message': _(\"Denne ordren er allerede behandlet.\")})\n return HttpResponse(status=412, content=response_text)\n\n order.assigned_to = assign_to\n order.save()\n\n return HttpResponse(status=200)\n", "path": "apps/posters/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ImproperlyConfigured, PermissionDenied\nfrom django.core.mail import EmailMessage\nfrom django.shortcuts import HttpResponse, HttpResponseRedirect, get_object_or_404, redirect, render\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom guardian.decorators import permission_required\nfrom guardian.models import GroupObjectPermission, UserObjectPermission\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.tools import get_base_context\nfrom apps.posters.forms import (AddBongForm, AddOtherForm, AddPosterForm, EditOtherForm,\n EditPosterForm)\nfrom apps.posters.models import Poster\nfrom apps.posters.permissions import has_edit_perms, has_view_all_perms, has_view_perms\n\n\n@login_required\n@permission_required('posters.overview_poster_order', return_403=True)\ndef index(request):\n\n # The group with members who should populate the dropdownlist\n group = Group.objects.get(name='proKom')\n users_to_populate = group.user_set.all()\n\n context = get_base_context(request)\n\n # View to show if user not in committee, but wanting to see own orders\n if not has_view_all_perms(request.user):\n context['your_orders'] = [x for x in Poster.objects.filter(ordered_by=request.user)\n if request.user.has_perm('view_poster_order', x)]\n return render(request, 'posters/dashboard/index.html', context)\n\n orders = Poster.objects.all()\n\n context['new_orders'] = orders.filter(assigned_to=None)\n context['active_orders'] = orders.filter(finished=False).exclude(assigned_to=None)\n context['old_orders'] = orders.filter(finished=True)\n\n context['workers'] = users_to_populate\n\n return render(request, 'posters/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('posters.add_poster_order', return_403=True)\ndef add(request, order_type=0):\n order_type = int(order_type)\n context = get_base_context(request)\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n\n poster = Poster()\n form = None\n\n if request.method == 'POST':\n if order_type == 1:\n form = AddPosterForm(data=request.POST, instance=poster)\n elif order_type == 2:\n form = AddBongForm(data=request.POST, instance=poster)\n elif order_type == 3:\n # poster = GeneralOrder()\n form = AddOtherForm(data=request.POST, instance=poster)\n\n if 
form.is_valid():\n _handle_poster_add(request, form, order_type)\n return redirect(poster.get_absolute_url())\n else:\n context['form'] = form\n context['form'].fields['ordered_committee'].queryset = request.user.groups.all()\n return render(request, 'posters/dashboard/add.html', context)\n\n context[\"order_type_name\"] = type_name\n context['order_type'] = order_type\n context['can_edit'] = request.user.has_perm('posters.view_poster')\n\n if order_type == 1:\n AddPosterForm()\n elif order_type == 2:\n AddBongForm()\n elif order_type == 3:\n AddOtherForm()\n\n forms = (AddPosterForm(), AddBongForm(), AddOtherForm())\n\n context['form'] = forms[order_type-1]\n context['form'].fields['ordered_committee'].queryset = request.user.groups.all()\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\ndef edit(request, order_id=None):\n context = get_base_context(request)\n context['add_poster_form'] = AddPosterForm()\n\n if order_id:\n poster = get_object_or_404(Poster, pk=order_id)\n else:\n poster = order_id\n\n if order_id and request.user != poster.ordered_by and 'proKom' not in request.user.groups.all():\n raise PermissionDenied\n\n selected_form = EditPosterForm\n\n if request.POST:\n if poster.title:\n selected_form = EditOtherForm\n form = selected_form(request.POST, instance=poster)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(\"../detail/\"+str(poster.id))\n else:\n context[\"form\"] = form\n context[\"poster\"] = poster\n\n else:\n selected_form = EditPosterForm\n if poster.title:\n selected_form = EditOtherForm\n\n context[\"form\"] = selected_form(instance=poster)\n context[\"poster\"] = poster\n\n return render(request, 'posters/dashboard/add.html', context)\n\n\n@ensure_csrf_cookie\n@login_required\n@permission_required('view_poster_order', (Poster, 'pk', 'order_id'), return_403=True)\ndef detail(request, order_id=None):\n\n if not order_id:\n return HttpResponse(status=400)\n\n context = get_base_context(request)\n poster = get_object_or_404(Poster, pk=order_id)\n context['poster'] = poster\n\n if not has_view_perms(request.user, poster):\n raise PermissionDenied\n\n order_type = poster.order_type\n type_names = (\"Plakat\", \"Bong\", \"Generell \")\n type_name = type_names[order_type-1]\n context[\"order_type_name\"] = type_name\n\n if request.method == 'POST':\n if not has_edit_perms(request.user, poster):\n raise PermissionDenied\n poster_status = request.POST.get('completed')\n if poster_status == 'true' or poster_status == 'false':\n poster.toggle_finished()\n\n return render(request, 'posters/dashboard/details.html', context)\n\n\ndef _handle_poster_add(request, form, order_type):\n logger = logging.getLogger(__name__)\n\n poster = form.save(commit=False)\n if request.POST.get('company'):\n poster.company = Company.objects.get(pk=request.POST.get('company'))\n poster.ordered_by = request.user\n poster.order_type = order_type\n\n poster.save()\n\n # Let this user have permissions to show this order\n UserObjectPermission.objects.assign_perm('view_poster_order', request.user, poster)\n GroupObjectPermission.objects.assign_perm(\n 'view_poster_order',\n Group.objects.get(name='proKom'),\n poster\n )\n\n title = str(poster)\n\n # The great sending of emails\n subject = '[ProKom] Ny bestilling | %s' % title\n\n poster.absolute_url = request.build_absolute_uri(poster.get_dashboard_url())\n context = {}\n context['poster'] = poster\n message = render_to_string('posters/email/new_order_notification.txt', 
context)\n\n from_email = settings.EMAIL_PROKOM\n to_emails = [settings.EMAIL_PROKOM, request.user.get_email().email]\n\n try:\n email_sent = EmailMessage(subject, message, from_email, to_emails, []).send()\n except ImproperlyConfigured:\n email_sent = False\n logger.warn(\"Failed to send email for new order\")\n if email_sent:\n messages.success(request, 'Opprettet bestilling')\n else:\n messages.error(request, 'Klarte ikke \u00e5 sende epost, men bestillingen din ble fortsatt opprettet')\n\n\n# Ajax\n@login_required\ndef assign_person(request):\n if request.is_ajax():\n if request.method == 'POST':\n order = get_object_or_404(Poster, pk=request.POST.get('order_id'))\n if request.POST.get('assign_to_id') and not str(request.POST.get('assign_to_id')).isnumeric():\n return HttpResponse(status=400, content=json.dumps(\n {'message': 'Denne brukerprofilen kunne ikke tilordnes til ordren.'}))\n assign_to = get_object_or_404(User, pk=request.POST.get('assign_to_id'))\n\n if order.finished or order.assigned_to is not None:\n response_text = json.dumps({'message': _(\"Denne ordren er allerede behandlet.\")})\n return HttpResponse(status=400, content=response_text)\n\n order.assigned_to = assign_to\n order.save()\n\n return HttpResponse(status=200)\n", "path": "apps/posters/dashboard/views.py"}]}
| 2,875 | 425 |
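The `after_files` blob that closes the row above is readable only as an escaped JSON string; the substance of its change to the ajax `assign_person` view is a switch to `get_object_or_404` plus a numeric guard on the posted id. A minimal sketch of that pattern, using the app's own model imports but an English stand-in for its error strings (URL wiring and the login/ajax decorators are omitted):

```python
# Lookup-and-validate pattern from the patched assign_person view (sketch only).
import json

from django.http import HttpResponse
from django.shortcuts import get_object_or_404

from apps.authentication.models import OnlineUser as User  # the app's own modules
from apps.posters.models import Poster


def assign_person(request):
    # 404s cleanly instead of filtering a queryset and counting by hand
    order = get_object_or_404(Poster, pk=request.POST.get('order_id'))
    assign_to_id = request.POST.get('assign_to_id')
    if assign_to_id and not str(assign_to_id).isnumeric():
        # a malformed id now yields 400 rather than the old 412
        return HttpResponse(status=400, content=json.dumps(
            {'message': 'This user profile could not be assigned to the order.'}))
    assign_to = get_object_or_404(User, pk=assign_to_id)
    if order.finished or order.assigned_to is not None:
        return HttpResponse(status=400, content=json.dumps(
            {'message': 'This order has already been handled.'}))
    order.assigned_to = assign_to
    order.save()
    return HttpResponse(status=200)
```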
gh_patches_debug_21060
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-1296
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: XSS in quicktour/views/views.py
http://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#views
https://github.com/Pylons/pyramid/blob/master/docs/quick_tour/views/views.py#L17
As there is no templating layer to autoescape the user-supplied `name` parameter and the response is by default `text/html`, `hello_view` contains an XSS vulnerability.
Templating is not the focus of (this part of) the quick tour.
I can think of two approaches:
1. Use `cgi.escape` before doing string interpolation (`body % cgi.escape(name)`).
2. Add a note about XSS and the value of utilizing a good templating engine with autoescape.
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')" http://cwe.mitre.org/data/definitions/79.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/quick_tour/views/views.py`
Content:
```
1 from pyramid.httpexceptions import HTTPFound
2 from pyramid.response import Response
3 from pyramid.view import view_config
4
5
6 # First view, available at http://localhost:6543/
7 @view_config(route_name='home')
8 def home_view(request):
9 return Response('<p>Visit <a href="/howdy?name=lisa">hello</a></p>')
10
11
12 # /howdy?name=alice which links to the next view
13 @view_config(route_name='hello')
14 def hello_view(request):
15 name = request.params.get('name', 'No Name')
16 body = '<p>Hi %s, this <a href="/goto">redirects</a></p>'
17 return Response(body % name)
18
19
20 # /goto which issues HTTP redirect to the last view
21 @view_config(route_name='redirect')
22 def redirect_view(request):
23 return HTTPFound(location="/problem")
24
25
26 # /problem which causes an site error
27 @view_config(route_name='exception')
28 def exception_view(request):
29 raise Exception()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/quick_tour/views/views.py b/docs/quick_tour/views/views.py
--- a/docs/quick_tour/views/views.py
+++ b/docs/quick_tour/views/views.py
@@ -1,3 +1,5 @@
+import cgi
+
from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response
from pyramid.view import view_config
@@ -14,7 +16,8 @@
def hello_view(request):
name = request.params.get('name', 'No Name')
body = '<p>Hi %s, this <a href="/goto">redirects</a></p>'
- return Response(body % name)
+ # cgi.escape to prevent Cross-Site Scripting (XSS) [CWE 79]
+ return Response(body % cgi.escape(name))
# /goto which issues HTTP redirect to the last view
@@ -23,7 +26,7 @@
return HTTPFound(location="/problem")
-# /problem which causes an site error
+# /problem which causes a site error
@view_config(route_name='exception')
def exception_view(request):
raise Exception()
|
{"golden_diff": "diff --git a/docs/quick_tour/views/views.py b/docs/quick_tour/views/views.py\n--- a/docs/quick_tour/views/views.py\n+++ b/docs/quick_tour/views/views.py\n@@ -1,3 +1,5 @@\n+import cgi\n+\n from pyramid.httpexceptions import HTTPFound\n from pyramid.response import Response\n from pyramid.view import view_config\n@@ -14,7 +16,8 @@\n def hello_view(request):\n name = request.params.get('name', 'No Name')\n body = '<p>Hi %s, this <a href=\"/goto\">redirects</a></p>'\n- return Response(body % name)\n+ # cgi.escape to prevent Cross-Site Scripting (XSS) [CWE 79]\n+ return Response(body % cgi.escape(name))\n \n \n # /goto which issues HTTP redirect to the last view\n@@ -23,7 +26,7 @@\n return HTTPFound(location=\"/problem\")\n \n \n-# /problem which causes an site error\n+# /problem which causes a site error\n @view_config(route_name='exception')\n def exception_view(request):\n raise Exception()\n", "issue": "DOC: XSS in quicktour/views/views.py\nhttp://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#views\n\nhttps://github.com/Pylons/pyramid/blob/master/docs/quick_tour/views/views.py#L17\n\nAs there is no templating layer to autoescape the user-supplied `name` parameter and the response is by default `text/html`, `hello_view` contains an XSS vulnerability.\n\nTemplating is not the focus of (this part of) the quick tour.\n\nI can think of two approaches:\n1. Use `cgi.escape` before doing string interpolation (`body % cgi.escape(name)').\n2. Add a note about XSS and the value of utilizing a good templating engine with autoescape.\n\n\"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')\" http://cwe.mitre.org/data/definitions/79.html\n\n", "before_files": [{"content": "from pyramid.httpexceptions import HTTPFound\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\n\n\n# First view, available at http://localhost:6543/\n@view_config(route_name='home')\ndef home_view(request):\n return Response('<p>Visit <a href=\"/howdy?name=lisa\">hello</a></p>')\n\n\n# /howdy?name=alice which links to the next view\n@view_config(route_name='hello')\ndef hello_view(request):\n name = request.params.get('name', 'No Name')\n body = '<p>Hi %s, this <a href=\"/goto\">redirects</a></p>'\n return Response(body % name)\n\n\n# /goto which issues HTTP redirect to the last view\n@view_config(route_name='redirect')\ndef redirect_view(request):\n return HTTPFound(location=\"/problem\")\n\n\n# /problem which causes an site error\n@view_config(route_name='exception')\ndef exception_view(request):\n raise Exception()\n", "path": "docs/quick_tour/views/views.py"}], "after_files": [{"content": "import cgi\n\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\n\n\n# First view, available at http://localhost:6543/\n@view_config(route_name='home')\ndef home_view(request):\n return Response('<p>Visit <a href=\"/howdy?name=lisa\">hello</a></p>')\n\n\n# /howdy?name=alice which links to the next view\n@view_config(route_name='hello')\ndef hello_view(request):\n name = request.params.get('name', 'No Name')\n body = '<p>Hi %s, this <a href=\"/goto\">redirects</a></p>'\n # cgi.escape to prevent Cross-Site Scripting (XSS) [CWE 79]\n return Response(body % cgi.escape(name))\n\n\n# /goto which issues HTTP redirect to the last view\n@view_config(route_name='redirect')\ndef redirect_view(request):\n return HTTPFound(location=\"/problem\")\n\n\n# /problem which causes a site 
error\n@view_config(route_name='exception')\ndef exception_view(request):\n raise Exception()\n", "path": "docs/quick_tour/views/views.py"}]}
| 718 | 245 |
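To make the effect of the quick-tour patch above concrete, here is a tiny standalone check of what escaping the query parameter changes. It uses the stdlib `html.escape`; the patch itself calls the older `cgi.escape`, which behaves the same for this purpose and was removed in Python 3.8:

```python
# What the one-line fix prevents: without escaping, user-controlled markup is
# interpolated into the HTML response verbatim.
import html

body = '<p>Hi %s, this <a href="/goto">redirects</a></p>'
name = '<script>alert(1)</script>'  # attacker-controlled ?name= value

print(body % name)               # the script tag lands in the page as live markup
print(body % html.escape(name))  # &lt;script&gt;alert(1)&lt;/script&gt; renders as inert text
```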
gh_patches_debug_5685
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-3996
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/replica.py`
Content:
```
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
|
{"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. 
This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}], "after_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]}
| 1,049 | 122 |
gh_patches_debug_24318
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-2244
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plotly privacy page outdated
The explanation of the "sharing" argument of the plotly_chart function links to outdated content. I haven't checked how streamlit handles the new distinction between chart_studio and offline plotly, but I guess the text should be rewritten.
**Link to doc page in question (if any):**
[The streamlit.plotly_chart documentation says the following on the **sharing** argument:](https://docs.streamlit.io/api.html?highlight=plotly#streamlit.plotly_chart)
> **sharing** ({'streamlit', 'private', 'secret', 'public'}) – Use ‘streamlit’ to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly’s servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account.
**What you think the docs should say:**
**sharing** ({'streamlit', 'private', 'secret', 'public'}) – Use ‘streamlit’ to insert the plot and all its dependencies directly in the Streamlit app, which means it uses plotly's offline mode. Use any other sharing mode to use Plotly’s chart studio. Nb: Plotly chart studio requires an account.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/plotly_chart.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Streamlit support for Plotly charts."""
16
17 import json
18 import urllib.parse
19
20 from streamlit import caching
21 from streamlit import type_util
22
23 from streamlit.logger import get_logger
24 from streamlit.proto.PlotlyChart_pb2 import PlotlyChart as PlotlyChartProto
25
26 LOGGER = get_logger(__name__)
27
28 SHARING_MODES = set(
29 [
30 # This means the plot will be sent to the Streamlit app rather than to
31 # Plotly.
32 "streamlit",
33 # The three modes below are for plots that should be hosted in Plotly.
34 # These are the names Plotly uses for them.
35 "private",
36 "public",
37 "secret",
38 ]
39 )
40
41
42 class PlotlyMixin:
43 def plotly_chart(
44 dg, figure_or_data, use_container_width=False, sharing="streamlit", **kwargs,
45 ):
46 """Display an interactive Plotly chart.
47
48 Plotly is a charting library for Python. The arguments to this function
49 closely follow the ones for Plotly's `plot()` function. You can find
50 more about Plotly at https://plot.ly/python.
51
52 Parameters
53 ----------
54 figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,
55 dict/list of plotly.graph_objs.Figure/Data
56
57 See https://plot.ly/python/ for examples of graph descriptions.
58
59 use_container_width : bool
60 If True, set the chart width to the column width. This takes
61 precedence over the figure's native `width` value.
62
63 sharing : {'streamlit', 'private', 'secret', 'public'}
64 Use 'streamlit' to insert the plot and all its dependencies
65 directly in the Streamlit app, which means it works offline too.
66 This is the default.
67 Use any other sharing mode to send the app to Plotly's servers,
68 and embed the result into the Streamlit app. See
69 https://plot.ly/python/privacy/ for more. Note that these sharing
70 modes require a Plotly account.
71
72 **kwargs
73 Any argument accepted by Plotly's `plot()` function.
74
75
76 To show Plotly charts in Streamlit, call `st.plotly_chart`
77 wherever you would call Plotly's `py.plot` or `py.iplot`.
78
79 Example
80 -------
81
82 The example below comes straight from the examples at
83 https://plot.ly/python:
84
85 >>> import streamlit as st
86 >>> import plotly.figure_factory as ff
87 >>> import numpy as np
88 >>>
89 >>> # Add histogram data
90 >>> x1 = np.random.randn(200) - 2
91 >>> x2 = np.random.randn(200)
92 >>> x3 = np.random.randn(200) + 2
93 >>>
94 >>> # Group data together
95 >>> hist_data = [x1, x2, x3]
96 >>>
97 >>> group_labels = ['Group 1', 'Group 2', 'Group 3']
98 >>>
99 >>> # Create distplot with custom bin_size
100 >>> fig = ff.create_distplot(
101 ... hist_data, group_labels, bin_size=[.1, .25, .5])
102 >>>
103 >>> # Plot!
104 >>> st.plotly_chart(fig, use_container_width=True)
105
106 .. output::
107 https://static.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ
108 height: 400px
109
110 """
111 # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
112 # for their main parameter. I don't like the name, but it's best to
113 # keep it in sync with what Plotly calls it.
114 import streamlit.elements.plotly_chart as plotly_chart
115
116 plotly_chart_proto = PlotlyChartProto()
117 marshall(
118 plotly_chart_proto, figure_or_data, use_container_width, sharing, **kwargs
119 )
120 return dg._enqueue("plotly_chart", plotly_chart_proto) # type: ignore
121
122
123 def marshall(proto, figure_or_data, use_container_width, sharing, **kwargs):
124 """Marshall a proto with a Plotly spec.
125
126 See DeltaGenerator.plotly_chart for docs.
127 """
128 # NOTE: "figure_or_data" is the name used in Plotly's .plot() method
129 # for their main parameter. I don't like the name, but its best to keep
130 # it in sync with what Plotly calls it.
131
132 import plotly.tools
133
134 if type_util.is_type(figure_or_data, "matplotlib.figure.Figure"):
135 figure = plotly.tools.mpl_to_plotly(figure_or_data)
136
137 else:
138 figure = plotly.tools.return_figure_from_figure_or_data(
139 figure_or_data, validate_figure=True
140 )
141
142 if not isinstance(sharing, str) or sharing.lower() not in SHARING_MODES:
143 raise ValueError("Invalid sharing mode for Plotly chart: %s" % sharing)
144
145 proto.use_container_width = use_container_width
146
147 if sharing == "streamlit":
148 import plotly.utils
149
150 config = dict(kwargs.get("config", {}))
151 # Copy over some kwargs to config dict. Plotly does the same in plot().
152 config.setdefault("showLink", kwargs.get("show_link", False))
153 config.setdefault("linkText", kwargs.get("link_text", False))
154
155 proto.figure.spec = json.dumps(figure, cls=plotly.utils.PlotlyJSONEncoder)
156 proto.figure.config = json.dumps(config)
157
158 else:
159 url = _plot_to_url_or_load_cached_url(
160 figure, sharing=sharing, auto_open=False, **kwargs
161 )
162 proto.url = _get_embed_url(url)
163
164
165 @caching.cache
166 def _plot_to_url_or_load_cached_url(*args, **kwargs):
167 """Call plotly.plot wrapped in st.cache.
168
169 This is so we don't unecessarily upload data to Plotly's SASS if nothing
170 changed since the previous upload.
171 """
172 try:
173 # Plotly 4 changed its main package.
174 import chart_studio.plotly as ply
175 except ImportError:
176 import plotly.plotly as ply
177
178 return ply.plot(*args, **kwargs)
179
180
181 def _get_embed_url(url):
182 parsed_url = urllib.parse.urlparse(url)
183
184 # Plotly's embed URL is the normal URL plus ".embed".
185 # (Note that our use namedtuple._replace is fine because that's not a
186 # private method! It just has an underscore to avoid clashing with the
187 # tuple field names)
188 parsed_embed_url = parsed_url._replace(path=parsed_url.path + ".embed")
189
190 return urllib.parse.urlunparse(parsed_embed_url)
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/streamlit/elements/plotly_chart.py b/lib/streamlit/elements/plotly_chart.py
--- a/lib/streamlit/elements/plotly_chart.py
+++ b/lib/streamlit/elements/plotly_chart.py
@@ -41,7 +41,11 @@
class PlotlyMixin:
def plotly_chart(
- dg, figure_or_data, use_container_width=False, sharing="streamlit", **kwargs,
+ dg,
+ figure_or_data,
+ use_container_width=False,
+ sharing="streamlit",
+ **kwargs,
):
"""Display an interactive Plotly chart.
@@ -62,12 +66,9 @@
sharing : {'streamlit', 'private', 'secret', 'public'}
Use 'streamlit' to insert the plot and all its dependencies
- directly in the Streamlit app, which means it works offline too.
- This is the default.
- Use any other sharing mode to send the app to Plotly's servers,
- and embed the result into the Streamlit app. See
- https://plot.ly/python/privacy/ for more. Note that these sharing
- modes require a Plotly account.
+ directly in the Streamlit app using plotly's offline mode (default).
+ Use any other sharing mode to send the chart to Plotly chart studio, which
+ requires an account. See https://plotly.com/chart-studio/ for more information.
**kwargs
Any argument accepted by Plotly's `plot()` function.
|
{"golden_diff": "diff --git a/lib/streamlit/elements/plotly_chart.py b/lib/streamlit/elements/plotly_chart.py\n--- a/lib/streamlit/elements/plotly_chart.py\n+++ b/lib/streamlit/elements/plotly_chart.py\n@@ -41,7 +41,11 @@\n \n class PlotlyMixin:\n def plotly_chart(\n- dg, figure_or_data, use_container_width=False, sharing=\"streamlit\", **kwargs,\n+ dg,\n+ figure_or_data,\n+ use_container_width=False,\n+ sharing=\"streamlit\",\n+ **kwargs,\n ):\n \"\"\"Display an interactive Plotly chart.\n \n@@ -62,12 +66,9 @@\n \n sharing : {'streamlit', 'private', 'secret', 'public'}\n Use 'streamlit' to insert the plot and all its dependencies\n- directly in the Streamlit app, which means it works offline too.\n- This is the default.\n- Use any other sharing mode to send the app to Plotly's servers,\n- and embed the result into the Streamlit app. See\n- https://plot.ly/python/privacy/ for more. Note that these sharing\n- modes require a Plotly account.\n+ directly in the Streamlit app using plotly's offline mode (default).\n+ Use any other sharing mode to send the chart to Plotly chart studio, which\n+ requires an account. See https://plotly.com/chart-studio/ for more information.\n \n **kwargs\n Any argument accepted by Plotly's `plot()` function.\n", "issue": "Plotly privacy page outdated\nThe explanation on the \"sharing\" argument of the plotly_chart functions link to outdated content. I haven't checked how streamlit handles the new distinction between chart_studio and offline plotly but I guess the text should be rewritten\r\n\r\n\r\n**Link to doc page in question (if any):**\r\n[The streamlit.plotly_chart documentation says the following on the **sharing** argument:](https://docs.streamlit.io/api.html?highlight=plotly#streamlit.plotly_chart) \r\n\r\n\r\n> **sharing** ({'streamlit', 'private', 'secret', 'public'}) \u2013 Use \u2018streamlit\u2019 to insert the plot and all its dependencies directly in the Streamlit app, which means it works offline too. This is the default. Use any other sharing mode to send the app to Plotly\u2019s servers, and embed the result into the Streamlit app. See https://plot.ly/python/privacy/ for more. Note that these sharing modes require a Plotly account.\r\n\r\n**What you think the docs should say:**\r\n\r\n**sharing** ({'streamlit', 'private', 'secret', 'public'}) \u2013 Use \u2018streamlit\u2019 to insert the plot and all its dependencies directly in the Streamlit app, which means it uses plotly's offline mode. Use any other sharing mode to use Plotly\u2019s chart studio. 
Nb: Plotly chart studio requires an account.\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit support for Plotly charts.\"\"\"\n\nimport json\nimport urllib.parse\n\nfrom streamlit import caching\nfrom streamlit import type_util\n\nfrom streamlit.logger import get_logger\nfrom streamlit.proto.PlotlyChart_pb2 import PlotlyChart as PlotlyChartProto\n\nLOGGER = get_logger(__name__)\n\nSHARING_MODES = set(\n [\n # This means the plot will be sent to the Streamlit app rather than to\n # Plotly.\n \"streamlit\",\n # The three modes below are for plots that should be hosted in Plotly.\n # These are the names Plotly uses for them.\n \"private\",\n \"public\",\n \"secret\",\n ]\n)\n\n\nclass PlotlyMixin:\n def plotly_chart(\n dg, figure_or_data, use_container_width=False, sharing=\"streamlit\", **kwargs,\n ):\n \"\"\"Display an interactive Plotly chart.\n\n Plotly is a charting library for Python. The arguments to this function\n closely follow the ones for Plotly's `plot()` function. You can find\n more about Plotly at https://plot.ly/python.\n\n Parameters\n ----------\n figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,\n dict/list of plotly.graph_objs.Figure/Data\n\n See https://plot.ly/python/ for examples of graph descriptions.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n sharing : {'streamlit', 'private', 'secret', 'public'}\n Use 'streamlit' to insert the plot and all its dependencies\n directly in the Streamlit app, which means it works offline too.\n This is the default.\n Use any other sharing mode to send the app to Plotly's servers,\n and embed the result into the Streamlit app. See\n https://plot.ly/python/privacy/ for more. Note that these sharing\n modes require a Plotly account.\n\n **kwargs\n Any argument accepted by Plotly's `plot()` function.\n\n\n To show Plotly charts in Streamlit, call `st.plotly_chart`\n wherever you would call Plotly's `py.plot` or `py.iplot`.\n\n Example\n -------\n\n The example below comes straight from the examples at\n https://plot.ly/python:\n\n >>> import streamlit as st\n >>> import plotly.figure_factory as ff\n >>> import numpy as np\n >>>\n >>> # Add histogram data\n >>> x1 = np.random.randn(200) - 2\n >>> x2 = np.random.randn(200)\n >>> x3 = np.random.randn(200) + 2\n >>>\n >>> # Group data together\n >>> hist_data = [x1, x2, x3]\n >>>\n >>> group_labels = ['Group 1', 'Group 2', 'Group 3']\n >>>\n >>> # Create distplot with custom bin_size\n >>> fig = ff.create_distplot(\n ... hist_data, group_labels, bin_size=[.1, .25, .5])\n >>>\n >>> # Plot!\n >>> st.plotly_chart(fig, use_container_width=True)\n\n .. output::\n https://static.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ\n height: 400px\n\n \"\"\"\n # NOTE: \"figure_or_data\" is the name used in Plotly's .plot() method\n # for their main parameter. 
I don't like the name, but it's best to\n # keep it in sync with what Plotly calls it.\n import streamlit.elements.plotly_chart as plotly_chart\n\n plotly_chart_proto = PlotlyChartProto()\n marshall(\n plotly_chart_proto, figure_or_data, use_container_width, sharing, **kwargs\n )\n return dg._enqueue(\"plotly_chart\", plotly_chart_proto) # type: ignore\n\n\ndef marshall(proto, figure_or_data, use_container_width, sharing, **kwargs):\n \"\"\"Marshall a proto with a Plotly spec.\n\n See DeltaGenerator.plotly_chart for docs.\n \"\"\"\n # NOTE: \"figure_or_data\" is the name used in Plotly's .plot() method\n # for their main parameter. I don't like the name, but its best to keep\n # it in sync with what Plotly calls it.\n\n import plotly.tools\n\n if type_util.is_type(figure_or_data, \"matplotlib.figure.Figure\"):\n figure = plotly.tools.mpl_to_plotly(figure_or_data)\n\n else:\n figure = plotly.tools.return_figure_from_figure_or_data(\n figure_or_data, validate_figure=True\n )\n\n if not isinstance(sharing, str) or sharing.lower() not in SHARING_MODES:\n raise ValueError(\"Invalid sharing mode for Plotly chart: %s\" % sharing)\n\n proto.use_container_width = use_container_width\n\n if sharing == \"streamlit\":\n import plotly.utils\n\n config = dict(kwargs.get(\"config\", {}))\n # Copy over some kwargs to config dict. Plotly does the same in plot().\n config.setdefault(\"showLink\", kwargs.get(\"show_link\", False))\n config.setdefault(\"linkText\", kwargs.get(\"link_text\", False))\n\n proto.figure.spec = json.dumps(figure, cls=plotly.utils.PlotlyJSONEncoder)\n proto.figure.config = json.dumps(config)\n\n else:\n url = _plot_to_url_or_load_cached_url(\n figure, sharing=sharing, auto_open=False, **kwargs\n )\n proto.url = _get_embed_url(url)\n\n\[email protected]\ndef _plot_to_url_or_load_cached_url(*args, **kwargs):\n \"\"\"Call plotly.plot wrapped in st.cache.\n\n This is so we don't unecessarily upload data to Plotly's SASS if nothing\n changed since the previous upload.\n \"\"\"\n try:\n # Plotly 4 changed its main package.\n import chart_studio.plotly as ply\n except ImportError:\n import plotly.plotly as ply\n\n return ply.plot(*args, **kwargs)\n\n\ndef _get_embed_url(url):\n parsed_url = urllib.parse.urlparse(url)\n\n # Plotly's embed URL is the normal URL plus \".embed\".\n # (Note that our use namedtuple._replace is fine because that's not a\n # private method! 
It just has an underscore to avoid clashing with the\n # tuple field names)\n parsed_embed_url = parsed_url._replace(path=parsed_url.path + \".embed\")\n\n return urllib.parse.urlunparse(parsed_embed_url)\n", "path": "lib/streamlit/elements/plotly_chart.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit support for Plotly charts.\"\"\"\n\nimport json\nimport urllib.parse\n\nfrom streamlit import caching\nfrom streamlit import type_util\n\nfrom streamlit.logger import get_logger\nfrom streamlit.proto.PlotlyChart_pb2 import PlotlyChart as PlotlyChartProto\n\nLOGGER = get_logger(__name__)\n\nSHARING_MODES = set(\n [\n # This means the plot will be sent to the Streamlit app rather than to\n # Plotly.\n \"streamlit\",\n # The three modes below are for plots that should be hosted in Plotly.\n # These are the names Plotly uses for them.\n \"private\",\n \"public\",\n \"secret\",\n ]\n)\n\n\nclass PlotlyMixin:\n def plotly_chart(\n dg,\n figure_or_data,\n use_container_width=False,\n sharing=\"streamlit\",\n **kwargs,\n ):\n \"\"\"Display an interactive Plotly chart.\n\n Plotly is a charting library for Python. The arguments to this function\n closely follow the ones for Plotly's `plot()` function. You can find\n more about Plotly at https://plot.ly/python.\n\n Parameters\n ----------\n figure_or_data : plotly.graph_objs.Figure, plotly.graph_objs.Data,\n dict/list of plotly.graph_objs.Figure/Data\n\n See https://plot.ly/python/ for examples of graph descriptions.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the figure's native `width` value.\n\n sharing : {'streamlit', 'private', 'secret', 'public'}\n Use 'streamlit' to insert the plot and all its dependencies\n directly in the Streamlit app using plotly's offline mode (default).\n Use any other sharing mode to send the chart to Plotly chart studio, which\n requires an account. See https://plotly.com/chart-studio/ for more information.\n\n **kwargs\n Any argument accepted by Plotly's `plot()` function.\n\n\n To show Plotly charts in Streamlit, call `st.plotly_chart`\n wherever you would call Plotly's `py.plot` or `py.iplot`.\n\n Example\n -------\n\n The example below comes straight from the examples at\n https://plot.ly/python:\n\n >>> import streamlit as st\n >>> import plotly.figure_factory as ff\n >>> import numpy as np\n >>>\n >>> # Add histogram data\n >>> x1 = np.random.randn(200) - 2\n >>> x2 = np.random.randn(200)\n >>> x3 = np.random.randn(200) + 2\n >>>\n >>> # Group data together\n >>> hist_data = [x1, x2, x3]\n >>>\n >>> group_labels = ['Group 1', 'Group 2', 'Group 3']\n >>>\n >>> # Create distplot with custom bin_size\n >>> fig = ff.create_distplot(\n ... hist_data, group_labels, bin_size=[.1, .25, .5])\n >>>\n >>> # Plot!\n >>> st.plotly_chart(fig, use_container_width=True)\n\n .. 
output::\n https://static.streamlit.io/0.56.0-xTAd/index.html?id=TuP96xX8JnsoQeUGAPjkGQ\n height: 400px\n\n \"\"\"\n # NOTE: \"figure_or_data\" is the name used in Plotly's .plot() method\n # for their main parameter. I don't like the name, but it's best to\n # keep it in sync with what Plotly calls it.\n import streamlit.elements.plotly_chart as plotly_chart\n\n plotly_chart_proto = PlotlyChartProto()\n marshall(\n plotly_chart_proto, figure_or_data, use_container_width, sharing, **kwargs\n )\n return dg._enqueue(\"plotly_chart\", plotly_chart_proto) # type: ignore\n\n\ndef marshall(proto, figure_or_data, use_container_width, sharing, **kwargs):\n \"\"\"Marshall a proto with a Plotly spec.\n\n See DeltaGenerator.plotly_chart for docs.\n \"\"\"\n # NOTE: \"figure_or_data\" is the name used in Plotly's .plot() method\n # for their main parameter. I don't like the name, but its best to keep\n # it in sync with what Plotly calls it.\n\n import plotly.tools\n\n if type_util.is_type(figure_or_data, \"matplotlib.figure.Figure\"):\n figure = plotly.tools.mpl_to_plotly(figure_or_data)\n\n else:\n figure = plotly.tools.return_figure_from_figure_or_data(\n figure_or_data, validate_figure=True\n )\n\n if not isinstance(sharing, str) or sharing.lower() not in SHARING_MODES:\n raise ValueError(\"Invalid sharing mode for Plotly chart: %s\" % sharing)\n\n proto.use_container_width = use_container_width\n\n if sharing == \"streamlit\":\n import plotly.utils\n\n config = dict(kwargs.get(\"config\", {}))\n # Copy over some kwargs to config dict. Plotly does the same in plot().\n config.setdefault(\"showLink\", kwargs.get(\"show_link\", False))\n config.setdefault(\"linkText\", kwargs.get(\"link_text\", False))\n\n proto.figure.spec = json.dumps(figure, cls=plotly.utils.PlotlyJSONEncoder)\n proto.figure.config = json.dumps(config)\n\n else:\n url = _plot_to_url_or_load_cached_url(\n figure, sharing=sharing, auto_open=False, **kwargs\n )\n proto.url = _get_embed_url(url)\n\n\[email protected]\ndef _plot_to_url_or_load_cached_url(*args, **kwargs):\n \"\"\"Call plotly.plot wrapped in st.cache.\n\n This is so we don't unecessarily upload data to Plotly's SASS if nothing\n changed since the previous upload.\n \"\"\"\n try:\n # Plotly 4 changed its main package.\n import chart_studio.plotly as ply\n except ImportError:\n import plotly.plotly as ply\n\n return ply.plot(*args, **kwargs)\n\n\ndef _get_embed_url(url):\n parsed_url = urllib.parse.urlparse(url)\n\n # Plotly's embed URL is the normal URL plus \".embed\".\n # (Note that our use namedtuple._replace is fine because that's not a\n # private method! It just has an underscore to avoid clashing with the\n # tuple field names)\n parsed_embed_url = parsed_url._replace(path=parsed_url.path + \".embed\")\n\n return urllib.parse.urlunparse(parsed_embed_url)\n", "path": "lib/streamlit/elements/plotly_chart.py"}]}
| 2,633 | 339 |
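The reworded docstring above does not change behaviour, so a short usage sketch still holds under a 2020-era streamlit + plotly install; a chart-studio account is only needed once `sharing` is set to anything other than the default:

```python
# Default mode embeds the figure JSON directly in the app; any other mode
# uploads the figure to Plotly chart studio and embeds the resulting URL.
import plotly.express as px
import streamlit as st

fig = px.scatter(x=[1, 2, 3], y=[3, 1, 2])

st.plotly_chart(fig, use_container_width=True, sharing="streamlit")  # offline/default
# st.plotly_chart(fig, sharing="secret")  # requires a chart-studio account
```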
gh_patches_debug_35642
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1776
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mail relaying (Relayed domain) doesn't work when no Remote Host is configured.
Description:
When a relayed domain is added in the Web Administration Interface without a Remote Host, emails for this domain fail to be relayed. When no Remote Host is configured, Mailu is supposed to relay the email message directly to the relayed domain.
In case it is relevant I use the docker-compose deployment.
Replication steps:
1). Deploy a standard Mailu installation (no special configuration required).
2). In the Web Administration interface, on the Relayed domains page, add a new relayed domain with
Relayed domain name: test.mailu.io
Remote Host: (blank, keep it empty).
3). Configure your email client SMTP settings to use your Mailu server on port 25 without any authentication.
4). Send an email to [email protected]. If you receive the message "server is busy, try again", then Rspamd has greylisted you. Try sending the email again in 10 to 15 minutes. It should be accepted now.
5). Check the postfix logs (docker logs --tail 100 mailu_smtp_1 )
```
Aug 13 22:47:32 mail postfix/smtpd[270]: A1C493E2CE1: client=<redacted>.nl[xx.xxx.xx.xxx]
Aug 13 22:47:32 mail postfix/cleanup[315]: A1C493E2CE1: message-id=<[email protected]>
Aug 13 22:47:33 mail postfix/qmgr[177]: A1C493E2CE1: from=<[email protected]>, size=2765, nrcpt=1 (queue active)
Aug 13 22:47:33 mail postfix/smtpd[270]: disconnect from <redacted>.nl[xx.xxx.xx.xxx] ehlo=2 xclient=0/1 mail=1 rcpt=1 data=1 quit=1 commands=6/7
Aug 13 22:47:33 mail postfix/smtp[316]: fatal: valid hostname or network address required in server description: []
Aug 13 22:47:34 mail postfix/qmgr[177]: warning: private/smtp socket: malformed response
Aug 13 22:47:34 mail postfix/qmgr[177]: warning: transport smtp failure -- see a previous warning/fatal/panic logfile record for the problem description
Aug 13 22:47:34 mail postfix/master[175]: warning: process /usr/libexec/postfix/smtp pid 316 exit status 1
Aug 13 22:47:34 mail postfix/master[175]: warning: /usr/libexec/postfix/smtp: bad command startup -- throttling
Aug 13 22:47:34 mail postfix/error[318]: A1C493E2CE1: to=<[email protected]>, relay=none, delay=1.6, delays=0.59/1/0/0.01, dsn=4.3.0, status=deferred (unknown mail transport error)
```
Workaround:
A workaround is to enter the hostname or IP address of the SMTP server of the relayed domain name as Remote Host. No MX lookup is performed for a hostname entered as Remote Host. For a public domain, you could use the mxtoolbox to lookup the SMTP server. E.g. in my example for test.mailu.io https://mxtoolbox.com/SuperTool.aspx?action=mx%3atest.mailu.io&run=toolpage .
Entering 78.47.92.244 or test.mailu.io as Remote Host will work as a workaround for the above example.
Can relay domains support relaying directly to an LMTP port?
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [ ] docker-compose
- [ ] kubernetes
- [x] docker swarm
### Versions
We're using the `master` docker tag.
## Description
When using relay domains in the web interface, the implementation assumes the use of SMTP. However, I am trying to relay directly using another protocol, namely, LMTP (the receiver is a mailman3 instance).
When I set the remote host of my relay domain to `<host>:8024`, I see `refused to talk to me: 500 Error: command "HELO" not recognized)`. The Mailu postfix relay configuration assumes another postfix on the receiving end.
In my postfix configuration, I see it is possible to support a map such as `lmtp:/<ip>:<port>` and then postfix understands not to start with a `EHLO` command and pass the mail using LMTP (as far as I understand).
So, would it be possible to support LMTP relaying using relay domains?
## Replication Steps
- Setup any relay domain
- Specify a receiving port of 8024
- Setup a receiver on port 8024 expecting LMTP-based requests
## Expected behaviour
Relay domains can support LMTP.
## Logs
```
mail_example_com_smtp.1.zh0tmwq3qnjn@autonomic-swarm | May 23 09:21:25 mail postfix/smtp[378]: 63D4536B052: to=<[email protected]>, relay=lists_example_com_core[10.0.44.41]:8024, delay=1.4, delays=1.3/0.08/0.01/0, dsn=5.0.0, status=bounced (host lists_example_com_core[10.0.52.41] refused to talk to me: 500 Error: command "HELO" not recognized)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/views/postfix.py`
Content:
```
1 from mailu import models
2 from mailu.internal import internal
3
4 import flask
5 import re
6 import srslib
7
8
9 @internal.route("/postfix/domain/<domain_name>")
10 def postfix_mailbox_domain(domain_name):
11 if re.match("^\[.*\]$", domain_name):
12 return flask.abort(404)
13 domain = models.Domain.query.get(domain_name) or \
14 models.Alternative.query.get(domain_name) or \
15 flask.abort(404)
16 return flask.jsonify(domain.name)
17
18
19 @internal.route("/postfix/mailbox/<path:email>")
20 def postfix_mailbox_map(email):
21 user = models.User.query.get(email) or flask.abort(404)
22 return flask.jsonify(user.email)
23
24
25 @internal.route("/postfix/alias/<path:alias>")
26 def postfix_alias_map(alias):
27 localpart, domain_name = models.Email.resolve_domain(alias)
28 if localpart is None:
29 return flask.jsonify(domain_name)
30 destination = models.Email.resolve_destination(localpart, domain_name)
31 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
32
33
34 @internal.route("/postfix/transport/<path:email>")
35 def postfix_transport(email):
36 if email == '*' or re.match("(^|.*@)\[.*\]$", email):
37 return flask.abort(404)
38 localpart, domain_name = models.Email.resolve_domain(email)
39 relay = models.Relay.query.get(domain_name) or flask.abort(404)
40 ret = "smtp:[{0}]".format(relay.smtp)
41 if ":" in relay.smtp:
42 split = relay.smtp.split(':')
43 ret = "smtp:[{0}]:{1}".format(split[0], split[1])
44 return flask.jsonify(ret)
45
46
47 @internal.route("/postfix/recipient/map/<path:recipient>")
48 def postfix_recipient_map(recipient):
49 """ Rewrite the envelope recipient if it is a valid SRS address.
50
51 This is meant for bounces to go back to the original sender.
52 """
53 srs = srslib.SRS(flask.current_app.config["SECRET_KEY"])
54 if srslib.SRS.is_srs_address(recipient):
55 try:
56 return flask.jsonify(srs.reverse(recipient))
57 except srslib.Error as error:
58 return flask.abort(404)
59 return flask.abort(404)
60
61
62 @internal.route("/postfix/sender/map/<path:sender>")
63 def postfix_sender_map(sender):
64 """ Rewrite the envelope sender in case the mail was not emitted by us.
65
66 This is for bounces to come back the reverse path properly.
67 """
68 srs = srslib.SRS(flask.current_app.config["SECRET_KEY"])
69 domain = flask.current_app.config["DOMAIN"]
70 try:
71 localpart, domain_name = models.Email.resolve_domain(sender)
72 except Exception as error:
73 return flask.abort(404)
74 if models.Domain.query.get(domain_name):
75 return flask.abort(404)
76 return flask.jsonify(srs.forward(sender, domain))
77
78
79 @internal.route("/postfix/sender/login/<path:sender>")
80 def postfix_sender_login(sender):
81 localpart, domain_name = models.Email.resolve_domain(sender)
82 if localpart is None:
83 return flask.abort(404)
84 destination = models.Email.resolve_destination(localpart, domain_name, True)
85 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
86
87
88 @internal.route("/postfix/sender/access/<path:sender>")
89 def postfix_sender_access(sender):
90 """ Simply reject any sender that pretends to be from a local domain
91 """
92 if not is_void_address(sender):
93 localpart, domain_name = models.Email.resolve_domain(sender)
94 return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404)
95 else:
96 return flask.abort(404)
97
98
99 def is_void_address(email):
100 '''True if the email is void (null) email address.
101 '''
102 if email.startswith('<') and email.endswith('>'):
103 email = email[1:-1]
104 # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's
105 # consider void any such thing.
106 return '@' not in email
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py
--- a/core/admin/mailu/internal/views/postfix.py
+++ b/core/admin/mailu/internal/views/postfix.py
@@ -2,6 +2,7 @@
from mailu.internal import internal
import flask
+import idna
import re
import srslib
@@ -35,13 +36,67 @@
def postfix_transport(email):
if email == '*' or re.match("(^|.*@)\[.*\]$", email):
return flask.abort(404)
- localpart, domain_name = models.Email.resolve_domain(email)
+ _, domain_name = models.Email.resolve_domain(email)
relay = models.Relay.query.get(domain_name) or flask.abort(404)
- ret = "smtp:[{0}]".format(relay.smtp)
- if ":" in relay.smtp:
- split = relay.smtp.split(':')
- ret = "smtp:[{0}]:{1}".format(split[0], split[1])
- return flask.jsonify(ret)
+ target = relay.smtp.lower()
+ port = None
+ use_lmtp = False
+ use_mx = False
+ # strip prefixes mx: and lmtp:
+ if target.startswith('mx:'):
+ target = target[3:]
+ use_mx = True
+ elif target.startswith('lmtp:'):
+ target = target[5:]
+ use_lmtp = True
+ # split host:port or [host]:port
+ if target.startswith('['):
+ if use_mx or ']' not in target:
+ # invalid target (mx: and [] or missing ])
+ flask.abort(400)
+ host, rest = target[1:].split(']', 1)
+ if rest.startswith(':'):
+ port = rest[1:]
+ elif rest:
+ # invalid target (rest should be :port)
+ flask.abort(400)
+ else:
+ if ':' in target:
+ host, port = target.rsplit(':', 1)
+ else:
+ host = target
+ # default for empty host part is mx:domain
+ if not host:
+ if not use_lmtp:
+ host = relay.name.lower()
+ use_mx = True
+ else:
+ # lmtp: needs a host part
+ flask.abort(400)
+ # detect ipv6 address or encode host
+ if ':' in host:
+ host = f'ipv6:{host}'
+ else:
+ try:
+ host = idna.encode(host).decode('ascii')
+ except idna.IDNAError:
+ # invalid host (fqdn not encodable)
+ flask.abort(400)
+ # validate port
+ if port is not None:
+ try:
+ port = int(port, 10)
+ except ValueError:
+ # invalid port (should be numeric)
+ flask.abort(400)
+ # create transport
+ transport = 'lmtp' if use_lmtp else 'smtp'
+ # use [] when not using MX lookups or host is an ipv6 address
+ if host.startswith('ipv6:') or (not use_lmtp and not use_mx):
+ host = f'[{host}]'
+ # create port suffix
+ port = '' if port is None else f':{port}'
+ return flask.jsonify(f'{transport}:{host}{port}')
@internal.route("/postfix/recipient/map/<path:recipient>")
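As a sanity check on the behaviour the patch above introduces, here is a minimal, self-contained Python sketch of how relay targets would map to Postfix transport strings. It is not the Mailu code: it ignores IDNA encoding, IPv6 literals and the `[host]:port` input form, and the sample targets are placeholders.

```python
# Hypothetical, simplified model of the relay-target parsing added by the patch.
def relay_transport(target: str, domain: str = "example.com") -> str:
    target = target.lower()
    transport, use_mx = "smtp", False
    if target.startswith("lmtp:"):
        transport, target = "lmtp", target[5:]
    elif target.startswith("mx:"):
        use_mx, target = True, target[3:]
    host, _, port = target.partition(":")
    if not host:
        # an empty host falls back to MX delivery for the relayed domain itself
        host, use_mx = domain, True
    if transport == "smtp" and not use_mx:
        host = f"[{host}]"  # brackets tell Postfix to skip the MX lookup
    return f"{transport}:{host}" + (f":{port}" if port else "")

assert relay_transport("smarthost.example.net:587") == "smtp:[smarthost.example.net]:587"
assert relay_transport("mx:lists.example.com") == "smtp:lists.example.com"
assert relay_transport("lmtp:10.0.44.41:8024") == "lmtp:10.0.44.41:8024"
```

The last case is the one the issue asks for: with an `lmtp:` prefix Postfix speaks LMTP (LHLO) to the listener instead of sending `HELO`/`EHLO`.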
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -2,6 +2,7 @@\n from mailu.internal import internal\n \n import flask\n+import idna\n import re\n import srslib\n \n@@ -35,13 +36,67 @@\n def postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n- localpart, domain_name = models.Email.resolve_domain(email)\n+ _, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n- ret = \"smtp:[{0}]\".format(relay.smtp)\n- if \":\" in relay.smtp:\n- split = relay.smtp.split(':')\n- ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n- return flask.jsonify(ret)\n+ target = relay.smtp.lower()\n+ port = None\n+ use_lmtp = False\n+ use_mx = False\n+ # strip prefixes mx: and lmtp:\n+ if target.startswith('mx:'):\n+ target = target[3:]\n+ use_mx = True\n+ elif target.startswith('lmtp:'):\n+ target = target[5:]\n+ use_lmtp = True\n+ # split host:port or [host]:port\n+ if target.startswith('['):\n+ if use_mx or ']' not in target:\n+ # invalid target (mx: and [] or missing ])\n+ flask.abort(400)\n+ host, rest = target[1:].split(']', 1)\n+ if rest.startswith(':'):\n+ port = rest[1:]\n+ elif rest:\n+ # invalid target (rest should be :port)\n+ flask.abort(400)\n+ else:\n+ if ':' in target:\n+ host, port = target.rsplit(':', 1)\n+ else:\n+ host = target\n+ # default for empty host part is mx:domain\n+ if not host:\n+ if not use_lmtp:\n+ host = relay.name.lower()\n+ use_mx = True\n+ else:\n+ # lmtp: needs a host part\n+ flask.abort(400)\n+ # detect ipv6 address or encode host\n+ if ':' in host:\n+ host = f'ipv6:{host}'\n+ else:\n+ try:\n+ host = idna.encode(host).decode('ascii')\n+ except idna.IDNAError:\n+ # invalid host (fqdn not encodable)\n+ flask.abort(400)\n+ # validate port\n+ if port is not None:\n+ try:\n+ port = int(port, 10)\n+ except ValueError:\n+ # invalid port (should be numeric)\n+ flask.abort(400)\n+ # create transport\n+ transport = 'lmtp' if use_lmtp else 'smtp'\n+ # use [] when not using MX lookups or host is an ipv6 address\n+ if host.startswith('ipv6:') or (not use_lmtp and not use_mx):\n+ host = f'[{host}]'\n+ # create port suffix\n+ port = '' if port is None else f':{port}'\n+ return flask.jsonify(f'{transport}:{host}{port}')\n \n \n @internal.route(\"/postfix/recipient/map/<path:recipient>\")\n", "issue": "Mail relaying (Relayed domain) does't work when no Remote Host is configured.\nDescription: \r\nWhen in the Web Administration Interface in Relayed domain, a domain is added without a Remote Host, emails for this domain fail to be relayed. When no Remote Host is configured, Mailu is supposed to relay the email message directly to the relayed domain.\r\n\r\nIn case it is relevant I use the docker-compose deployment.\r\n\r\nReplication steps:\r\n1). Deploy a standard Mailu installation (no special configuration required). \r\n2). In the Web Administration interface, on Relayed domains page add a new relayed domain with\r\nRelayed domain name: test.mailu.io\r\nRemote Host: (blank, keep it empty).\r\n3). Configure your email client SMTP settings to use your Mailu server on port 25 without any authentication.\r\n4). Send an email to [email protected]. If you receive the message, server is busy try again. Then Rspamd has greylisted you. Try sending the email again in 10 to 15 minutes. 
It should be accepted now.\r\n5). Check the postfix logs (docker logs --tail 100 mailu_smtp_1 )\r\n\r\nAug 13 22:47:32 mail postfix/smtpd[270]: A1C493E2CE1: client=<redacted>.nl[xx.xxx.xx.xxx]\r\nAug 13 22:47:32 mail postfix/cleanup[315]: A1C493E2CE1: message-id=<[email protected]>\r\nAug 13 22:47:33 mail postfix/qmgr[177]: A1C493E2CE1: from=<[email protected]>, size=2765, nrcpt=1 (queue active)\r\nAug 13 22:47:33 mail postfix/smtpd[270]: disconnect from <redacted>.nl[xx.xxx.xx.xxx] ehlo=2 xclient=0/1 mail=1 rcpt=1 data=1 quit=1 commands=6/7\r\n**Aug 13 22:47:33 mail postfix/smtp[316]: fatal: valid hostname or network address required in server description: []\r\nAug 13 22:47:34 mail postfix/qmgr[177]: warning: private/smtp socket: malformed response**\r\nAug 13 22:47:34 mail postfix/qmgr[177]: warning: transport smtp failure -- see a previous warning/fatal/panic logfile record for the problem description\r\nAug 13 22:47:34 mail postfix/master[175]: warning: process /usr/libexec/postfix/smtp pid 316 exit status 1\r\nAug 13 22:47:34 mail postfix/master[175]: warning: /usr/libexec/postfix/smtp: bad command startup -- throttling\r\nAug 13 22:47:34 mail postfix/error[318]: A1C493E2CE1: to=<[email protected]>, relay=none, delay=1.6, delays=0.59/1/0/0.01, dsn=4.3.0, status=deferred (unknown mail transport error)\r\n\r\nWorkaround:\r\nA workaround is to enter the hostname or IP address of the SMTP server of the relayed domain name as Remote Host. No MX lookup is performed for a hostname entered as Remote Host. For a public domain, you could use the mxtoolbox to lookup the SMTP server. E.g. in my example for test.mailu.io https://mxtoolbox.com/SuperTool.aspx?action=mx%3atest.mailu.io&run=toolpage .\r\n\r\nEntering 78.47.92.244 or test.mailu.io as Remote Host will work as a workaround for the above example.\nCan relay domains support relaying directly to a LMTP port?\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n\r\n### Environment\r\n - [ ] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\n\r\nWe're using the `master` docker tag.\r\n\r\n## Description\r\n\r\nWhen using relay domains in the web interface, the implementation assumes the use of SMTP. However, I am trying to relay directly using another protocol, namely, LMTP (the receiver is a mailman3 instance). \r\n\r\nWhen I see the remote host of my relay domain to `<host>:8024`, I see `refused to talk to me: 500 Error: command \"HELO\" not recognized)`. 
The Mailu postfix relay configuration assumes another postfix on the receiving end.\r\n\r\nIn my postfix configuration, I see it is possible to support a map such as `lmtp:/<ip>:<port>` and then postfix understands not to start with a `EHLO` command and pass the mail using LMTP (as far as I understand).\r\n\r\nSo, would it be possible to support LMTP relaying using relay domains?\r\n\r\n## Replication Steps\r\n\r\n- Setup any relay domain\r\n- Specify a receiving port of 8024\r\n- Setup a receiver on port 8024 expecting LMTP-based requests\r\n\r\n## Expected behaviour\r\n\r\nRelay domains can support LMTP.\r\n\r\n## Logs\r\n\r\n```\r\nmail_example_com_smtp.1.zh0tmwq3qnjn@autonomic-swarm | May 23 09:21:25 mail postfix/smtp[378]: 63D4536B052: to=<[email protected]>, relay=lists_example_com_core[10.0.44.41]:8024, delay=1.4, delays=1.3/0.08/0.01/0, dsn=5.0.0, status=bounced (host lists_example_com_core[10.0.52.41] refused to talk to me: 500 Error: command \"HELO\" not recognized)\r\n```\r\n\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\nimport srslib\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n ret = \"smtp:[{0}]\".format(relay.smtp)\n if \":\" in relay.smtp:\n split = relay.smtp.split(':')\n ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n return flask.jsonify(ret)\n\n\[email protected](\"/postfix/recipient/map/<path:recipient>\")\ndef postfix_recipient_map(recipient):\n \"\"\" Rewrite the envelope recipient if it is a valid SRS address.\n\n This is meant for bounces to go back to the original sender.\n \"\"\"\n srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n if srslib.SRS.is_srs_address(recipient):\n try:\n return flask.jsonify(srs.reverse(recipient))\n except srslib.Error as error:\n return flask.abort(404)\n return flask.abort(404)\n\n\[email protected](\"/postfix/sender/map/<path:sender>\")\ndef postfix_sender_map(sender):\n \"\"\" Rewrite the envelope sender in case the mail was not emitted by us.\n\n This is for bounces to come back the reverse path properly.\n \"\"\"\n srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n domain = flask.current_app.config[\"DOMAIN\"]\n try:\n localpart, domain_name = models.Email.resolve_domain(sender)\n except Exception as error:\n return flask.abort(404)\n if models.Domain.query.get(domain_name):\n return flask.abort(404)\n return 
flask.jsonify(srs.forward(sender, domain))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport idna\nimport re\nimport srslib\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n _, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n target = relay.smtp.lower()\n port = None\n use_lmtp = False\n use_mx = False\n # strip prefixes mx: and lmtp:\n if target.startswith('mx:'):\n target = target[3:]\n use_mx = True\n elif target.startswith('lmtp:'):\n target = target[5:]\n use_lmtp = True\n # split host:port or [host]:port\n if target.startswith('['):\n if use_mx or ']' not in target:\n # invalid target (mx: and [] or missing ])\n flask.abort(400)\n host, rest = target[1:].split(']', 1)\n if rest.startswith(':'):\n port = rest[1:]\n elif rest:\n # invalid target (rest should be :port)\n flask.abort(400)\n else:\n if ':' in target:\n host, port = target.rsplit(':', 1)\n else:\n host = target\n # default for empty host part is mx:domain\n if not host:\n if not use_lmtp:\n host = relay.name.lower()\n use_mx = True\n else:\n # lmtp: needs a host part\n flask.abort(400)\n # detect ipv6 address or encode host\n if ':' in host:\n host = f'ipv6:{host}'\n else:\n try:\n host = idna.encode(host).decode('ascii')\n except idna.IDNAError:\n # invalid host (fqdn not 
encodable)\n flask.abort(400)\n # validate port\n if port is not None:\n try:\n port = int(port, 10)\n except ValueError:\n # invalid port (should be numeric)\n flask.abort(400)\n # create transport\n transport = 'lmtp' if use_lmtp else 'smtp'\n # use [] when not using MX lookups or host is an ipv6 address\n if host.startswith('ipv6:') or (not use_lmtp and not use_mx):\n host = f'[{host}]'\n # create port suffix\n port = '' if port is None else f':{port}'\n return flask.jsonify(f'{transport}:{host}{port}')\n\n\[email protected](\"/postfix/recipient/map/<path:recipient>\")\ndef postfix_recipient_map(recipient):\n \"\"\" Rewrite the envelope recipient if it is a valid SRS address.\n\n This is meant for bounces to go back to the original sender.\n \"\"\"\n srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n if srslib.SRS.is_srs_address(recipient):\n try:\n return flask.jsonify(srs.reverse(recipient))\n except srslib.Error as error:\n return flask.abort(404)\n return flask.abort(404)\n\n\[email protected](\"/postfix/sender/map/<path:sender>\")\ndef postfix_sender_map(sender):\n \"\"\" Rewrite the envelope sender in case the mail was not emitted by us.\n\n This is for bounces to come back the reverse path properly.\n \"\"\"\n srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n domain = flask.current_app.config[\"DOMAIN\"]\n try:\n localpart, domain_name = models.Email.resolve_domain(sender)\n except Exception as error:\n return flask.abort(404)\n if models.Domain.query.get(domain_name):\n return flask.abort(404)\n return flask.jsonify(srs.forward(sender, domain))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}]}
| 2,866 | 796 |
gh_patches_debug_20820
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-1419
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sitemaps are not rebuilt
`touch pages/test-1.html`
`nikola build`
`mv pages/test-1.html pages/test-2.html`
`nikola build`
sitemap.xml is not rebuilt. Contains test-1.html (expected) but not test-2.html (woopsy). Output contains both (expected).
`nikola check --clean-files`
Removes test-1.html from output but also does not rebuild sitemap.xml.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/sitemap/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function, absolute_import, unicode_literals
28 import io
29 import datetime
30 import os
31 try:
32 from urlparse import urljoin, urlparse
33 import robotparser as robotparser
34 except ImportError:
35 from urllib.parse import urljoin, urlparse # NOQA
36 import urllib.robotparser as robotparser # NOQA
37
38 from nikola.plugin_categories import LateTask
39 from nikola.utils import config_changed
40
41
42 urlset_header = """<?xml version="1.0" encoding="UTF-8"?>
43 <urlset
44 xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
45 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
46 xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
47 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
48 """
49
50 loc_format = """ <url>
51 <loc>{0}</loc>
52 <lastmod>{1}</lastmod>
53 </url>
54 """
55
56 urlset_footer = "</urlset>"
57
58 sitemapindex_header = """<?xml version="1.0" encoding="UTF-8"?>
59 <sitemapindex
60 xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
61 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
62 xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
63 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
64 """
65
66 sitemap_format = """ <sitemap>
67 <loc>{0}</loc>
68 <lastmod>{1}</lastmod>
69 </sitemap>
70 """
71
72 sitemapindex_footer = "</sitemapindex>"
73
74
75 def get_base_path(base):
76 """returns the path of a base URL if it contains one.
77
78 >>> get_base_path('http://some.site') == '/'
79 True
80 >>> get_base_path('http://some.site/') == '/'
81 True
82 >>> get_base_path('http://some.site/some/sub-path') == '/some/sub-path/'
83 True
84 >>> get_base_path('http://some.site/some/sub-path/') == '/some/sub-path/'
85 True
86 """
87 # first parse the base_url for some path
88 base_parsed = urlparse(base)
89
90 if not base_parsed.path:
91 sub_path = ''
92 else:
93 sub_path = base_parsed.path
94 if sub_path.endswith('/'):
95 return sub_path
96 else:
97 return sub_path + '/'
98
99
100 class Sitemap(LateTask):
101 """Generate a sitemap."""
102
103 name = "sitemap"
104
105 def gen_tasks(self):
106 """Generate a sitemap."""
107 kw = {
108 "base_url": self.site.config["BASE_URL"],
109 "site_url": self.site.config["SITE_URL"],
110 "output_folder": self.site.config["OUTPUT_FOLDER"],
111 "strip_indexes": self.site.config["STRIP_INDEXES"],
112 "index_file": self.site.config["INDEX_FILE"],
113 "sitemap_include_fileless_dirs": self.site.config["SITEMAP_INCLUDE_FILELESS_DIRS"],
114 "mapped_extensions": self.site.config.get('MAPPED_EXTENSIONS', ['.html', '.htm', '.xml', '.rss']),
115 "robots_exclusions": self.site.config["ROBOTS_EXCLUSIONS"]
116 }
117
118 output = kw['output_folder']
119 base_url = kw['base_url']
120 mapped_exts = kw['mapped_extensions']
121
122 output_path = kw['output_folder']
123 sitemapindex_path = os.path.join(output_path, "sitemapindex.xml")
124 sitemap_path = os.path.join(output_path, "sitemap.xml")
125 base_path = get_base_path(kw['base_url'])
126 sitemapindex = {}
127 urlset = {}
128
129 def scan_locs():
130 for root, dirs, files in os.walk(output, followlinks=True):
131 if not dirs and not files and not kw['sitemap_include_fileless_dirs']:
132 continue # Totally empty, not on sitemap
133 path = os.path.relpath(root, output)
134 # ignore the current directory.
135 path = (path.replace(os.sep, '/') + '/').replace('./', '')
136 lastmod = self.get_lastmod(root)
137 loc = urljoin(base_url, base_path + path)
138 if kw['index_file'] in files and kw['strip_indexes']: # ignore folders when not stripping urls
139 urlset[loc] = loc_format.format(loc, lastmod)
140 for fname in files:
141 if kw['strip_indexes'] and fname == kw['index_file']:
142 continue # We already mapped the folder
143 if os.path.splitext(fname)[-1] in mapped_exts:
144 real_path = os.path.join(root, fname)
145 path = os.path.relpath(real_path, output)
146 if path.endswith(kw['index_file']) and kw['strip_indexes']:
147 # ignore index files when stripping urls
148 continue
149 if not robot_fetch(path):
150 continue
151 if path.endswith('.html') or path.endswith('.htm'):
152 try:
153 if u'<!doctype html' not in io.open(real_path, 'r', encoding='utf8').read(1024).lower():
154 # ignores "html" files without doctype
155 # alexa-verify, google-site-verification, etc.
156 continue
157 except UnicodeDecodeError:
158 # ignore ancient files
159 # most non-utf8 files are worthless anyways
160 continue
161 """ put RSS in sitemapindex[] instead of in urlset[], sitemap_path is included after it is generated """
162 if path.endswith('.xml') or path.endswith('.rss'):
163 filehead = io.open(real_path, 'r', encoding='utf8').read(512)
164 if u'<rss' in filehead or (u'<urlset' in filehead and path != sitemap_path):
165 path = path.replace(os.sep, '/')
166 lastmod = self.get_lastmod(real_path)
167 loc = urljoin(base_url, base_path + path)
168 sitemapindex[loc] = sitemap_format.format(loc, lastmod)
169 continue
170 else:
171 continue # ignores all XML files except those presumed to be RSS
172 post = self.site.post_per_file.get(path)
173 if post and (post.is_draft or post.is_private or post.publish_later):
174 continue
175 path = path.replace(os.sep, '/')
176 lastmod = self.get_lastmod(real_path)
177 loc = urljoin(base_url, base_path + path)
178 urlset[loc] = loc_format.format(loc, lastmod)
179
180 def robot_fetch(path):
181 for rule in kw["robots_exclusions"]:
182 robot = robotparser.RobotFileParser()
183 robot.parse(["User-Agent: *", "Disallow: {0}".format(rule)])
184 if not robot.can_fetch("*", '/' + path):
185 return False # not robot food
186 return True
187
188 def write_sitemap():
189 # Have to rescan, because files may have been added between
190 # task dep scanning and task execution
191 with io.open(sitemap_path, 'w+', encoding='utf8') as outf:
192 outf.write(urlset_header)
193 for k in sorted(urlset.keys()):
194 outf.write(urlset[k])
195 outf.write(urlset_footer)
196 sitemap_url = urljoin(base_url, base_path + "sitemap.xml")
197 sitemapindex[sitemap_url] = sitemap_format.format(sitemap_url, self.get_lastmod(sitemap_path))
198
199 def write_sitemapindex():
200 with io.open(sitemapindex_path, 'w+', encoding='utf8') as outf:
201 outf.write(sitemapindex_header)
202 for k in sorted(sitemapindex.keys()):
203 outf.write(sitemapindex[k])
204 outf.write(sitemapindex_footer)
205
206 # Yield a task to calculate the dependencies of the sitemap
207 # Other tasks can depend on this output, instead of having
208 # to scan locations.
209 def scan_locs_task():
210 scan_locs()
211 return {'locations': list(urlset.keys()) + list(sitemapindex.keys())}
212
213 yield {
214 "basename": "_scan_locs",
215 "name": "sitemap",
216 "actions": [(scan_locs_task)]
217 }
218
219 yield self.group_task()
220 yield {
221 "basename": "sitemap",
222 "name": sitemap_path,
223 "targets": [sitemap_path],
224 "actions": [(write_sitemap,)],
225 "uptodate": [config_changed(kw)],
226 "clean": True,
227 "task_dep": ["render_site"],
228 "calc_dep": ["_scan_locs:sitemap"],
229 }
230 yield {
231 "basename": "sitemap",
232 "name": sitemapindex_path,
233 "targets": [sitemapindex_path],
234 "actions": [(write_sitemapindex,)],
235 "uptodate": [config_changed(kw)],
236 "clean": True,
237 "file_dep": [sitemap_path]
238 }
239
240 def get_lastmod(self, p):
241 if self.site.invariant:
242 return '2014-01-01'
243 else:
244 return datetime.datetime.fromtimestamp(os.stat(p).st_mtime).isoformat().split('T')[0]
245
246 if __name__ == '__main__':
247 import doctest
248 doctest.testmod()
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/task/sitemap/__init__.py b/nikola/plugins/task/sitemap/__init__.py
--- a/nikola/plugins/task/sitemap/__init__.py
+++ b/nikola/plugins/task/sitemap/__init__.py
@@ -208,7 +208,27 @@
# to scan locations.
def scan_locs_task():
scan_locs()
- return {'locations': list(urlset.keys()) + list(sitemapindex.keys())}
+
+ # Generate a list of file dependencies for the actual generation
+ # task, so rebuilds are triggered. (Issue #1032)
+ output = kw["output_folder"]
+ file_dep = []
+
+ for i in urlset.keys():
+ p = os.path.join(output, urlparse(i).path.lstrip('/'))
+ if not p.endswith('sitemap.xml') and not p.endswith('/'):
+ file_dep.append(p)
+ if p.endswith('/') and os.path.exists(p + 'index.html'):
+ file_dep.append(p + 'index.html')
+
+ for i in sitemapindex.keys():
+ p = os.path.join(output, urlparse(i).path.lstrip('/'))
+ if not p.endswith('sitemap.xml') and not p.endswith('/'):
+ file_dep.append(p)
+ if p.endswith('/') and os.path.exists(p + 'index.html'):
+ file_dep.append(p + 'index.html')
+
+ return {'file_dep': file_dep}
yield {
"basename": "_scan_locs",
|
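The patch above takes effect because of how doit consumes the result of a `calc_dep` task: only recognised dependency keys (most importantly `file_dep`) are merged into the dependent task, so the earlier `{'locations': ...}` return value did not make the sitemap task watch any files. A minimal, hypothetical doit example of the pattern the fix relies on (not Nikola code; the paths are placeholders):

```python
# Hypothetical dodo.py sketch: a calc_dep task computes file_dep at run time,
# so the target is rebuilt whenever any of the scanned files change.
import glob

def task_scan_pages():
    def scan():
        # the returned dict is merged into the dependent task's metadata
        return {"file_dep": sorted(glob.glob("pages/*.html"))}
    return {"actions": [scan]}

def task_sitemap():
    def build():
        with open("sitemap.xml", "w") as out:
            out.write("<urlset/>\n")
    return {
        "actions": [build],
        "targets": ["sitemap.xml"],
        "calc_dep": ["scan_pages"],
    }
```

Renaming or adding a file under `pages/` changes the computed `file_dep`, so doit should then re-run the `sitemap` task, which is exactly the rebuild the issue reports as missing.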
{"golden_diff": "diff --git a/nikola/plugins/task/sitemap/__init__.py b/nikola/plugins/task/sitemap/__init__.py\n--- a/nikola/plugins/task/sitemap/__init__.py\n+++ b/nikola/plugins/task/sitemap/__init__.py\n@@ -208,7 +208,27 @@\n # to scan locations.\n def scan_locs_task():\n scan_locs()\n- return {'locations': list(urlset.keys()) + list(sitemapindex.keys())}\n+\n+ # Generate a list of file dependencies for the actual generation\n+ # task, so rebuilds are triggered. (Issue #1032)\n+ output = kw[\"output_folder\"]\n+ file_dep = []\n+\n+ for i in urlset.keys():\n+ p = os.path.join(output, urlparse(i).path.lstrip('/'))\n+ if not p.endswith('sitemap.xml') and not p.endswith('/'):\n+ file_dep.append(p)\n+ if p.endswith('/') and os.path.exists(p + 'index.html'):\n+ file_dep.append(p + 'index.html')\n+\n+ for i in sitemapindex.keys():\n+ p = os.path.join(output, urlparse(i).path.lstrip('/'))\n+ if not p.endswith('sitemap.xml') and not p.endswith('/'):\n+ file_dep.append(p)\n+ if p.endswith('/') and os.path.exists(p + 'index.html'):\n+ file_dep.append(p + 'index.html')\n+\n+ return {'file_dep': file_dep}\n \n yield {\n \"basename\": \"_scan_locs\",\n", "issue": "Sitemaps are not rebuilt\n`touch pages/test-1.html\nnikola build\nmv pages/test-1.html pages/test-2.html\nnikola build`\n\nsitemap.xml is not rebuilt. Contains test-1.html (expected) but not test-2.html (woopsy). Output contains both (expected).\n\n`nikola check --clean-files`\n\nRemoves test-1.html from output but also does not rebuild sitemap.xml.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, absolute_import, unicode_literals\nimport io\nimport datetime\nimport os\ntry:\n from urlparse import urljoin, urlparse\n import robotparser as robotparser\nexcept ImportError:\n from urllib.parse import urljoin, urlparse # NOQA\n import urllib.robotparser as robotparser # NOQA\n\nfrom nikola.plugin_categories import LateTask\nfrom nikola.utils import config_changed\n\n\nurlset_header = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset\n xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9\n http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\">\n\"\"\"\n\nloc_format = \"\"\" <url>\n <loc>{0}</loc>\n <lastmod>{1}</lastmod>\n </url>\n\"\"\"\n\nurlset_footer = \"</urlset>\"\n\nsitemapindex_header = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sitemapindex\n xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9\n http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\">\n\"\"\"\n\nsitemap_format = \"\"\" <sitemap>\n <loc>{0}</loc>\n <lastmod>{1}</lastmod>\n </sitemap>\n\"\"\"\n\nsitemapindex_footer = \"</sitemapindex>\"\n\n\ndef get_base_path(base):\n \"\"\"returns the path of a base URL if it contains one.\n\n >>> get_base_path('http://some.site') == '/'\n True\n >>> get_base_path('http://some.site/') == '/'\n True\n >>> get_base_path('http://some.site/some/sub-path') == '/some/sub-path/'\n True\n >>> get_base_path('http://some.site/some/sub-path/') == '/some/sub-path/'\n True\n \"\"\"\n # first parse the base_url for some path\n base_parsed = urlparse(base)\n\n if not base_parsed.path:\n sub_path = ''\n else:\n sub_path = base_parsed.path\n if sub_path.endswith('/'):\n return sub_path\n else:\n return sub_path + '/'\n\n\nclass Sitemap(LateTask):\n \"\"\"Generate a sitemap.\"\"\"\n\n name = \"sitemap\"\n\n def gen_tasks(self):\n \"\"\"Generate a sitemap.\"\"\"\n kw = {\n \"base_url\": self.site.config[\"BASE_URL\"],\n \"site_url\": self.site.config[\"SITE_URL\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"strip_indexes\": self.site.config[\"STRIP_INDEXES\"],\n \"index_file\": self.site.config[\"INDEX_FILE\"],\n \"sitemap_include_fileless_dirs\": self.site.config[\"SITEMAP_INCLUDE_FILELESS_DIRS\"],\n \"mapped_extensions\": self.site.config.get('MAPPED_EXTENSIONS', ['.html', '.htm', '.xml', '.rss']),\n \"robots_exclusions\": self.site.config[\"ROBOTS_EXCLUSIONS\"]\n }\n\n output = kw['output_folder']\n base_url = kw['base_url']\n mapped_exts = kw['mapped_extensions']\n\n output_path = kw['output_folder']\n sitemapindex_path = os.path.join(output_path, \"sitemapindex.xml\")\n sitemap_path = os.path.join(output_path, \"sitemap.xml\")\n base_path = get_base_path(kw['base_url'])\n sitemapindex = {}\n urlset = {}\n\n def scan_locs():\n for root, dirs, files in os.walk(output, followlinks=True):\n if not dirs and not files and not kw['sitemap_include_fileless_dirs']:\n continue # Totally empty, not on sitemap\n path = os.path.relpath(root, output)\n # ignore the current directory.\n path = 
(path.replace(os.sep, '/') + '/').replace('./', '')\n lastmod = self.get_lastmod(root)\n loc = urljoin(base_url, base_path + path)\n if kw['index_file'] in files and kw['strip_indexes']: # ignore folders when not stripping urls\n urlset[loc] = loc_format.format(loc, lastmod)\n for fname in files:\n if kw['strip_indexes'] and fname == kw['index_file']:\n continue # We already mapped the folder\n if os.path.splitext(fname)[-1] in mapped_exts:\n real_path = os.path.join(root, fname)\n path = os.path.relpath(real_path, output)\n if path.endswith(kw['index_file']) and kw['strip_indexes']:\n # ignore index files when stripping urls\n continue\n if not robot_fetch(path):\n continue\n if path.endswith('.html') or path.endswith('.htm'):\n try:\n if u'<!doctype html' not in io.open(real_path, 'r', encoding='utf8').read(1024).lower():\n # ignores \"html\" files without doctype\n # alexa-verify, google-site-verification, etc.\n continue\n except UnicodeDecodeError:\n # ignore ancient files\n # most non-utf8 files are worthless anyways\n continue\n \"\"\" put RSS in sitemapindex[] instead of in urlset[], sitemap_path is included after it is generated \"\"\"\n if path.endswith('.xml') or path.endswith('.rss'):\n filehead = io.open(real_path, 'r', encoding='utf8').read(512)\n if u'<rss' in filehead or (u'<urlset' in filehead and path != sitemap_path):\n path = path.replace(os.sep, '/')\n lastmod = self.get_lastmod(real_path)\n loc = urljoin(base_url, base_path + path)\n sitemapindex[loc] = sitemap_format.format(loc, lastmod)\n continue\n else:\n continue # ignores all XML files except those presumed to be RSS\n post = self.site.post_per_file.get(path)\n if post and (post.is_draft or post.is_private or post.publish_later):\n continue\n path = path.replace(os.sep, '/')\n lastmod = self.get_lastmod(real_path)\n loc = urljoin(base_url, base_path + path)\n urlset[loc] = loc_format.format(loc, lastmod)\n\n def robot_fetch(path):\n for rule in kw[\"robots_exclusions\"]:\n robot = robotparser.RobotFileParser()\n robot.parse([\"User-Agent: *\", \"Disallow: {0}\".format(rule)])\n if not robot.can_fetch(\"*\", '/' + path):\n return False # not robot food\n return True\n\n def write_sitemap():\n # Have to rescan, because files may have been added between\n # task dep scanning and task execution\n with io.open(sitemap_path, 'w+', encoding='utf8') as outf:\n outf.write(urlset_header)\n for k in sorted(urlset.keys()):\n outf.write(urlset[k])\n outf.write(urlset_footer)\n sitemap_url = urljoin(base_url, base_path + \"sitemap.xml\")\n sitemapindex[sitemap_url] = sitemap_format.format(sitemap_url, self.get_lastmod(sitemap_path))\n\n def write_sitemapindex():\n with io.open(sitemapindex_path, 'w+', encoding='utf8') as outf:\n outf.write(sitemapindex_header)\n for k in sorted(sitemapindex.keys()):\n outf.write(sitemapindex[k])\n outf.write(sitemapindex_footer)\n\n # Yield a task to calculate the dependencies of the sitemap\n # Other tasks can depend on this output, instead of having\n # to scan locations.\n def scan_locs_task():\n scan_locs()\n return {'locations': list(urlset.keys()) + list(sitemapindex.keys())}\n\n yield {\n \"basename\": \"_scan_locs\",\n \"name\": \"sitemap\",\n \"actions\": [(scan_locs_task)]\n }\n\n yield self.group_task()\n yield {\n \"basename\": \"sitemap\",\n \"name\": sitemap_path,\n \"targets\": [sitemap_path],\n \"actions\": [(write_sitemap,)],\n \"uptodate\": [config_changed(kw)],\n \"clean\": True,\n \"task_dep\": [\"render_site\"],\n \"calc_dep\": [\"_scan_locs:sitemap\"],\n }\n yield {\n 
\"basename\": \"sitemap\",\n \"name\": sitemapindex_path,\n \"targets\": [sitemapindex_path],\n \"actions\": [(write_sitemapindex,)],\n \"uptodate\": [config_changed(kw)],\n \"clean\": True,\n \"file_dep\": [sitemap_path]\n }\n\n def get_lastmod(self, p):\n if self.site.invariant:\n return '2014-01-01'\n else:\n return datetime.datetime.fromtimestamp(os.stat(p).st_mtime).isoformat().split('T')[0]\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n", "path": "nikola/plugins/task/sitemap/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, absolute_import, unicode_literals\nimport io\nimport datetime\nimport os\ntry:\n from urlparse import urljoin, urlparse\n import robotparser as robotparser\nexcept ImportError:\n from urllib.parse import urljoin, urlparse # NOQA\n import urllib.robotparser as robotparser # NOQA\n\nfrom nikola.plugin_categories import LateTask\nfrom nikola.utils import config_changed\n\n\nurlset_header = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset\n xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9\n http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\">\n\"\"\"\n\nloc_format = \"\"\" <url>\n <loc>{0}</loc>\n <lastmod>{1}</lastmod>\n </url>\n\"\"\"\n\nurlset_footer = \"</urlset>\"\n\nsitemapindex_header = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sitemapindex\n xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.sitemaps.org/schemas/sitemap/0.9\n http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd\">\n\"\"\"\n\nsitemap_format = \"\"\" <sitemap>\n <loc>{0}</loc>\n <lastmod>{1}</lastmod>\n </sitemap>\n\"\"\"\n\nsitemapindex_footer = \"</sitemapindex>\"\n\n\ndef get_base_path(base):\n \"\"\"returns the path of a base URL if it contains one.\n\n >>> get_base_path('http://some.site') == '/'\n True\n >>> get_base_path('http://some.site/') == '/'\n True\n >>> get_base_path('http://some.site/some/sub-path') == '/some/sub-path/'\n True\n >>> get_base_path('http://some.site/some/sub-path/') == '/some/sub-path/'\n True\n \"\"\"\n # first parse the base_url for some path\n base_parsed = urlparse(base)\n\n if not base_parsed.path:\n 
sub_path = ''\n else:\n sub_path = base_parsed.path\n if sub_path.endswith('/'):\n return sub_path\n else:\n return sub_path + '/'\n\n\nclass Sitemap(LateTask):\n \"\"\"Generate a sitemap.\"\"\"\n\n name = \"sitemap\"\n\n def gen_tasks(self):\n \"\"\"Generate a sitemap.\"\"\"\n kw = {\n \"base_url\": self.site.config[\"BASE_URL\"],\n \"site_url\": self.site.config[\"SITE_URL\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"strip_indexes\": self.site.config[\"STRIP_INDEXES\"],\n \"index_file\": self.site.config[\"INDEX_FILE\"],\n \"sitemap_include_fileless_dirs\": self.site.config[\"SITEMAP_INCLUDE_FILELESS_DIRS\"],\n \"mapped_extensions\": self.site.config.get('MAPPED_EXTENSIONS', ['.html', '.htm', '.xml', '.rss']),\n \"robots_exclusions\": self.site.config[\"ROBOTS_EXCLUSIONS\"]\n }\n\n output = kw['output_folder']\n base_url = kw['base_url']\n mapped_exts = kw['mapped_extensions']\n\n output_path = kw['output_folder']\n sitemapindex_path = os.path.join(output_path, \"sitemapindex.xml\")\n sitemap_path = os.path.join(output_path, \"sitemap.xml\")\n base_path = get_base_path(kw['base_url'])\n sitemapindex = {}\n urlset = {}\n\n def scan_locs():\n for root, dirs, files in os.walk(output, followlinks=True):\n if not dirs and not files and not kw['sitemap_include_fileless_dirs']:\n continue # Totally empty, not on sitemap\n path = os.path.relpath(root, output)\n # ignore the current directory.\n path = (path.replace(os.sep, '/') + '/').replace('./', '')\n lastmod = self.get_lastmod(root)\n loc = urljoin(base_url, base_path + path)\n if kw['index_file'] in files and kw['strip_indexes']: # ignore folders when not stripping urls\n urlset[loc] = loc_format.format(loc, lastmod)\n for fname in files:\n if kw['strip_indexes'] and fname == kw['index_file']:\n continue # We already mapped the folder\n if os.path.splitext(fname)[-1] in mapped_exts:\n real_path = os.path.join(root, fname)\n path = os.path.relpath(real_path, output)\n if path.endswith(kw['index_file']) and kw['strip_indexes']:\n # ignore index files when stripping urls\n continue\n if not robot_fetch(path):\n continue\n if path.endswith('.html') or path.endswith('.htm'):\n try:\n if u'<!doctype html' not in io.open(real_path, 'r', encoding='utf8').read(1024).lower():\n # ignores \"html\" files without doctype\n # alexa-verify, google-site-verification, etc.\n continue\n except UnicodeDecodeError:\n # ignore ancient files\n # most non-utf8 files are worthless anyways\n continue\n \"\"\" put RSS in sitemapindex[] instead of in urlset[], sitemap_path is included after it is generated \"\"\"\n if path.endswith('.xml') or path.endswith('.rss'):\n filehead = io.open(real_path, 'r', encoding='utf8').read(512)\n if u'<rss' in filehead or (u'<urlset' in filehead and path != sitemap_path):\n path = path.replace(os.sep, '/')\n lastmod = self.get_lastmod(real_path)\n loc = urljoin(base_url, base_path + path)\n sitemapindex[loc] = sitemap_format.format(loc, lastmod)\n continue\n else:\n continue # ignores all XML files except those presumed to be RSS\n post = self.site.post_per_file.get(path)\n if post and (post.is_draft or post.is_private or post.publish_later):\n continue\n path = path.replace(os.sep, '/')\n lastmod = self.get_lastmod(real_path)\n loc = urljoin(base_url, base_path + path)\n urlset[loc] = loc_format.format(loc, lastmod)\n\n def robot_fetch(path):\n for rule in kw[\"robots_exclusions\"]:\n robot = robotparser.RobotFileParser()\n robot.parse([\"User-Agent: *\", \"Disallow: {0}\".format(rule)])\n if not 
robot.can_fetch(\"*\", '/' + path):\n return False # not robot food\n return True\n\n def write_sitemap():\n # Have to rescan, because files may have been added between\n # task dep scanning and task execution\n with io.open(sitemap_path, 'w+', encoding='utf8') as outf:\n outf.write(urlset_header)\n for k in sorted(urlset.keys()):\n outf.write(urlset[k])\n outf.write(urlset_footer)\n sitemap_url = urljoin(base_url, base_path + \"sitemap.xml\")\n sitemapindex[sitemap_url] = sitemap_format.format(sitemap_url, self.get_lastmod(sitemap_path))\n\n def write_sitemapindex():\n with io.open(sitemapindex_path, 'w+', encoding='utf8') as outf:\n outf.write(sitemapindex_header)\n for k in sorted(sitemapindex.keys()):\n outf.write(sitemapindex[k])\n outf.write(sitemapindex_footer)\n\n # Yield a task to calculate the dependencies of the sitemap\n # Other tasks can depend on this output, instead of having\n # to scan locations.\n def scan_locs_task():\n scan_locs()\n\n # Generate a list of file dependencies for the actual generation\n # task, so rebuilds are triggered. (Issue #1032)\n output = kw[\"output_folder\"]\n file_dep = []\n\n for i in urlset.keys():\n p = os.path.join(output, urlparse(i).path.lstrip('/'))\n if not p.endswith('sitemap.xml') and not p.endswith('/'):\n file_dep.append(p)\n if p.endswith('/') and os.path.exists(p + 'index.html'):\n file_dep.append(p + 'index.html')\n\n for i in sitemapindex.keys():\n p = os.path.join(output, urlparse(i).path.lstrip('/'))\n if not p.endswith('sitemap.xml') and not p.endswith('/'):\n file_dep.append(p)\n if p.endswith('/') and os.path.exists(p + 'index.html'):\n file_dep.append(p + 'index.html')\n\n return {'file_dep': file_dep}\n\n yield {\n \"basename\": \"_scan_locs\",\n \"name\": \"sitemap\",\n \"actions\": [(scan_locs_task)]\n }\n\n yield self.group_task()\n yield {\n \"basename\": \"sitemap\",\n \"name\": sitemap_path,\n \"targets\": [sitemap_path],\n \"actions\": [(write_sitemap,)],\n \"uptodate\": [config_changed(kw)],\n \"clean\": True,\n \"task_dep\": [\"render_site\"],\n \"calc_dep\": [\"_scan_locs:sitemap\"],\n }\n yield {\n \"basename\": \"sitemap\",\n \"name\": sitemapindex_path,\n \"targets\": [sitemapindex_path],\n \"actions\": [(write_sitemapindex,)],\n \"uptodate\": [config_changed(kw)],\n \"clean\": True,\n \"file_dep\": [sitemap_path]\n }\n\n def get_lastmod(self, p):\n if self.site.invariant:\n return '2014-01-01'\n else:\n return datetime.datetime.fromtimestamp(os.stat(p).st_mtime).isoformat().split('T')[0]\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n", "path": "nikola/plugins/task/sitemap/__init__.py"}]}
| 3,232 | 334 |
gh_patches_debug_8439
|
rasdani/github-patches
|
git_diff
|
google__osv.dev-394
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
V2 UI: Grouped versions on vulnerability page
Group by the first component leading up to the first period. e.g. "1.", "2."

If no period, maybe just group this under something generic.
--- END ISSUE ---
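A minimal, hypothetical sketch (not taken from the OSV codebase) of the grouping rule described in the issue above: bucket each version by everything up to and including the first period, with a generic bucket for versions that contain no period.

```python
from collections import defaultdict

def group_versions(versions):
    groups = defaultdict(list)
    for version in versions:
        prefix, dot, _ = version.partition(".")
        # "1.2.3" -> "1.", "2.0" -> "2.", "abcdef" -> generic bucket
        groups[prefix + "." if dot else "Other"].append(version)
    return dict(groups)

print(group_versions(["1.0.0", "1.2.3", "2.0", "abcdef"]))
# {'1.': ['1.0.0', '1.2.3'], '2.': ['2.0'], 'Other': ['abcdef']}
```

In an actual fix this grouping would most likely happen wherever the vulnerability page prepares its template data.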
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcp/appengine/frontend_handlers.py`
Content:
```
1 # Copyright 2021 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Handlers for the OSV web frontend."""
15
16 import os
17 import math
18
19 from flask import abort
20 from flask import Blueprint
21 from flask import jsonify
22 from flask import make_response
23 from flask import render_template
24 from flask import request
25
26 import cache
27 import osv
28 import rate_limiter
29 import source_mapper
30 import utils
31
32 blueprint = Blueprint('frontend_handlers', __name__)
33
34 _BACKEND_ROUTE = '/backend'
35 _PAGE_SIZE = 16
36 _PAGE_LOOKAHEAD = 4
37 _REQUESTS_PER_MIN = 30
38
39 if utils.is_prod():
40 redis_host = os.environ.get('REDISHOST', 'localhost')
41 redis_port = int(os.environ.get('REDISPORT', 6379))
42 limiter = rate_limiter.RateLimiter(
43 redis_host, redis_port, requests_per_min=_REQUESTS_PER_MIN)
44
45 @blueprint.before_request
46 def check_rate_limit():
47 ip_addr = request.headers.get('X-Appengine-User-Ip', 'unknown')
48 if not limiter.check_request(ip_addr):
49 abort(429)
50
51
52 @blueprint.before_request
53 def check_cors_preflight():
54 """Handle CORS preflight requests."""
55 if request.method != 'OPTIONS':
56 return None
57
58 response = make_response()
59 response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')
60 response.headers.add('Access-Control-Allow-Methods', '*')
61 response.headers.add('Access-Control-Allow-Headers', '*')
62 return response
63
64
65 @blueprint.after_request
66 def add_cors_headers(response):
67 """Add CORS headers."""
68 response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')
69 return response
70
71
72 @blueprint.route('/')
73 def index():
74 """Main page."""
75 return render_template('index.html')
76
77
78 @blueprint.route('/v2/')
79 def index_v2():
80 return render_template(
81 'home.html', ecosystem_counts=osv_get_ecosystem_counts())
82
83
84 @blueprint.route('/v2/about')
85 def about():
86 return render_template('about.html')
87
88
89 @blueprint.route('/v2/list')
90 def list_vulnerabilities():
91 """Main page."""
92 query = request.args.get('q', '')
93 page = int(request.args.get('page', 1))
94 ecosystem = request.args.get('ecosystem')
95 results = osv_query(query, page, False, ecosystem)
96
97 # Fetch ecosystems by default. As an optimization, skip when rendering page
98 # fragments.
99 ecosystem_counts = osv_get_ecosystem_counts(
100 ) if not request.headers.get('Turbo-Frame') else None
101
102 return render_template(
103 'list.html',
104 page=page,
105 query=query,
106 selected_ecosystem=ecosystem,
107 ecosystem_counts=ecosystem_counts,
108 vulnerabilities=results['items'])
109
110
111 @blueprint.route('/v2/vulnerability/<vuln_id>')
112 def vulnerability(vuln_id):
113 """Vulnerability page."""
114 vuln = osv_get_by_id(vuln_id)
115 return render_template('vulnerability.html', vulnerability=vuln)
116
117
118 def bug_to_response(bug, detailed=True):
119 """Convert a Bug entity to a response object."""
120 response = osv.vulnerability_to_dict(bug.to_vulnerability())
121 response.update({
122 'isFixed': bug.is_fixed,
123 'invalid': bug.status == osv.BugStatus.INVALID
124 })
125
126 if detailed:
127 add_links(response)
128 add_source_info(bug, response)
129 return response
130
131
132 def add_links(bug):
133 """Add VCS links where possible."""
134
135 for entry in bug.get('affected', []):
136 for i, affected_range in enumerate(entry.get('ranges', [])):
137 affected_range['id'] = i
138 if affected_range['type'] != 'GIT':
139 continue
140
141 repo_url = affected_range.get('repo')
142 if not repo_url:
143 continue
144
145 for event in affected_range.get('events', []):
146 if event.get('introduced'):
147 event['introduced_link'] = _commit_to_link(repo_url,
148 event['introduced'])
149 continue
150
151 if event.get('fixed'):
152 event['fixed_link'] = _commit_to_link(repo_url, event['fixed'])
153 continue
154
155 if event.get('limit'):
156 event['limit_link'] = _commit_to_link(repo_url, event['limit'])
157 continue
158
159
160 def add_source_info(bug, response):
161 """Add source information to `response`."""
162 if bug.source_of_truth == osv.SourceOfTruth.INTERNAL:
163 response['source'] = 'INTERNAL'
164 return
165
166 source_repo = osv.get_source_repository(bug.source)
167 if not source_repo or not source_repo.link:
168 return
169
170 source_path = osv.source_path(source_repo, bug)
171 response['source'] = source_repo.link + source_path
172 response['source_link'] = response['source']
173
174
175 def _commit_to_link(repo_url, commit):
176 """Convert commit to link."""
177 vcs = source_mapper.get_vcs_viewer_for_url(repo_url)
178 if not vcs:
179 return None
180
181 if ':' not in commit:
182 return vcs.get_source_url_for_revision(commit)
183
184 commit_parts = commit.split(':')
185 if len(commit_parts) != 2:
186 return None
187
188 start, end = commit_parts
189 if start == 'unknown':
190 return None
191
192 return vcs.get_source_url_for_revision_diff(start, end)
193
194
195 def osv_get_ecosystems():
196 """Get list of ecosystems."""
197 query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)
198 return sorted([bug.ecosystem[0] for bug in query if bug.ecosystem])
199
200
201 @cache.instance.cached(
202 timeout=24 * 60 * 60, key_prefix='osv_get_ecosystem_counts')
203 def osv_get_ecosystem_counts():
204 """Get count of vulnerabilities per ecosystem."""
205 counts = {}
206 ecosystems = osv_get_ecosystems()
207 for ecosystem in ecosystems:
208 counts[ecosystem] = osv.Bug.query(osv.Bug.ecosystem == ecosystem).count()
209
210 return counts
211
212
213 def osv_query(search_string, page, affected_only, ecosystem):
214 """Run an OSV query."""
215 query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,
216 osv.Bug.public == True) # pylint: disable=singleton-comparison
217
218 if search_string:
219 query = query.filter(osv.Bug.search_indices == search_string.lower())
220
221 if affected_only:
222 query = query.filter(osv.Bug.has_affected == True) # pylint: disable=singleton-comparison
223
224 if ecosystem:
225 query = query.filter(osv.Bug.ecosystem == ecosystem)
226
227 query = query.order(-osv.Bug.last_modified)
228 total = query.count()
229 results = {
230 'total': total,
231 'items': [],
232 }
233
234 bugs, _, _ = query.fetch_page(
235 page_size=_PAGE_SIZE, offset=(page - 1) * _PAGE_SIZE)
236 for bug in bugs:
237 results['items'].append(bug_to_response(bug, detailed=False))
238
239 return results
240
241
242 def osv_get_by_id(vuln_id):
243 """Gets bug details from its id. If invalid, aborts the request."""
244 if not vuln_id:
245 abort(400)
246 return None
247
248 bug = osv.Bug.get_by_id(vuln_id)
249 if not bug:
250 abort(404)
251 return None
252
253 if bug.status == osv.BugStatus.UNPROCESSED:
254 abort(404)
255 return None
256
257 if not bug.public:
258 abort(403)
259 return None
260
261 return bug_to_response(bug)
262
263
264 @blueprint.route(_BACKEND_ROUTE + '/ecosystems')
265 def ecosystems_handler():
266 """Handle query for list of ecosystems."""
267 return jsonify(osv_get_ecosystems())
268
269
270 @blueprint.route(_BACKEND_ROUTE + '/ecosystem-counts')
271 def ecosystem_counts_handler():
272 """Handle query for list of ecosystems."""
273 return jsonify(osv_get_ecosystem_counts())
274
275
276 @blueprint.route(_BACKEND_ROUTE + '/query')
277 def query_handler():
278 """Handle a query."""
279 search_string = request.args.get('search')
280 page = int(request.args.get('page', 1))
281 affected_only = request.args.get('affected_only') == 'true'
282 ecosystem = request.args.get('ecosystem')
283 results = osv_query(search_string, page, affected_only, ecosystem)
284 return jsonify(results)
285
286
287 @blueprint.route(_BACKEND_ROUTE + '/vulnerability')
288 def vulnerability_handler():
289 """Handle a vulnerability request."""
290 vuln_id = request.args.get('id')
291 return jsonify(osv_get_by_id(vuln_id))
292
293
294 @blueprint.app_template_filter('event_type')
295 def event_type(event):
296 """Get the type from an event."""
297 if event.get('introduced'):
298 return 'Introduced'
299 if event.get('fixed'):
300 return 'Fixed'
301 if event.get('limit'):
302 return 'Limit'
303 if event.get('last_affected'):
304 return 'Last affected'
305
306 return None
307
308
309 @blueprint.app_template_filter('event_link')
310 def event_link(event):
311 """Get the link from an event."""
312 if event.get('introduced_link'):
313 return event['introduced_link']
314 if event.get('fixed_link'):
315 return event['fixed_link']
316 if event.get('limit_link'):
317 return event['limit_link']
318 if event.get('last_affected_link'):
319 return event['last_affected_link']
320
321 return None
322
323
324 @blueprint.app_template_filter('event_value')
325 def event_value(event):
326 """Get the value from an event."""
327 if event.get('introduced'):
328 return event['introduced']
329 if event.get('fixed'):
330 return event['fixed']
331 if event.get('limit'):
332 return event['limit']
333 if event.get('last_affected'):
334 return event['last_affected']
335
336 return None
337
338
339 @blueprint.app_template_filter('should_collapse')
340 def should_collapse(affected):
341 """Whether if we should collapse the package tab bar."""
342 total_package_length = sum(
343 [len(entry.get('package', {}).get('name', '')) for entry in affected])
344 return total_package_length > 70 or len(affected) > 5
345
346
347 @blueprint.app_template_filter('log')
348 def logarithm(n):
349 return math.log(n)
350
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gcp/appengine/frontend_handlers.py b/gcp/appengine/frontend_handlers.py
--- a/gcp/appengine/frontend_handlers.py
+++ b/gcp/appengine/frontend_handlers.py
@@ -344,6 +344,22 @@
return total_package_length > 70 or len(affected) > 5
+@blueprint.app_template_filter('group_versions')
+def group_versions(versions):
+ """Group versions by prefix."""
+ groups = {}
+
+ for version in sorted(versions):
+ if '.' not in version:
+ groups.setdefault('Other', []).append(version)
+ continue
+
+ label = version.split('.')[0] + '.*'
+ groups.setdefault(label, []).append(version)
+
+ return groups
+
+
@blueprint.app_template_filter('log')
def logarithm(n):
return math.log(n)
|
{"golden_diff": "diff --git a/gcp/appengine/frontend_handlers.py b/gcp/appengine/frontend_handlers.py\n--- a/gcp/appengine/frontend_handlers.py\n+++ b/gcp/appengine/frontend_handlers.py\n@@ -344,6 +344,22 @@\n return total_package_length > 70 or len(affected) > 5\n \n \[email protected]_template_filter('group_versions')\n+def group_versions(versions):\n+ \"\"\"Group versions by prefix.\"\"\"\n+ groups = {}\n+\n+ for version in sorted(versions):\n+ if '.' not in version:\n+ groups.setdefault('Other', []).append(version)\n+ continue\n+\n+ label = version.split('.')[0] + '.*'\n+ groups.setdefault(label, []).append(version)\n+\n+ return groups\n+\n+\n @blueprint.app_template_filter('log')\n def logarithm(n):\n return math.log(n)\n", "issue": "V2 UI: Grouped versions on vulnerability page\nGroup by the first component leading up to the first period. e.g. \"1.\", \"2.\"\r\n\r\n\r\n\r\nIf no period, maybe just group this under something generic.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Handlers for the OSV web frontend.\"\"\"\n\nimport os\nimport math\n\nfrom flask import abort\nfrom flask import Blueprint\nfrom flask import jsonify\nfrom flask import make_response\nfrom flask import render_template\nfrom flask import request\n\nimport cache\nimport osv\nimport rate_limiter\nimport source_mapper\nimport utils\n\nblueprint = Blueprint('frontend_handlers', __name__)\n\n_BACKEND_ROUTE = '/backend'\n_PAGE_SIZE = 16\n_PAGE_LOOKAHEAD = 4\n_REQUESTS_PER_MIN = 30\n\nif utils.is_prod():\n redis_host = os.environ.get('REDISHOST', 'localhost')\n redis_port = int(os.environ.get('REDISPORT', 6379))\n limiter = rate_limiter.RateLimiter(\n redis_host, redis_port, requests_per_min=_REQUESTS_PER_MIN)\n\n @blueprint.before_request\n def check_rate_limit():\n ip_addr = request.headers.get('X-Appengine-User-Ip', 'unknown')\n if not limiter.check_request(ip_addr):\n abort(429)\n\n\[email protected]_request\ndef check_cors_preflight():\n \"\"\"Handle CORS preflight requests.\"\"\"\n if request.method != 'OPTIONS':\n return None\n\n response = make_response()\n response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')\n response.headers.add('Access-Control-Allow-Methods', '*')\n response.headers.add('Access-Control-Allow-Headers', '*')\n return response\n\n\[email protected]_request\ndef add_cors_headers(response):\n \"\"\"Add CORS headers.\"\"\"\n response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')\n return response\n\n\[email protected]('/')\ndef index():\n \"\"\"Main page.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/v2/')\ndef index_v2():\n return render_template(\n 'home.html', ecosystem_counts=osv_get_ecosystem_counts())\n\n\[email protected]('/v2/about')\ndef about():\n return render_template('about.html')\n\n\[email protected]('/v2/list')\ndef list_vulnerabilities():\n \"\"\"Main page.\"\"\"\n query = request.args.get('q', '')\n page = 
int(request.args.get('page', 1))\n ecosystem = request.args.get('ecosystem')\n results = osv_query(query, page, False, ecosystem)\n\n # Fetch ecosystems by default. As an optimization, skip when rendering page\n # fragments.\n ecosystem_counts = osv_get_ecosystem_counts(\n ) if not request.headers.get('Turbo-Frame') else None\n\n return render_template(\n 'list.html',\n page=page,\n query=query,\n selected_ecosystem=ecosystem,\n ecosystem_counts=ecosystem_counts,\n vulnerabilities=results['items'])\n\n\[email protected]('/v2/vulnerability/<vuln_id>')\ndef vulnerability(vuln_id):\n \"\"\"Vulnerability page.\"\"\"\n vuln = osv_get_by_id(vuln_id)\n return render_template('vulnerability.html', vulnerability=vuln)\n\n\ndef bug_to_response(bug, detailed=True):\n \"\"\"Convert a Bug entity to a response object.\"\"\"\n response = osv.vulnerability_to_dict(bug.to_vulnerability())\n response.update({\n 'isFixed': bug.is_fixed,\n 'invalid': bug.status == osv.BugStatus.INVALID\n })\n\n if detailed:\n add_links(response)\n add_source_info(bug, response)\n return response\n\n\ndef add_links(bug):\n \"\"\"Add VCS links where possible.\"\"\"\n\n for entry in bug.get('affected', []):\n for i, affected_range in enumerate(entry.get('ranges', [])):\n affected_range['id'] = i\n if affected_range['type'] != 'GIT':\n continue\n\n repo_url = affected_range.get('repo')\n if not repo_url:\n continue\n\n for event in affected_range.get('events', []):\n if event.get('introduced'):\n event['introduced_link'] = _commit_to_link(repo_url,\n event['introduced'])\n continue\n\n if event.get('fixed'):\n event['fixed_link'] = _commit_to_link(repo_url, event['fixed'])\n continue\n\n if event.get('limit'):\n event['limit_link'] = _commit_to_link(repo_url, event['limit'])\n continue\n\n\ndef add_source_info(bug, response):\n \"\"\"Add source information to `response`.\"\"\"\n if bug.source_of_truth == osv.SourceOfTruth.INTERNAL:\n response['source'] = 'INTERNAL'\n return\n\n source_repo = osv.get_source_repository(bug.source)\n if not source_repo or not source_repo.link:\n return\n\n source_path = osv.source_path(source_repo, bug)\n response['source'] = source_repo.link + source_path\n response['source_link'] = response['source']\n\n\ndef _commit_to_link(repo_url, commit):\n \"\"\"Convert commit to link.\"\"\"\n vcs = source_mapper.get_vcs_viewer_for_url(repo_url)\n if not vcs:\n return None\n\n if ':' not in commit:\n return vcs.get_source_url_for_revision(commit)\n\n commit_parts = commit.split(':')\n if len(commit_parts) != 2:\n return None\n\n start, end = commit_parts\n if start == 'unknown':\n return None\n\n return vcs.get_source_url_for_revision_diff(start, end)\n\n\ndef osv_get_ecosystems():\n \"\"\"Get list of ecosystems.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n return sorted([bug.ecosystem[0] for bug in query if bug.ecosystem])\n\n\[email protected](\n timeout=24 * 60 * 60, key_prefix='osv_get_ecosystem_counts')\ndef osv_get_ecosystem_counts():\n \"\"\"Get count of vulnerabilities per ecosystem.\"\"\"\n counts = {}\n ecosystems = osv_get_ecosystems()\n for ecosystem in ecosystems:\n counts[ecosystem] = osv.Bug.query(osv.Bug.ecosystem == ecosystem).count()\n\n return counts\n\n\ndef osv_query(search_string, page, affected_only, ecosystem):\n \"\"\"Run an OSV query.\"\"\"\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n if search_string:\n query = query.filter(osv.Bug.search_indices == 
search_string.lower())\n\n if affected_only:\n query = query.filter(osv.Bug.has_affected == True) # pylint: disable=singleton-comparison\n\n if ecosystem:\n query = query.filter(osv.Bug.ecosystem == ecosystem)\n\n query = query.order(-osv.Bug.last_modified)\n total = query.count()\n results = {\n 'total': total,\n 'items': [],\n }\n\n bugs, _, _ = query.fetch_page(\n page_size=_PAGE_SIZE, offset=(page - 1) * _PAGE_SIZE)\n for bug in bugs:\n results['items'].append(bug_to_response(bug, detailed=False))\n\n return results\n\n\ndef osv_get_by_id(vuln_id):\n \"\"\"Gets bug details from its id. If invalid, aborts the request.\"\"\"\n if not vuln_id:\n abort(400)\n return None\n\n bug = osv.Bug.get_by_id(vuln_id)\n if not bug:\n abort(404)\n return None\n\n if bug.status == osv.BugStatus.UNPROCESSED:\n abort(404)\n return None\n\n if not bug.public:\n abort(403)\n return None\n\n return bug_to_response(bug)\n\n\[email protected](_BACKEND_ROUTE + '/ecosystems')\ndef ecosystems_handler():\n \"\"\"Handle query for list of ecosystems.\"\"\"\n return jsonify(osv_get_ecosystems())\n\n\[email protected](_BACKEND_ROUTE + '/ecosystem-counts')\ndef ecosystem_counts_handler():\n \"\"\"Handle query for list of ecosystems.\"\"\"\n return jsonify(osv_get_ecosystem_counts())\n\n\[email protected](_BACKEND_ROUTE + '/query')\ndef query_handler():\n \"\"\"Handle a query.\"\"\"\n search_string = request.args.get('search')\n page = int(request.args.get('page', 1))\n affected_only = request.args.get('affected_only') == 'true'\n ecosystem = request.args.get('ecosystem')\n results = osv_query(search_string, page, affected_only, ecosystem)\n return jsonify(results)\n\n\[email protected](_BACKEND_ROUTE + '/vulnerability')\ndef vulnerability_handler():\n \"\"\"Handle a vulnerability request.\"\"\"\n vuln_id = request.args.get('id')\n return jsonify(osv_get_by_id(vuln_id))\n\n\[email protected]_template_filter('event_type')\ndef event_type(event):\n \"\"\"Get the type from an event.\"\"\"\n if event.get('introduced'):\n return 'Introduced'\n if event.get('fixed'):\n return 'Fixed'\n if event.get('limit'):\n return 'Limit'\n if event.get('last_affected'):\n return 'Last affected'\n\n return None\n\n\[email protected]_template_filter('event_link')\ndef event_link(event):\n \"\"\"Get the link from an event.\"\"\"\n if event.get('introduced_link'):\n return event['introduced_link']\n if event.get('fixed_link'):\n return event['fixed_link']\n if event.get('limit_link'):\n return event['limit_link']\n if event.get('last_affected_link'):\n return event['last_affected_link']\n\n return None\n\n\[email protected]_template_filter('event_value')\ndef event_value(event):\n \"\"\"Get the value from an event.\"\"\"\n if event.get('introduced'):\n return event['introduced']\n if event.get('fixed'):\n return event['fixed']\n if event.get('limit'):\n return event['limit']\n if event.get('last_affected'):\n return event['last_affected']\n\n return None\n\n\[email protected]_template_filter('should_collapse')\ndef should_collapse(affected):\n \"\"\"Whether if we should collapse the package tab bar.\"\"\"\n total_package_length = sum(\n [len(entry.get('package', {}).get('name', '')) for entry in affected])\n return total_package_length > 70 or len(affected) > 5\n\n\[email protected]_template_filter('log')\ndef logarithm(n):\n return math.log(n)\n", "path": "gcp/appengine/frontend_handlers.py"}], "after_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use 
this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Handlers for the OSV web frontend.\"\"\"\n\nimport os\nimport math\n\nfrom flask import abort\nfrom flask import Blueprint\nfrom flask import jsonify\nfrom flask import make_response\nfrom flask import render_template\nfrom flask import request\n\nimport cache\nimport osv\nimport rate_limiter\nimport source_mapper\nimport utils\n\nblueprint = Blueprint('frontend_handlers', __name__)\n\n_BACKEND_ROUTE = '/backend'\n_PAGE_SIZE = 16\n_PAGE_LOOKAHEAD = 4\n_REQUESTS_PER_MIN = 30\n\nif utils.is_prod():\n redis_host = os.environ.get('REDISHOST', 'localhost')\n redis_port = int(os.environ.get('REDISPORT', 6379))\n limiter = rate_limiter.RateLimiter(\n redis_host, redis_port, requests_per_min=_REQUESTS_PER_MIN)\n\n @blueprint.before_request\n def check_rate_limit():\n ip_addr = request.headers.get('X-Appengine-User-Ip', 'unknown')\n if not limiter.check_request(ip_addr):\n abort(429)\n\n\[email protected]_request\ndef check_cors_preflight():\n \"\"\"Handle CORS preflight requests.\"\"\"\n if request.method != 'OPTIONS':\n return None\n\n response = make_response()\n response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')\n response.headers.add('Access-Control-Allow-Methods', '*')\n response.headers.add('Access-Control-Allow-Headers', '*')\n return response\n\n\[email protected]_request\ndef add_cors_headers(response):\n \"\"\"Add CORS headers.\"\"\"\n response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')\n return response\n\n\[email protected]('/')\ndef index():\n \"\"\"Main page.\"\"\"\n return render_template('index.html')\n\n\[email protected]('/v2/')\ndef index_v2():\n return render_template(\n 'home.html', ecosystem_counts=osv_get_ecosystem_counts())\n\n\[email protected]('/v2/about')\ndef about():\n return render_template('about.html')\n\n\[email protected]('/v2/list')\ndef list_vulnerabilities():\n \"\"\"Main page.\"\"\"\n query = request.args.get('q', '')\n page = int(request.args.get('page', 1))\n ecosystem = request.args.get('ecosystem')\n results = osv_query(query, page, False, ecosystem)\n\n # Fetch ecosystems by default. 
As an optimization, skip when rendering page\n # fragments.\n ecosystem_counts = osv_get_ecosystem_counts(\n ) if not request.headers.get('Turbo-Frame') else None\n\n return render_template(\n 'list.html',\n page=page,\n query=query,\n selected_ecosystem=ecosystem,\n ecosystem_counts=ecosystem_counts,\n vulnerabilities=results['items'])\n\n\[email protected]('/v2/vulnerability/<vuln_id>')\ndef vulnerability(vuln_id):\n \"\"\"Vulnerability page.\"\"\"\n vuln = osv_get_by_id(vuln_id)\n return render_template('vulnerability.html', vulnerability=vuln)\n\n\ndef bug_to_response(bug, detailed=True):\n \"\"\"Convert a Bug entity to a response object.\"\"\"\n response = osv.vulnerability_to_dict(bug.to_vulnerability())\n response.update({\n 'isFixed': bug.is_fixed,\n 'invalid': bug.status == osv.BugStatus.INVALID\n })\n\n if detailed:\n add_links(response)\n add_source_info(bug, response)\n return response\n\n\ndef add_links(bug):\n \"\"\"Add VCS links where possible.\"\"\"\n\n for entry in bug.get('affected', []):\n for i, affected_range in enumerate(entry.get('ranges', [])):\n affected_range['id'] = i\n if affected_range['type'] != 'GIT':\n continue\n\n repo_url = affected_range.get('repo')\n if not repo_url:\n continue\n\n for event in affected_range.get('events', []):\n if event.get('introduced'):\n event['introduced_link'] = _commit_to_link(repo_url,\n event['introduced'])\n continue\n\n if event.get('fixed'):\n event['fixed_link'] = _commit_to_link(repo_url, event['fixed'])\n continue\n\n if event.get('limit'):\n event['limit_link'] = _commit_to_link(repo_url, event['limit'])\n continue\n\n\ndef add_source_info(bug, response):\n \"\"\"Add source information to `response`.\"\"\"\n if bug.source_of_truth == osv.SourceOfTruth.INTERNAL:\n response['source'] = 'INTERNAL'\n return\n\n source_repo = osv.get_source_repository(bug.source)\n if not source_repo or not source_repo.link:\n return\n\n source_path = osv.source_path(source_repo, bug)\n response['source'] = source_repo.link + source_path\n response['source_link'] = response['source']\n\n\ndef _commit_to_link(repo_url, commit):\n \"\"\"Convert commit to link.\"\"\"\n vcs = source_mapper.get_vcs_viewer_for_url(repo_url)\n if not vcs:\n return None\n\n if ':' not in commit:\n return vcs.get_source_url_for_revision(commit)\n\n commit_parts = commit.split(':')\n if len(commit_parts) != 2:\n return None\n\n start, end = commit_parts\n if start == 'unknown':\n return None\n\n return vcs.get_source_url_for_revision_diff(start, end)\n\n\ndef osv_get_ecosystems():\n \"\"\"Get list of ecosystems.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n return sorted([bug.ecosystem[0] for bug in query if bug.ecosystem])\n\n\[email protected](\n timeout=24 * 60 * 60, key_prefix='osv_get_ecosystem_counts')\ndef osv_get_ecosystem_counts():\n \"\"\"Get count of vulnerabilities per ecosystem.\"\"\"\n counts = {}\n ecosystems = osv_get_ecosystems()\n for ecosystem in ecosystems:\n counts[ecosystem] = osv.Bug.query(osv.Bug.ecosystem == ecosystem).count()\n\n return counts\n\n\ndef osv_query(search_string, page, affected_only, ecosystem):\n \"\"\"Run an OSV query.\"\"\"\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n if search_string:\n query = query.filter(osv.Bug.search_indices == search_string.lower())\n\n if affected_only:\n query = query.filter(osv.Bug.has_affected == True) # pylint: disable=singleton-comparison\n\n if ecosystem:\n query = 
query.filter(osv.Bug.ecosystem == ecosystem)\n\n query = query.order(-osv.Bug.last_modified)\n total = query.count()\n results = {\n 'total': total,\n 'items': [],\n }\n\n bugs, _, _ = query.fetch_page(\n page_size=_PAGE_SIZE, offset=(page - 1) * _PAGE_SIZE)\n for bug in bugs:\n results['items'].append(bug_to_response(bug, detailed=False))\n\n return results\n\n\ndef osv_get_by_id(vuln_id):\n \"\"\"Gets bug details from its id. If invalid, aborts the request.\"\"\"\n if not vuln_id:\n abort(400)\n return None\n\n bug = osv.Bug.get_by_id(vuln_id)\n if not bug:\n abort(404)\n return None\n\n if bug.status == osv.BugStatus.UNPROCESSED:\n abort(404)\n return None\n\n if not bug.public:\n abort(403)\n return None\n\n return bug_to_response(bug)\n\n\[email protected](_BACKEND_ROUTE + '/ecosystems')\ndef ecosystems_handler():\n \"\"\"Handle query for list of ecosystems.\"\"\"\n return jsonify(osv_get_ecosystems())\n\n\[email protected](_BACKEND_ROUTE + '/ecosystem-counts')\ndef ecosystem_counts_handler():\n \"\"\"Handle query for list of ecosystems.\"\"\"\n return jsonify(osv_get_ecosystem_counts())\n\n\[email protected](_BACKEND_ROUTE + '/query')\ndef query_handler():\n \"\"\"Handle a query.\"\"\"\n search_string = request.args.get('search')\n page = int(request.args.get('page', 1))\n affected_only = request.args.get('affected_only') == 'true'\n ecosystem = request.args.get('ecosystem')\n results = osv_query(search_string, page, affected_only, ecosystem)\n return jsonify(results)\n\n\[email protected](_BACKEND_ROUTE + '/vulnerability')\ndef vulnerability_handler():\n \"\"\"Handle a vulnerability request.\"\"\"\n vuln_id = request.args.get('id')\n return jsonify(osv_get_by_id(vuln_id))\n\n\[email protected]_template_filter('event_type')\ndef event_type(event):\n \"\"\"Get the type from an event.\"\"\"\n if event.get('introduced'):\n return 'Introduced'\n if event.get('fixed'):\n return 'Fixed'\n if event.get('limit'):\n return 'Limit'\n if event.get('last_affected'):\n return 'Last affected'\n\n return None\n\n\[email protected]_template_filter('event_link')\ndef event_link(event):\n \"\"\"Get the link from an event.\"\"\"\n if event.get('introduced_link'):\n return event['introduced_link']\n if event.get('fixed_link'):\n return event['fixed_link']\n if event.get('limit_link'):\n return event['limit_link']\n if event.get('last_affected_link'):\n return event['last_affected_link']\n\n return None\n\n\[email protected]_template_filter('event_value')\ndef event_value(event):\n \"\"\"Get the value from an event.\"\"\"\n if event.get('introduced'):\n return event['introduced']\n if event.get('fixed'):\n return event['fixed']\n if event.get('limit'):\n return event['limit']\n if event.get('last_affected'):\n return event['last_affected']\n\n return None\n\n\[email protected]_template_filter('should_collapse')\ndef should_collapse(affected):\n \"\"\"Whether if we should collapse the package tab bar.\"\"\"\n total_package_length = sum(\n [len(entry.get('package', {}).get('name', '')) for entry in affected])\n return total_package_length > 70 or len(affected) > 5\n\n\[email protected]_template_filter('group_versions')\ndef group_versions(versions):\n \"\"\"Group versions by prefix.\"\"\"\n groups = {}\n\n for version in sorted(versions):\n if '.' 
not in version:\n groups.setdefault('Other', []).append(version)\n continue\n\n label = version.split('.')[0] + '.*'\n groups.setdefault(label, []).append(version)\n\n return groups\n\n\[email protected]_template_filter('log')\ndef logarithm(n):\n return math.log(n)\n", "path": "gcp/appengine/frontend_handlers.py"}]}
| 3,751 | 190 |
gh_patches_debug_15171
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3614
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type hints to toilClean.py
Add type hints to src/toil/utils/toilClean.py so it can be checked by mypy during linting.
Refers to #3568.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-899)
┆Issue Number: TOIL-899
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/utils/toilClean.py`
Content:
```
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Delete a job store used by a previous Toil workflow invocation."""
15 import logging
16
17 from toil.common import Toil, parser_with_common_options
18 from toil.jobStores.abstractJobStore import NoSuchJobStoreException
19 from toil.statsAndLogging import set_logging_from_options
20
21 logger = logging.getLogger(__name__)
22
23
24 def main():
25 parser = parser_with_common_options(jobstore_option=True)
26
27 options = parser.parse_args()
28 set_logging_from_options(options)
29 try:
30 jobstore = Toil.getJobStore(options.jobStore)
31 jobstore.resume()
32 jobstore.destroy()
33 logger.info(f"Successfully deleted the job store: {options.jobStore}")
34 except NoSuchJobStoreException:
35 logger.info(f"Failed to delete the job store: {options.jobStore} is non-existent.")
36 except:
37 logger.info(f"Failed to delete the job store: {options.jobStore}")
38 raise
39
```
Path: `contrib/admin/mypy-with-ignore.py`
Content:
```
1 #!/usr/bin/env python3
2 """
3 Runs mypy and ignores files that do not yet have passing type hints.
4
5 Does not type check test files (any path including "src/toil/test").
6 """
7 import os
8 import subprocess
9 import sys
10
11 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
12 sys.path.insert(0, pkg_root) # noqa
13
14 from src.toil.lib.resources import glob # type: ignore
15
16
17 def main():
18 all_files_to_check = []
19 for d in ['dashboard', 'docker', 'docs', 'src']:
20 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))
21
22 # TODO: Remove these paths as typing is added and mypy conflicts are addressed
23 ignore_paths = [os.path.abspath(f) for f in [
24 'docker/Dockerfile.py',
25 'docs/conf.py',
26 'docs/vendor/sphinxcontrib/fulltoc.py',
27 'docs/vendor/sphinxcontrib/__init__.py',
28 'src/toil/job.py',
29 'src/toil/leader.py',
30 'src/toil/statsAndLogging.py',
31 'src/toil/common.py',
32 'src/toil/realtimeLogger.py',
33 'src/toil/worker.py',
34 'src/toil/serviceManager.py',
35 'src/toil/toilState.py',
36 'src/toil/__init__.py',
37 'src/toil/resource.py',
38 'src/toil/deferred.py',
39 'src/toil/version.py',
40 'src/toil/wdl/utils.py',
41 'src/toil/wdl/wdl_types.py',
42 'src/toil/wdl/wdl_synthesis.py',
43 'src/toil/wdl/wdl_analysis.py',
44 'src/toil/wdl/wdl_functions.py',
45 'src/toil/wdl/toilwdl.py',
46 'src/toil/wdl/versions/draft2.py',
47 'src/toil/wdl/versions/v1.py',
48 'src/toil/wdl/versions/dev.py',
49 'src/toil/provisioners/clusterScaler.py',
50 'src/toil/provisioners/abstractProvisioner.py',
51 'src/toil/provisioners/gceProvisioner.py',
52 'src/toil/provisioners/__init__.py',
53 'src/toil/provisioners/node.py',
54 'src/toil/provisioners/aws/boto2Context.py',
55 'src/toil/provisioners/aws/awsProvisioner.py',
56 'src/toil/provisioners/aws/__init__.py',
57 'src/toil/batchSystems/slurm.py',
58 'src/toil/batchSystems/gridengine.py',
59 'src/toil/batchSystems/singleMachine.py',
60 'src/toil/batchSystems/abstractBatchSystem.py',
61 'src/toil/batchSystems/parasol.py',
62 'src/toil/batchSystems/kubernetes.py',
63 'src/toil/batchSystems/torque.py',
64 'src/toil/batchSystems/options.py',
65 'src/toil/batchSystems/registry.py',
66 'src/toil/batchSystems/lsf.py',
67 'src/toil/batchSystems/__init__.py',
68 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',
69 'src/toil/batchSystems/lsfHelper.py',
70 'src/toil/batchSystems/htcondor.py',
71 'src/toil/batchSystems/mesos/batchSystem.py',
72 'src/toil/batchSystems/mesos/executor.py',
73 'src/toil/batchSystems/mesos/conftest.py',
74 'src/toil/batchSystems/mesos/__init__.py',
75 'src/toil/batchSystems/mesos/test/__init__.py',
76 'src/toil/cwl/conftest.py',
77 'src/toil/cwl/__init__.py',
78 'src/toil/cwl/cwltoil.py',
79 'src/toil/fileStores/cachingFileStore.py',
80 'src/toil/fileStores/abstractFileStore.py',
81 'src/toil/fileStores/nonCachingFileStore.py',
82 'src/toil/fileStores/__init__.py',
83 'src/toil/jobStores/utils.py',
84 'src/toil/jobStores/abstractJobStore.py',
85 'src/toil/jobStores/conftest.py',
86 'src/toil/jobStores/fileJobStore.py',
87 'src/toil/jobStores/__init__.py',
88 'src/toil/jobStores/googleJobStore.py',
89 'src/toil/jobStores/aws/utils.py',
90 'src/toil/jobStores/aws/jobStore.py',
91 'src/toil/jobStores/aws/__init__.py',
92 'src/toil/utils/toilDebugFile.py',
93 'src/toil/utils/toilUpdateEC2Instances.py',
94 'src/toil/utils/toilStatus.py',
95 'src/toil/utils/toilStats.py',
96 'src/toil/utils/toilSshCluster.py',
97 'src/toil/utils/toilMain.py',
98 'src/toil/utils/__init__.py',
99 'src/toil/utils/toilDestroyCluster.py',
100 'src/toil/utils/toilDebugJob.py',
101 'src/toil/utils/toilRsyncCluster.py',
102 'src/toil/utils/toilClean.py',
103 'src/toil/utils/toilLaunchCluster.py',
104 'src/toil/lib/memoize.py',
105 'src/toil/lib/throttle.py',
106 'src/toil/lib/humanize.py',
107 'src/toil/lib/compatibility.py',
108 'src/toil/lib/iterables.py',
109 'src/toil/lib/bioio.py',
110 'src/toil/lib/ec2.py',
111 # 'src/toil/lib/conversions.py',
112 'src/toil/lib/ec2nodes.py',
113 # 'src/toil/lib/misc.py',
114 'src/toil/lib/expando.py',
115 'src/toil/lib/threading.py',
116 'src/toil/lib/exceptions.py',
117 'src/toil/lib/__init__.py',
118 'src/toil/lib/generatedEC2Lists.py',
119 'src/toil/lib/retry.py',
120 'src/toil/lib/objects.py',
121 'src/toil/lib/io.py',
122 'src/toil/lib/docker.py',
123 'src/toil/lib/encryption/_nacl.py',
124 'src/toil/lib/encryption/_dummy.py',
125 'src/toil/lib/encryption/conftest.py',
126 'src/toil/lib/encryption/__init__.py',
127 'src/toil/lib/aws/utils.py',
128 'src/toil/lib/aws/__init__.py'
129 ]]
130
131 filtered_files_to_check = []
132 for file_path in all_files_to_check:
133 if file_path not in ignore_paths and 'src/toil/test' not in file_path:
134 filtered_files_to_check.append(file_path)
135 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?
136 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check
137 p = subprocess.run(args=args, stdout=subprocess.PIPE)
138 result = p.stdout.decode()
139 print(result)
140 if 'Success: no issues found' not in result:
141 exit(1)
142
143
144 if __name__ == '__main__':
145 main()
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py
--- a/contrib/admin/mypy-with-ignore.py
+++ b/contrib/admin/mypy-with-ignore.py
@@ -99,7 +99,6 @@
'src/toil/utils/toilDestroyCluster.py',
'src/toil/utils/toilDebugJob.py',
'src/toil/utils/toilRsyncCluster.py',
- 'src/toil/utils/toilClean.py',
'src/toil/utils/toilLaunchCluster.py',
'src/toil/lib/memoize.py',
'src/toil/lib/throttle.py',
diff --git a/src/toil/utils/toilClean.py b/src/toil/utils/toilClean.py
--- a/src/toil/utils/toilClean.py
+++ b/src/toil/utils/toilClean.py
@@ -21,7 +21,7 @@
logger = logging.getLogger(__name__)
-def main():
+def main() -> None:
parser = parser_with_common_options(jobstore_option=True)
options = parser.parse_args()
|
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -99,7 +99,6 @@\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n- 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\ndiff --git a/src/toil/utils/toilClean.py b/src/toil/utils/toilClean.py\n--- a/src/toil/utils/toilClean.py\n+++ b/src/toil/utils/toilClean.py\n@@ -21,7 +21,7 @@\n logger = logging.getLogger(__name__)\n \n \n-def main():\n+def main() -> None:\n parser = parser_with_common_options(jobstore_option=True)\n \n options = parser.parse_args()\n", "issue": "Add type hints to toilClean.py\nAdd type hints to src/toil/utils/toilClean.py so it can be checked by mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-899)\n\u2506Issue Number: TOIL-899\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Delete a job store used by a previous Toil workflow invocation.\"\"\"\nimport logging\n\nfrom toil.common import Toil, parser_with_common_options\nfrom toil.jobStores.abstractJobStore import NoSuchJobStoreException\nfrom toil.statsAndLogging import set_logging_from_options\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = parser_with_common_options(jobstore_option=True)\n\n options = parser.parse_args()\n set_logging_from_options(options)\n try:\n jobstore = Toil.getJobStore(options.jobStore)\n jobstore.resume()\n jobstore.destroy()\n logger.info(f\"Successfully deleted the job store: {options.jobStore}\")\n except NoSuchJobStoreException:\n logger.info(f\"Failed to delete the job store: {options.jobStore} is non-existent.\")\n except:\n logger.info(f\"Failed to delete the job store: {options.jobStore}\")\n raise\n", "path": "src/toil/utils/toilClean.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 
'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n # 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 
'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}], "after_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Delete a job store used by a previous Toil workflow invocation.\"\"\"\nimport logging\n\nfrom toil.common import Toil, parser_with_common_options\nfrom toil.jobStores.abstractJobStore import NoSuchJobStoreException\nfrom toil.statsAndLogging import set_logging_from_options\n\nlogger = logging.getLogger(__name__)\n\n\ndef main() -> None:\n parser = parser_with_common_options(jobstore_option=True)\n\n options = parser.parse_args()\n set_logging_from_options(options)\n try:\n jobstore = Toil.getJobStore(options.jobStore)\n jobstore.resume()\n jobstore.destroy()\n logger.info(f\"Successfully deleted the job store: {options.jobStore}\")\n except NoSuchJobStoreException:\n logger.info(f\"Failed to delete the job store: {options.jobStore} is non-existent.\")\n except:\n logger.info(f\"Failed to delete the job store: {options.jobStore}\")\n raise\n", "path": "src/toil/utils/toilClean.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 
'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n # 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, 
stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
| 2,670 | 232 |
gh_patches_debug_9381
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-5128
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hook file for sqlalchemy misses hidden import "sqlalchemy.ext.baked"
The provided hook file for sqlalchemy doesn't seem to pick up the hidden import of "sqlalchemy.ext.baked".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-sqlalchemy.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import re
13 from PyInstaller.utils.hooks import (
14 exec_statement, is_module_satisfies, logger)
15 from PyInstaller.compat import open_file, text_read_mode
16 from PyInstaller.lib.modulegraph.modulegraph import SourceModule
17 from PyInstaller.lib.modulegraph.util import guess_encoding
18
19 # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
20 excludedimports = ['sqlalchemy.testing']
21
22 # include most common database bindings
23 # some database bindings are detected and include some
24 # are not. We should explicitly include database backends.
25 hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2']
26
27 # In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
28 if is_module_satisfies('sqlalchemy >= 0.6'):
29 dialects = exec_statement("import sqlalchemy.dialects;print(sqlalchemy.dialects.__all__)")
30 dialects = eval(dialects.strip())
31
32 for n in dialects:
33 hiddenimports.append("sqlalchemy.dialects." + n)
34 # In SQLAlchemy <= 0.5, the "sqlalchemy.databases" package provides dialects.
35 else:
36 databases = exec_statement("import sqlalchemy.databases; print(sqlalchemy.databases.__all__)")
37 databases = eval(databases.strip())
38
39 for n in databases:
40 hiddenimports.append("sqlalchemy.databases." + n)
41
42
43 def hook(hook_api):
44 """
45 SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This
46 decorator does imports. eg:
47
48 @util.dependencies("sqlalchemy.sql.schema")
49
50 This hook scans for included SQLAlchemy modules and then scans those modules
51 for any util.dependencies and marks those modules as hidden imports.
52 """
53
54 if not is_module_satisfies('sqlalchemy >= 0.9'):
55 return
56
57 # this parser is very simplistic but seems to catch all cases as of V1.1
58 depend_regex = re.compile(r'@util.dependencies\([\'"](.*?)[\'"]\)')
59
60 hidden_imports_set = set()
61 known_imports = set()
62 for node in hook_api.module_graph.flatten(start=hook_api.module):
63 if isinstance(node, SourceModule) and \
64 node.identifier.startswith('sqlalchemy.'):
65 known_imports.add(node.identifier)
66 # Determine the encoding of the source file.
67 with open_file(node.filename, 'rb') as f:
68 encoding = guess_encoding(f)
69 # Use that to open the file.
70 with open_file(node.filename, text_read_mode,
71 encoding=encoding) as f:
72 for match in depend_regex.findall(f.read()):
73 hidden_imports_set.add(match)
74
75 hidden_imports_set -= known_imports
76 if len(hidden_imports_set):
77 logger.info(" Found %d sqlalchemy hidden imports",
78 len(hidden_imports_set))
79 hook_api.add_imports(*list(hidden_imports_set))
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py
--- a/PyInstaller/hooks/hook-sqlalchemy.py
+++ b/PyInstaller/hooks/hook-sqlalchemy.py
@@ -22,7 +22,7 @@
# include most common database bindings
# some database bindings are detected and include some
# are not. We should explicitly include database backends.
-hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2']
+hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']
# In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
if is_module_satisfies('sqlalchemy >= 0.6'):
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py\n--- a/PyInstaller/hooks/hook-sqlalchemy.py\n+++ b/PyInstaller/hooks/hook-sqlalchemy.py\n@@ -22,7 +22,7 @@\n # include most common database bindings\n # some database bindings are detected and include some\n # are not. We should explicitly include database backends.\n-hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2']\n+hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n \n # In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\n if is_module_satisfies('sqlalchemy >= 0.6'):\n", "issue": "Hook file for sqlalchemy misses hidden import \"sqlalchemy.ext.baked\"\nThe provided hook file for sqlalchemy doesn't seem to pick up the hidden import of \"sqlalchemy.ext.baked\".\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nfrom PyInstaller.utils.hooks import (\n exec_statement, is_module_satisfies, logger)\nfrom PyInstaller.compat import open_file, text_read_mode\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.lib.modulegraph.util import guess_encoding\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# include most common database bindings\n# some database bindings are detected and include some\n# are not. We should explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2']\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\nif is_module_satisfies('sqlalchemy >= 0.6'):\n dialects = exec_statement(\"import sqlalchemy.dialects;print(sqlalchemy.dialects.__all__)\")\n dialects = eval(dialects.strip())\n\n for n in dialects:\n hiddenimports.append(\"sqlalchemy.dialects.\" + n)\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nelse:\n databases = exec_statement(\"import sqlalchemy.databases; print(sqlalchemy.databases.__all__)\")\n databases = eval(databases.strip())\n\n for n in databases:\n hiddenimports.append(\"sqlalchemy.databases.\" + n)\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This\n decorator does imports. 
eg:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules\n for any util.dependencies and marks those modules as hidden imports.\n \"\"\"\n\n if not is_module_satisfies('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.flatten(start=hook_api.module):\n if isinstance(node, SourceModule) and \\\n node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n # Determine the encoding of the source file.\n with open_file(node.filename, 'rb') as f:\n encoding = guess_encoding(f)\n # Use that to open the file.\n with open_file(node.filename, text_read_mode,\n encoding=encoding) as f:\n for match in depend_regex.findall(f.read()):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\",\n len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nfrom PyInstaller.utils.hooks import (\n exec_statement, is_module_satisfies, logger)\nfrom PyInstaller.compat import open_file, text_read_mode\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.lib.modulegraph.util import guess_encoding\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# include most common database bindings\n# some database bindings are detected and include some\n# are not. We should explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\nif is_module_satisfies('sqlalchemy >= 0.6'):\n dialects = exec_statement(\"import sqlalchemy.dialects;print(sqlalchemy.dialects.__all__)\")\n dialects = eval(dialects.strip())\n\n for n in dialects:\n hiddenimports.append(\"sqlalchemy.dialects.\" + n)\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nelse:\n databases = exec_statement(\"import sqlalchemy.databases; print(sqlalchemy.databases.__all__)\")\n databases = eval(databases.strip())\n\n for n in databases:\n hiddenimports.append(\"sqlalchemy.databases.\" + n)\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This\n decorator does imports. 
eg:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules\n for any util.dependencies and marks those modules as hidden imports.\n \"\"\"\n\n if not is_module_satisfies('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.flatten(start=hook_api.module):\n if isinstance(node, SourceModule) and \\\n node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n # Determine the encoding of the source file.\n with open_file(node.filename, 'rb') as f:\n encoding = guess_encoding(f)\n # Use that to open the file.\n with open_file(node.filename, text_read_mode,\n encoding=encoding) as f:\n for match in depend_regex.findall(f.read()):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\",\n len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}]}
| 1,163 | 175 |
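The golden diff above resolves the issue by appending `sqlalchemy.ext.baked` to the hook's `hiddenimports` list. A minimal sketch of a runtime check that could be run inside the frozen application to confirm the module really gets bundled — the module names come from the hook, but the check itself is an illustrative assumption, not part of the patch:

```python
# Hypothetical smoke test for the frozen executable: if PyInstaller picked up the
# hidden import, importing sqlalchemy.ext.baked succeeds at runtime.
import importlib

for name in ("sqlalchemy", "sqlalchemy.ext.baked"):
    module = importlib.import_module(name)  # raises ImportError if not bundled
    print(f"{name} loaded from {module.__file__}")
```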
gh_patches_debug_20548
|
rasdani/github-patches
|
git_diff
|
napari__napari-2734
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`Viewer.window.add_dock_widget` does not support list of `FunctionGui` from `magicgui`
## 🐛 Bug
The `Viewer.window.add_dock_widget` expects a `Sequence` of `QWidget`s or a single `FunctionGui`; it does not support a `Sequence` of `FunctionGui`. This is possibly a bug from the changes in `magicgui v0.2`.
The programmer can circumvent this issue by sending the `FunctionGui.native` objects, but I'm not sure if this is the expected behavior.
## To Reproduce
Execute this code:
```python3
import napari
from magicgui import magicgui
viewer = napari.Viewer()
@magicgui(call_button='hi')
def _first_widget() -> None:
print('hi')
@magicgui(call_button='bye')
def _second_widget() -> None:
print('bye')
viewer.window.add_dock_widget([_first_widget, _second_widget])
napari.run()
```
Outputting:
```python3
Traceback (most recent call last):
File "error.py", line 14, in <module>
viewer.window.add_dock_widget([_first_widget, _second_widget])
File "/home/jordao/Softwares/napari/napari/_qt/qt_main_window.py", line 994, in add_dock_widget
dock_widget = QtViewerDockWidget(
File "/home/jordao/Softwares/napari/napari/_qt/widgets/qt_viewer_dock_widget.py", line 131, in __init__
widget = combine_widgets(widget, vertical=is_vertical)
File "/home/jordao/Softwares/napari/napari/_qt/utils.py", line 245, in combine_widgets
raise TypeError(
TypeError: "widget" must be a QWidget or a sequence of QWidgets
```
It works if I change to:
```python3
import napari
from magicgui import magicgui
viewer = napari.Viewer()
@magicgui(call_button='hi')
def _first_widget() -> None:
print('hi')
@magicgui(call_button='bye')
def _second_widget() -> None:
print('bye')
viewer.window.add_dock_widget([_first_widget.native, _second_widget.native])
napari.run()
```
## Environment
```
napari: 0.4.7
Platform: Linux-5.8.0-50-generic-x86_64-with-glibc2.10
System: Ubuntu 20.04.2 LTS
Python: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0]
Qt: 5.15.2
PyQt5: 5.15.4
NumPy: 1.20.3
SciPy: 1.6.3
Dask: 2021.05.0
VisPy: 0.6.6
OpenGL:
- GL version: 4.6 (Compatibility Profile) Mesa 20.2.6
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 3440x1440, scale 1.0
Plugins:
- console: 0.0.3
- napari-dexp: 0.1.dev2+g1c663ea.d20210517
- napari-ibacktracker: 0.1.dev1+gf589318.d20210517
- napari-segm-annotation: 0.1.dev1+g7fbf135.d20210518
- svg: 0.1.5
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_qt/utils.py`
Content:
```
1 from contextlib import contextmanager
2 from functools import lru_cache
3 from typing import Sequence, Union
4
5 import numpy as np
6 import qtpy
7 from qtpy.QtCore import QByteArray, QSize, Qt
8 from qtpy.QtGui import QCursor, QDrag, QImage, QPainter, QPixmap
9 from qtpy.QtWidgets import (
10 QApplication,
11 QGraphicsOpacityEffect,
12 QHBoxLayout,
13 QListWidget,
14 QVBoxLayout,
15 QWidget,
16 )
17
18 from ..utils.misc import is_sequence
19 from ..utils.translations import trans
20
21 QBYTE_FLAG = "!QBYTE_"
22
23
24 def is_qbyte(string: str) -> bool:
25 """Check if a string is a QByteArray string.
26
27 Parameters
28 ----------
29 string : bool
30 State string.
31 """
32 return isinstance(string, str) and string.startswith(QBYTE_FLAG)
33
34
35 def qbytearray_to_str(qbyte: QByteArray) -> str:
36 """Convert a window state to a string.
37
38 Used for restoring the state of the main window.
39
40 Parameters
41 ----------
42 qbyte : QByteArray
43 State array.
44 """
45 return QBYTE_FLAG + qbyte.toBase64().data().decode()
46
47
48 def str_to_qbytearray(string: str) -> QByteArray:
49 """Convert a string to a QbyteArray.
50
51 Used for restoring the state of the main window.
52
53 Parameters
54 ----------
55 string : str
56 State string.
57 """
58 if len(string) < len(QBYTE_FLAG) or not is_qbyte(string):
59 raise ValueError(
60 trans._(
61 "Invalid QByte string. QByte strings start with '{QBYTE_FLAG}'",
62 QBYTE_FLAG=QBYTE_FLAG,
63 )
64 )
65
66 return QByteArray.fromBase64(string[len(QBYTE_FLAG) :].encode())
67
68
69 def QImg2array(img):
70 """Convert QImage to an array.
71
72 Parameters
73 ----------
74 img : qtpy.QtGui.QImage
75 QImage to be converted.
76
77 Returns
78 -------
79 arr : array
80 Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
81 upper-left corner of the rendered region.
82 """
83 # Fix when image is provided in wrong format (ex. test on Azure pipelines)
84 if img.format() != QImage.Format_ARGB32:
85 img = img.convertToFormat(QImage.Format_ARGB32)
86 b = img.constBits()
87 h, w, c = img.height(), img.width(), 4
88
89 # As vispy doesn't use qtpy we need to reconcile the differences
90 # between the `QImage` API for `PySide2` and `PyQt5` on how to convert
91 # a QImage to a numpy array.
92 if qtpy.API_NAME == 'PySide2':
93 arr = np.array(b).reshape(h, w, c)
94 else:
95 b.setsize(h * w * c)
96 arr = np.frombuffer(b, np.uint8).reshape(h, w, c)
97
98 # Format of QImage is ARGB32_Premultiplied, but color channels are
99 # reversed.
100 arr = arr[:, :, [2, 1, 0, 3]]
101 return arr
102
103
104 @contextmanager
105 def qt_signals_blocked(obj):
106 """Context manager to temporarily block signals from `obj`"""
107 obj.blockSignals(True)
108 yield
109 obj.blockSignals(False)
110
111
112 @contextmanager
113 def event_hook_removed():
114 """Context manager to temporarily remove the PyQt5 input hook"""
115 from qtpy import QtCore
116
117 if hasattr(QtCore, 'pyqtRemoveInputHook'):
118 QtCore.pyqtRemoveInputHook()
119 try:
120 yield
121 finally:
122 if hasattr(QtCore, 'pyqtRestoreInputHook'):
123 QtCore.pyqtRestoreInputHook()
124
125
126 def disable_with_opacity(obj, widget_list, disabled):
127 """Set enabled state on a list of widgets. If disabled, decrease opacity"""
128 for wdg in widget_list:
129 widget = getattr(obj, wdg)
130 widget.setEnabled(obj.layer.editable)
131 op = QGraphicsOpacityEffect(obj)
132 op.setOpacity(1 if obj.layer.editable else 0.5)
133 widget.setGraphicsEffect(op)
134
135
136 @lru_cache(maxsize=64)
137 def square_pixmap(size):
138 """Create a white/black hollow square pixmap. For use as labels cursor."""
139 size = max(int(size), 1)
140 pixmap = QPixmap(QSize(size, size))
141 pixmap.fill(Qt.transparent)
142 painter = QPainter(pixmap)
143 painter.setPen(Qt.white)
144 painter.drawRect(0, 0, size - 1, size - 1)
145 painter.setPen(Qt.black)
146 painter.drawRect(1, 1, size - 3, size - 3)
147 painter.end()
148 return pixmap
149
150
151 @lru_cache(maxsize=64)
152 def circle_pixmap(size: int):
153 """Create a white/black hollow circle pixmap. For use as labels cursor."""
154 size = max(int(size), 1)
155 pixmap = QPixmap(QSize(size, size))
156 pixmap.fill(Qt.transparent)
157 painter = QPainter(pixmap)
158 painter.setPen(Qt.white)
159 painter.drawEllipse(0, 0, size - 1, size - 1)
160 painter.setPen(Qt.black)
161 painter.drawEllipse(1, 1, size - 3, size - 3)
162 painter.end()
163 return pixmap
164
165
166 def drag_with_pixmap(list_widget: QListWidget) -> QDrag:
167 """Create a QDrag object with a pixmap of the currently select list item.
168
169 This method is useful when you have a QListWidget that displays custom
170 widgets for each QListWidgetItem instance in the list (usually by calling
171 ``QListWidget.setItemWidget(item, widget)``). When used in a
172 ``QListWidget.startDrag`` method, this function creates a QDrag object that
173 shows an image of the item being dragged (rather than an empty rectangle).
174
175 Parameters
176 ----------
177 list_widget : QListWidget
178 The QListWidget for which to create a QDrag object.
179
180 Returns
181 -------
182 QDrag
183 A QDrag instance with a pixmap of the currently selected item.
184
185 Examples
186 --------
187 >>> class QListWidget:
188 ... def startDrag(self, supportedActions):
189 ... drag = drag_with_pixmap(self)
190 ... drag.exec_(supportedActions, Qt.MoveAction)
191
192 """
193 drag = QDrag(list_widget)
194 drag.setMimeData(list_widget.mimeData(list_widget.selectedItems()))
195 size = list_widget.viewport().visibleRegion().boundingRect().size()
196 pixmap = QPixmap(size)
197 pixmap.fill(Qt.transparent)
198 painter = QPainter(pixmap)
199 for index in list_widget.selectedIndexes():
200 rect = list_widget.visualRect(index)
201 painter.drawPixmap(rect, list_widget.viewport().grab(rect))
202 painter.end()
203 drag.setPixmap(pixmap)
204 drag.setHotSpot(list_widget.viewport().mapFromGlobal(QCursor.pos()))
205 return drag
206
207
208 def combine_widgets(
209 widgets: Union[QWidget, Sequence[QWidget]], vertical: bool = False
210 ) -> QWidget:
211 """Combine a list of widgets into a single QWidget with Layout.
212
213 Parameters
214 ----------
215 widgets : QWidget or sequence of QWidget
216 A widget or a list of widgets to combine.
217 vertical : bool, optional
218 Whether the layout should be QVBoxLayout or not, by default
219 QHBoxLayout is used
220
221 Returns
222 -------
223 QWidget
224 If ``widgets`` is a sequence, returns combined QWidget with `.layout`
225 property, otherwise returns the original widget.
226
227 Raises
228 ------
229 TypeError
230 If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``.
231 """
232 if isinstance(getattr(widgets, 'native', None), QWidget):
233 # compatibility with magicgui v0.2.0 which no longer uses QWidgets
234 # directly. Like vispy, the backend widget is at widget.native
235 return widgets.native # type: ignore
236 elif isinstance(widgets, QWidget):
237 return widgets
238 elif is_sequence(widgets) and all(isinstance(i, QWidget) for i in widgets):
239 container = QWidget()
240 container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())
241 for widget in widgets:
242 container.layout().addWidget(widget)
243 return container
244 else:
245 raise TypeError(
246 trans._('"widget" must be a QWidget or a sequence of QWidgets')
247 )
248
249
250 def delete_qapp(app):
251 """Delete a QApplication
252
253 Parameters
254 ----------
255 app : qtpy.QApplication
256 """
257 try:
258 # Pyside2
259 from shiboken2 import delete
260 except ImportError:
261 # PyQt5
262 from sip import delete
263
264 delete(app)
265 # calling a second time is necessary on PySide2...
266 # see: https://bugreports.qt.io/browse/PYSIDE-1470
267 QApplication.instance()
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/_qt/utils.py b/napari/_qt/utils.py
--- a/napari/_qt/utils.py
+++ b/napari/_qt/utils.py
@@ -235,16 +235,21 @@
return widgets.native # type: ignore
elif isinstance(widgets, QWidget):
return widgets
- elif is_sequence(widgets) and all(isinstance(i, QWidget) for i in widgets):
- container = QWidget()
- container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())
- for widget in widgets:
- container.layout().addWidget(widget)
- return container
- else:
- raise TypeError(
- trans._('"widget" must be a QWidget or a sequence of QWidgets')
- )
+ elif is_sequence(widgets):
+ # the same as above, compatibility with magicgui v0.2.0
+ widgets = [
+ i.native if isinstance(getattr(i, 'native', None), QWidget) else i
+ for i in widgets
+ ]
+ if all(isinstance(i, QWidget) for i in widgets):
+ container = QWidget()
+ container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())
+ for widget in widgets:
+ container.layout().addWidget(widget)
+ return container
+ raise TypeError(
+ trans._('"widget" must be a QWidget or a sequence of QWidgets')
+ )
def delete_qapp(app):
|
{"golden_diff": "diff --git a/napari/_qt/utils.py b/napari/_qt/utils.py\n--- a/napari/_qt/utils.py\n+++ b/napari/_qt/utils.py\n@@ -235,16 +235,21 @@\n return widgets.native # type: ignore\n elif isinstance(widgets, QWidget):\n return widgets\n- elif is_sequence(widgets) and all(isinstance(i, QWidget) for i in widgets):\n- container = QWidget()\n- container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())\n- for widget in widgets:\n- container.layout().addWidget(widget)\n- return container\n- else:\n- raise TypeError(\n- trans._('\"widget\" must be a QWidget or a sequence of QWidgets')\n- )\n+ elif is_sequence(widgets):\n+ # the same as above, compatibility with magicgui v0.2.0\n+ widgets = [\n+ i.native if isinstance(getattr(i, 'native', None), QWidget) else i\n+ for i in widgets\n+ ]\n+ if all(isinstance(i, QWidget) for i in widgets):\n+ container = QWidget()\n+ container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())\n+ for widget in widgets:\n+ container.layout().addWidget(widget)\n+ return container\n+ raise TypeError(\n+ trans._('\"widget\" must be a QWidget or a sequence of QWidgets')\n+ )\n \n \n def delete_qapp(app):\n", "issue": "`Viewer.window.add_dock_widget` does not support list of `FunctionGui` from `magicgui`\n## \ud83d\udc1b Bug\r\n\r\nThe `Viewer.window.add_dock_widget` expects a `Sequence` of `QWigets` or a single `FunctionGui`, it does not support a `Sequence` of `FunctionGui`, this possibly a bug from the changes of `magicgui v0.2`.\r\n\r\nThe programmer can circumvent this issue by sending the `FunctionGui.native` objects, but I'm not sure if this is the expected behavior.\r\n\r\n## To Reproduce\r\n\r\nExecute this code:\r\n\r\n```python3\r\nimport napari\r\nfrom magicgui import magicgui\r\n\r\nviewer = napari.Viewer()\r\n\r\n@magicgui(call_button='hi')\r\ndef _first_widget() -> None:\r\n print('hi')\r\n\r\n@magicgui(call_button='bye')\r\ndef _second_widget() -> None:\r\n print('bye')\r\n\r\nviewer.window.add_dock_widget([_first_widget, _second_widget]) \r\nnapari.run()\r\n```\r\n\r\nOutputting:\r\n```python3\r\nTraceback (most recent call last):\r\n File \"error.py\", line 14, in <module>\r\n viewer.window.add_dock_widget([_first_widget, _second_widget])\r\n File \"/home/jordao/Softwares/napari/napari/_qt/qt_main_window.py\", line 994, in add_dock_widget\r\n dock_widget = QtViewerDockWidget(\r\n File \"/home/jordao/Softwares/napari/napari/_qt/widgets/qt_viewer_dock_widget.py\", line 131, in __init__\r\n widget = combine_widgets(widget, vertical=is_vertical)\r\n File \"/home/jordao/Softwares/napari/napari/_qt/utils.py\", line 245, in combine_widgets\r\n raise TypeError(\r\nTypeError: \"widget\" must be a QWidget or a sequence of QWidgets\r\n```\r\n\r\nIt works if I change to:\r\n\r\n```python3\r\nimport napari\r\nfrom magicgui import magicgui\r\n\r\nviewer = napari.Viewer()\r\n\r\n@magicgui(call_button='hi')\r\ndef _first_widget() -> None:\r\n print('hi')\r\n\r\n@magicgui(call_button='bye')\r\ndef _second_widget() -> None:\r\n print('bye')\r\n\r\nviewer.window.add_dock_widget([_first_widget.native, _second_widget.native]) \r\nnapari.run()\r\n```\r\n\r\n## Environment\r\n\r\n```\r\nnapari: 0.4.7\r\nPlatform: Linux-5.8.0-50-generic-x86_64-with-glibc2.10\r\nSystem: Ubuntu 20.04.2 LTS\r\nPython: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0]\r\nQt: 5.15.2\r\nPyQt5: 5.15.4\r\nNumPy: 1.20.3\r\nSciPy: 1.6.3\r\nDask: 2021.05.0\r\nVisPy: 0.6.6\r\n\r\nOpenGL:\r\n- GL version: 4.6 (Compatibility Profile) Mesa 20.2.6\r\n- MAX_TEXTURE_SIZE: 
16384\r\n\r\nScreens:\r\n- screen 1: resolution 3440x1440, scale 1.0\r\n\r\nPlugins:\r\n- console: 0.0.3\r\n- napari-dexp: 0.1.dev2+g1c663ea.d20210517\r\n- napari-ibacktracker: 0.1.dev1+gf589318.d20210517\r\n- napari-segm-annotation: 0.1.dev1+g7fbf135.d20210518\r\n- svg: 0.1.5\r\n```\r\n\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom functools import lru_cache\nfrom typing import Sequence, Union\n\nimport numpy as np\nimport qtpy\nfrom qtpy.QtCore import QByteArray, QSize, Qt\nfrom qtpy.QtGui import QCursor, QDrag, QImage, QPainter, QPixmap\nfrom qtpy.QtWidgets import (\n QApplication,\n QGraphicsOpacityEffect,\n QHBoxLayout,\n QListWidget,\n QVBoxLayout,\n QWidget,\n)\n\nfrom ..utils.misc import is_sequence\nfrom ..utils.translations import trans\n\nQBYTE_FLAG = \"!QBYTE_\"\n\n\ndef is_qbyte(string: str) -> bool:\n \"\"\"Check if a string is a QByteArray string.\n\n Parameters\n ----------\n string : bool\n State string.\n \"\"\"\n return isinstance(string, str) and string.startswith(QBYTE_FLAG)\n\n\ndef qbytearray_to_str(qbyte: QByteArray) -> str:\n \"\"\"Convert a window state to a string.\n\n Used for restoring the state of the main window.\n\n Parameters\n ----------\n qbyte : QByteArray\n State array.\n \"\"\"\n return QBYTE_FLAG + qbyte.toBase64().data().decode()\n\n\ndef str_to_qbytearray(string: str) -> QByteArray:\n \"\"\"Convert a string to a QbyteArray.\n\n Used for restoring the state of the main window.\n\n Parameters\n ----------\n string : str\n State string.\n \"\"\"\n if len(string) < len(QBYTE_FLAG) or not is_qbyte(string):\n raise ValueError(\n trans._(\n \"Invalid QByte string. QByte strings start with '{QBYTE_FLAG}'\",\n QBYTE_FLAG=QBYTE_FLAG,\n )\n )\n\n return QByteArray.fromBase64(string[len(QBYTE_FLAG) :].encode())\n\n\ndef QImg2array(img):\n \"\"\"Convert QImage to an array.\n\n Parameters\n ----------\n img : qtpy.QtGui.QImage\n QImage to be converted.\n\n Returns\n -------\n arr : array\n Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n # Fix when image is provided in wrong format (ex. test on Azure pipelines)\n if img.format() != QImage.Format_ARGB32:\n img = img.convertToFormat(QImage.Format_ARGB32)\n b = img.constBits()\n h, w, c = img.height(), img.width(), 4\n\n # As vispy doesn't use qtpy we need to reconcile the differences\n # between the `QImage` API for `PySide2` and `PyQt5` on how to convert\n # a QImage to a numpy array.\n if qtpy.API_NAME == 'PySide2':\n arr = np.array(b).reshape(h, w, c)\n else:\n b.setsize(h * w * c)\n arr = np.frombuffer(b, np.uint8).reshape(h, w, c)\n\n # Format of QImage is ARGB32_Premultiplied, but color channels are\n # reversed.\n arr = arr[:, :, [2, 1, 0, 3]]\n return arr\n\n\n@contextmanager\ndef qt_signals_blocked(obj):\n \"\"\"Context manager to temporarily block signals from `obj`\"\"\"\n obj.blockSignals(True)\n yield\n obj.blockSignals(False)\n\n\n@contextmanager\ndef event_hook_removed():\n \"\"\"Context manager to temporarily remove the PyQt5 input hook\"\"\"\n from qtpy import QtCore\n\n if hasattr(QtCore, 'pyqtRemoveInputHook'):\n QtCore.pyqtRemoveInputHook()\n try:\n yield\n finally:\n if hasattr(QtCore, 'pyqtRestoreInputHook'):\n QtCore.pyqtRestoreInputHook()\n\n\ndef disable_with_opacity(obj, widget_list, disabled):\n \"\"\"Set enabled state on a list of widgets. 
If disabled, decrease opacity\"\"\"\n for wdg in widget_list:\n widget = getattr(obj, wdg)\n widget.setEnabled(obj.layer.editable)\n op = QGraphicsOpacityEffect(obj)\n op.setOpacity(1 if obj.layer.editable else 0.5)\n widget.setGraphicsEffect(op)\n\n\n@lru_cache(maxsize=64)\ndef square_pixmap(size):\n \"\"\"Create a white/black hollow square pixmap. For use as labels cursor.\"\"\"\n size = max(int(size), 1)\n pixmap = QPixmap(QSize(size, size))\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n painter.setPen(Qt.white)\n painter.drawRect(0, 0, size - 1, size - 1)\n painter.setPen(Qt.black)\n painter.drawRect(1, 1, size - 3, size - 3)\n painter.end()\n return pixmap\n\n\n@lru_cache(maxsize=64)\ndef circle_pixmap(size: int):\n \"\"\"Create a white/black hollow circle pixmap. For use as labels cursor.\"\"\"\n size = max(int(size), 1)\n pixmap = QPixmap(QSize(size, size))\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n painter.setPen(Qt.white)\n painter.drawEllipse(0, 0, size - 1, size - 1)\n painter.setPen(Qt.black)\n painter.drawEllipse(1, 1, size - 3, size - 3)\n painter.end()\n return pixmap\n\n\ndef drag_with_pixmap(list_widget: QListWidget) -> QDrag:\n \"\"\"Create a QDrag object with a pixmap of the currently select list item.\n\n This method is useful when you have a QListWidget that displays custom\n widgets for each QListWidgetItem instance in the list (usually by calling\n ``QListWidget.setItemWidget(item, widget)``). When used in a\n ``QListWidget.startDrag`` method, this function creates a QDrag object that\n shows an image of the item being dragged (rather than an empty rectangle).\n\n Parameters\n ----------\n list_widget : QListWidget\n The QListWidget for which to create a QDrag object.\n\n Returns\n -------\n QDrag\n A QDrag instance with a pixmap of the currently selected item.\n\n Examples\n --------\n >>> class QListWidget:\n ... def startDrag(self, supportedActions):\n ... drag = drag_with_pixmap(self)\n ... drag.exec_(supportedActions, Qt.MoveAction)\n\n \"\"\"\n drag = QDrag(list_widget)\n drag.setMimeData(list_widget.mimeData(list_widget.selectedItems()))\n size = list_widget.viewport().visibleRegion().boundingRect().size()\n pixmap = QPixmap(size)\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n for index in list_widget.selectedIndexes():\n rect = list_widget.visualRect(index)\n painter.drawPixmap(rect, list_widget.viewport().grab(rect))\n painter.end()\n drag.setPixmap(pixmap)\n drag.setHotSpot(list_widget.viewport().mapFromGlobal(QCursor.pos()))\n return drag\n\n\ndef combine_widgets(\n widgets: Union[QWidget, Sequence[QWidget]], vertical: bool = False\n) -> QWidget:\n \"\"\"Combine a list of widgets into a single QWidget with Layout.\n\n Parameters\n ----------\n widgets : QWidget or sequence of QWidget\n A widget or a list of widgets to combine.\n vertical : bool, optional\n Whether the layout should be QVBoxLayout or not, by default\n QHBoxLayout is used\n\n Returns\n -------\n QWidget\n If ``widgets`` is a sequence, returns combined QWidget with `.layout`\n property, otherwise returns the original widget.\n\n Raises\n ------\n TypeError\n If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``.\n \"\"\"\n if isinstance(getattr(widgets, 'native', None), QWidget):\n # compatibility with magicgui v0.2.0 which no longer uses QWidgets\n # directly. 
Like vispy, the backend widget is at widget.native\n return widgets.native # type: ignore\n elif isinstance(widgets, QWidget):\n return widgets\n elif is_sequence(widgets) and all(isinstance(i, QWidget) for i in widgets):\n container = QWidget()\n container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())\n for widget in widgets:\n container.layout().addWidget(widget)\n return container\n else:\n raise TypeError(\n trans._('\"widget\" must be a QWidget or a sequence of QWidgets')\n )\n\n\ndef delete_qapp(app):\n \"\"\"Delete a QApplication\n\n Parameters\n ----------\n app : qtpy.QApplication\n \"\"\"\n try:\n # Pyside2\n from shiboken2 import delete\n except ImportError:\n # PyQt5\n from sip import delete\n\n delete(app)\n # calling a second time is necessary on PySide2...\n # see: https://bugreports.qt.io/browse/PYSIDE-1470\n QApplication.instance()\n", "path": "napari/_qt/utils.py"}], "after_files": [{"content": "from contextlib import contextmanager\nfrom functools import lru_cache\nfrom typing import Sequence, Union\n\nimport numpy as np\nimport qtpy\nfrom qtpy.QtCore import QByteArray, QSize, Qt\nfrom qtpy.QtGui import QCursor, QDrag, QImage, QPainter, QPixmap\nfrom qtpy.QtWidgets import (\n QApplication,\n QGraphicsOpacityEffect,\n QHBoxLayout,\n QListWidget,\n QVBoxLayout,\n QWidget,\n)\n\nfrom ..utils.misc import is_sequence\nfrom ..utils.translations import trans\n\nQBYTE_FLAG = \"!QBYTE_\"\n\n\ndef is_qbyte(string: str) -> bool:\n \"\"\"Check if a string is a QByteArray string.\n\n Parameters\n ----------\n string : bool\n State string.\n \"\"\"\n return isinstance(string, str) and string.startswith(QBYTE_FLAG)\n\n\ndef qbytearray_to_str(qbyte: QByteArray) -> str:\n \"\"\"Convert a window state to a string.\n\n Used for restoring the state of the main window.\n\n Parameters\n ----------\n qbyte : QByteArray\n State array.\n \"\"\"\n return QBYTE_FLAG + qbyte.toBase64().data().decode()\n\n\ndef str_to_qbytearray(string: str) -> QByteArray:\n \"\"\"Convert a string to a QbyteArray.\n\n Used for restoring the state of the main window.\n\n Parameters\n ----------\n string : str\n State string.\n \"\"\"\n if len(string) < len(QBYTE_FLAG) or not is_qbyte(string):\n raise ValueError(\n trans._(\n \"Invalid QByte string. QByte strings start with '{QBYTE_FLAG}'\",\n QBYTE_FLAG=QBYTE_FLAG,\n )\n )\n\n return QByteArray.fromBase64(string[len(QBYTE_FLAG) :].encode())\n\n\ndef QImg2array(img):\n \"\"\"Convert QImage to an array.\n\n Parameters\n ----------\n img : qtpy.QtGui.QImage\n QImage to be converted.\n\n Returns\n -------\n arr : array\n Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n # Fix when image is provided in wrong format (ex. 
test on Azure pipelines)\n if img.format() != QImage.Format_ARGB32:\n img = img.convertToFormat(QImage.Format_ARGB32)\n b = img.constBits()\n h, w, c = img.height(), img.width(), 4\n\n # As vispy doesn't use qtpy we need to reconcile the differences\n # between the `QImage` API for `PySide2` and `PyQt5` on how to convert\n # a QImage to a numpy array.\n if qtpy.API_NAME == 'PySide2':\n arr = np.array(b).reshape(h, w, c)\n else:\n b.setsize(h * w * c)\n arr = np.frombuffer(b, np.uint8).reshape(h, w, c)\n\n # Format of QImage is ARGB32_Premultiplied, but color channels are\n # reversed.\n arr = arr[:, :, [2, 1, 0, 3]]\n return arr\n\n\n@contextmanager\ndef qt_signals_blocked(obj):\n \"\"\"Context manager to temporarily block signals from `obj`\"\"\"\n obj.blockSignals(True)\n yield\n obj.blockSignals(False)\n\n\n@contextmanager\ndef event_hook_removed():\n \"\"\"Context manager to temporarily remove the PyQt5 input hook\"\"\"\n from qtpy import QtCore\n\n if hasattr(QtCore, 'pyqtRemoveInputHook'):\n QtCore.pyqtRemoveInputHook()\n try:\n yield\n finally:\n if hasattr(QtCore, 'pyqtRestoreInputHook'):\n QtCore.pyqtRestoreInputHook()\n\n\ndef disable_with_opacity(obj, widget_list, disabled):\n \"\"\"Set enabled state on a list of widgets. If disabled, decrease opacity\"\"\"\n for wdg in widget_list:\n widget = getattr(obj, wdg)\n widget.setEnabled(obj.layer.editable)\n op = QGraphicsOpacityEffect(obj)\n op.setOpacity(1 if obj.layer.editable else 0.5)\n widget.setGraphicsEffect(op)\n\n\n@lru_cache(maxsize=64)\ndef square_pixmap(size):\n \"\"\"Create a white/black hollow square pixmap. For use as labels cursor.\"\"\"\n size = max(int(size), 1)\n pixmap = QPixmap(QSize(size, size))\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n painter.setPen(Qt.white)\n painter.drawRect(0, 0, size - 1, size - 1)\n painter.setPen(Qt.black)\n painter.drawRect(1, 1, size - 3, size - 3)\n painter.end()\n return pixmap\n\n\n@lru_cache(maxsize=64)\ndef circle_pixmap(size: int):\n \"\"\"Create a white/black hollow circle pixmap. For use as labels cursor.\"\"\"\n size = max(int(size), 1)\n pixmap = QPixmap(QSize(size, size))\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n painter.setPen(Qt.white)\n painter.drawEllipse(0, 0, size - 1, size - 1)\n painter.setPen(Qt.black)\n painter.drawEllipse(1, 1, size - 3, size - 3)\n painter.end()\n return pixmap\n\n\ndef drag_with_pixmap(list_widget: QListWidget) -> QDrag:\n \"\"\"Create a QDrag object with a pixmap of the currently select list item.\n\n This method is useful when you have a QListWidget that displays custom\n widgets for each QListWidgetItem instance in the list (usually by calling\n ``QListWidget.setItemWidget(item, widget)``). When used in a\n ``QListWidget.startDrag`` method, this function creates a QDrag object that\n shows an image of the item being dragged (rather than an empty rectangle).\n\n Parameters\n ----------\n list_widget : QListWidget\n The QListWidget for which to create a QDrag object.\n\n Returns\n -------\n QDrag\n A QDrag instance with a pixmap of the currently selected item.\n\n Examples\n --------\n >>> class QListWidget:\n ... def startDrag(self, supportedActions):\n ... drag = drag_with_pixmap(self)\n ... 
drag.exec_(supportedActions, Qt.MoveAction)\n\n \"\"\"\n drag = QDrag(list_widget)\n drag.setMimeData(list_widget.mimeData(list_widget.selectedItems()))\n size = list_widget.viewport().visibleRegion().boundingRect().size()\n pixmap = QPixmap(size)\n pixmap.fill(Qt.transparent)\n painter = QPainter(pixmap)\n for index in list_widget.selectedIndexes():\n rect = list_widget.visualRect(index)\n painter.drawPixmap(rect, list_widget.viewport().grab(rect))\n painter.end()\n drag.setPixmap(pixmap)\n drag.setHotSpot(list_widget.viewport().mapFromGlobal(QCursor.pos()))\n return drag\n\n\ndef combine_widgets(\n widgets: Union[QWidget, Sequence[QWidget]], vertical: bool = False\n) -> QWidget:\n \"\"\"Combine a list of widgets into a single QWidget with Layout.\n\n Parameters\n ----------\n widgets : QWidget or sequence of QWidget\n A widget or a list of widgets to combine.\n vertical : bool, optional\n Whether the layout should be QVBoxLayout or not, by default\n QHBoxLayout is used\n\n Returns\n -------\n QWidget\n If ``widgets`` is a sequence, returns combined QWidget with `.layout`\n property, otherwise returns the original widget.\n\n Raises\n ------\n TypeError\n If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``.\n \"\"\"\n if isinstance(getattr(widgets, 'native', None), QWidget):\n # compatibility with magicgui v0.2.0 which no longer uses QWidgets\n # directly. Like vispy, the backend widget is at widget.native\n return widgets.native # type: ignore\n elif isinstance(widgets, QWidget):\n return widgets\n elif is_sequence(widgets):\n # the same as above, compatibility with magicgui v0.2.0\n widgets = [\n i.native if isinstance(getattr(i, 'native', None), QWidget) else i\n for i in widgets\n ]\n if all(isinstance(i, QWidget) for i in widgets):\n container = QWidget()\n container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())\n for widget in widgets:\n container.layout().addWidget(widget)\n return container\n raise TypeError(\n trans._('\"widget\" must be a QWidget or a sequence of QWidgets')\n )\n\n\ndef delete_qapp(app):\n \"\"\"Delete a QApplication\n\n Parameters\n ----------\n app : qtpy.QApplication\n \"\"\"\n try:\n # Pyside2\n from shiboken2 import delete\n except ImportError:\n # PyQt5\n from sip import delete\n\n delete(app)\n # calling a second time is necessary on PySide2...\n # see: https://bugreports.qt.io/browse/PYSIDE-1470\n QApplication.instance()\n", "path": "napari/_qt/utils.py"}]}
| 3,713 | 313 |
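The napari patch above extends `combine_widgets` so that each element of a widget sequence is unwrapped to its `.native` Qt widget when one is present (the magicgui `FunctionGui` case) before the `QWidget` check runs. A self-contained sketch of that duck-typing pattern, using stand-in classes instead of real Qt/magicgui objects purely for illustration:

```python
# FakeQWidget / FakeFunctionGui are stand-ins for qtpy's QWidget and magicgui's
# FunctionGui; only the getattr-based unwrapping mirrors the actual patch.
class FakeQWidget:
    pass

class FakeFunctionGui:
    def __init__(self):
        self.native = FakeQWidget()  # magicgui exposes the backend widget here

def unwrap_native(widgets):
    return [
        w.native if isinstance(getattr(w, "native", None), FakeQWidget) else w
        for w in widgets
    ]

mixed = [FakeQWidget(), FakeFunctionGui()]
assert all(isinstance(w, FakeQWidget) for w in unwrap_native(mixed))
```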
gh_patches_debug_14971
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1240
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typos in gRPC instrumentation example codes
**Describe your environment**
* Platform: any
* Python version: any
* Library version: main HEAD as of Aug 24th
**Steps to reproduce**
See [documents for gRPC instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html)
**What is the expected behavior?**
For the client instrumentation example, I would see either of the following:
1. `GrpcInstrumentorClient().instrument()` is called without return value assignment.
2. Get the return value of `GrpcInstrumentorClient()` and call `instrument()` method of it.
**What is the actual behavior?**
I see the following example codes.
In the [Usage Client](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html#usage-client) section:
```python
instrumentor = GrpcInstrumentorClient().instrument()
```
And in the [GrpcInstrumentorClient class document](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html#usage-client):
```python
grpc_client_instrumentor = GrpcInstrumentorClient()
grpc.client_instrumentor.instrument()
```
where the 2nd line calls a non-existent package; it should be a typo of the variable defined in the 1st line.
**Additional context**
I was trying to implement the code to fix #373 and found this issue, so I created a separate issue here. I'll create a PR for this shortly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint:disable=no-name-in-module
16 # pylint:disable=relative-beyond-top-level
17 # pylint:disable=import-error
18 # pylint:disable=no-self-use
19 """
20 Usage Client
21 ------------
22 .. code-block:: python
23
24 import logging
25
26 import grpc
27
28 from opentelemetry import trace
29 from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
30 from opentelemetry.sdk.trace import TracerProvider
31 from opentelemetry.sdk.trace.export import (
32 ConsoleSpanExporter,
33 SimpleSpanProcessor,
34 )
35
36 try:
37 from .gen import helloworld_pb2, helloworld_pb2_grpc
38 except ImportError:
39 from gen import helloworld_pb2, helloworld_pb2_grpc
40
41 trace.set_tracer_provider(TracerProvider())
42 trace.get_tracer_provider().add_span_processor(
43 SimpleSpanProcessor(ConsoleSpanExporter())
44 )
45
46 instrumentor = GrpcInstrumentorClient().instrument()
47
48 def run():
49 with grpc.insecure_channel("localhost:50051") as channel:
50
51 stub = helloworld_pb2_grpc.GreeterStub(channel)
52 response = stub.SayHello(helloworld_pb2.HelloRequest(name="YOU"))
53
54 print("Greeter client received: " + response.message)
55
56
57 if __name__ == "__main__":
58 logging.basicConfig()
59 run()
60
61 Usage Server
62 ------------
63 .. code-block:: python
64
65 import logging
66 from concurrent import futures
67
68 import grpc
69
70 from opentelemetry import trace
71 from opentelemetry.instrumentation.grpc import GrpcInstrumentorServer
72 from opentelemetry.sdk.trace import TracerProvider
73 from opentelemetry.sdk.trace.export import (
74 ConsoleSpanExporter,
75 SimpleSpanProcessor,
76 )
77
78 try:
79 from .gen import helloworld_pb2, helloworld_pb2_grpc
80 except ImportError:
81 from gen import helloworld_pb2, helloworld_pb2_grpc
82
83 trace.set_tracer_provider(TracerProvider())
84 trace.get_tracer_provider().add_span_processor(
85 SimpleSpanProcessor(ConsoleSpanExporter())
86 )
87
88 grpc_server_instrumentor = GrpcInstrumentorServer()
89 grpc_server_instrumentor.instrument()
90
91 class Greeter(helloworld_pb2_grpc.GreeterServicer):
92 def SayHello(self, request, context):
93 return helloworld_pb2.HelloReply(message="Hello, %s!" % request.name)
94
95
96 def serve():
97
98 server = grpc.server(futures.ThreadPoolExecutor())
99
100 helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
101 server.add_insecure_port("[::]:50051")
102 server.start()
103 server.wait_for_termination()
104
105
106 if __name__ == "__main__":
107 logging.basicConfig()
108 serve()
109
110 You can also add the instrumentor manually, rather than using
111 :py:class:`~opentelemetry.instrumentation.grpc.GrpcInstrumentorServer`:
112
113 .. code-block:: python
114
115 from opentelemetry.instrumentation.grpc import server_interceptor
116
117 server = grpc.server(futures.ThreadPoolExecutor(),
118 interceptors = [server_interceptor()])
119
120 """
121 from typing import Collection
122
123 import grpc # pylint:disable=import-self
124 from wrapt import wrap_function_wrapper as _wrap
125
126 from opentelemetry import trace
127 from opentelemetry.instrumentation.grpc.grpcext import intercept_channel
128 from opentelemetry.instrumentation.grpc.package import _instruments
129 from opentelemetry.instrumentation.grpc.version import __version__
130 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
131 from opentelemetry.instrumentation.utils import unwrap
132
133 # pylint:disable=import-outside-toplevel
134 # pylint:disable=import-self
135 # pylint:disable=unused-argument
136
137
138 class GrpcInstrumentorServer(BaseInstrumentor):
139 """
140 Globally instrument the grpc server.
141
142 Usage::
143
144 grpc_server_instrumentor = GrpcInstrumentorServer()
145 grpc_server_instrumentor.instrument()
146
147 """
148
149 # pylint:disable=attribute-defined-outside-init, redefined-outer-name
150
151 def instrumentation_dependencies(self) -> Collection[str]:
152 return _instruments
153
154 def _instrument(self, **kwargs):
155 self._original_func = grpc.server
156 tracer_provider = kwargs.get("tracer_provider")
157
158 def server(*args, **kwargs):
159 if "interceptors" in kwargs:
160 # add our interceptor as the first
161 kwargs["interceptors"].insert(
162 0, server_interceptor(tracer_provider=tracer_provider)
163 )
164 else:
165 kwargs["interceptors"] = [
166 server_interceptor(tracer_provider=tracer_provider)
167 ]
168 return self._original_func(*args, **kwargs)
169
170 grpc.server = server
171
172 def _uninstrument(self, **kwargs):
173 grpc.server = self._original_func
174
175
176 class GrpcInstrumentorClient(BaseInstrumentor):
177 """
178 Globally instrument the grpc client
179
180 Usage::
181
182 grpc_client_instrumentor = GrpcInstrumentorClient()
183 grpc.client_instrumentor.instrument()
184
185 """
186
187 # Figures out which channel type we need to wrap
188 def _which_channel(self, kwargs):
189 # handle legacy argument
190 if "channel_type" in kwargs:
191 if kwargs.get("channel_type") == "secure":
192 return ("secure_channel",)
193 return ("insecure_channel",)
194
195 # handle modern arguments
196 types = []
197 for ctype in ("secure_channel", "insecure_channel"):
198 if kwargs.get(ctype, True):
199 types.append(ctype)
200
201 return tuple(types)
202
203 def instrumentation_dependencies(self) -> Collection[str]:
204 return _instruments
205
206 def _instrument(self, **kwargs):
207 for ctype in self._which_channel(kwargs):
208 _wrap(
209 "grpc",
210 ctype,
211 self.wrapper_fn,
212 )
213
214 def _uninstrument(self, **kwargs):
215 for ctype in self._which_channel(kwargs):
216 unwrap(grpc, ctype)
217
218 def wrapper_fn(self, original_func, instance, args, kwargs):
219 channel = original_func(*args, **kwargs)
220 tracer_provider = kwargs.get("tracer_provider")
221 return intercept_channel(
222 channel,
223 client_interceptor(tracer_provider=tracer_provider),
224 )
225
226
227 def client_interceptor(tracer_provider=None):
228 """Create a gRPC client channel interceptor.
229
230 Args:
231 tracer: The tracer to use to create client-side spans.
232
233 Returns:
234 An invocation-side interceptor object.
235 """
236 from . import _client
237
238 tracer = trace.get_tracer(__name__, __version__, tracer_provider)
239
240 return _client.OpenTelemetryClientInterceptor(tracer)
241
242
243 def server_interceptor(tracer_provider=None):
244 """Create a gRPC server interceptor.
245
246 Args:
247 tracer: The tracer to use to create server-side spans.
248
249 Returns:
250 A service-side interceptor object.
251 """
252 from . import _server
253
254 tracer = trace.get_tracer(__name__, __version__, tracer_provider)
255
256 return _server.OpenTelemetryServerInterceptor(tracer)
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py
@@ -43,7 +43,8 @@
SimpleSpanProcessor(ConsoleSpanExporter())
)
- instrumentor = GrpcInstrumentorClient().instrument()
+ grpc_client_instrumentor = GrpcInstrumentorClient()
+ grpc_client_instrumentor.instrument()
def run():
with grpc.insecure_channel("localhost:50051") as channel:
@@ -180,7 +181,7 @@
Usage::
grpc_client_instrumentor = GrpcInstrumentorClient()
- grpc.client_instrumentor.instrument()
+ grpc_client_instrumentor.instrument()
"""
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py\n@@ -43,7 +43,8 @@\n SimpleSpanProcessor(ConsoleSpanExporter())\n )\n \n- instrumentor = GrpcInstrumentorClient().instrument()\n+ grpc_client_instrumentor = GrpcInstrumentorClient()\n+ grpc_client_instrumentor.instrument()\n \n def run():\n with grpc.insecure_channel(\"localhost:50051\") as channel:\n@@ -180,7 +181,7 @@\n Usage::\n \n grpc_client_instrumentor = GrpcInstrumentorClient()\n- grpc.client_instrumentor.instrument()\n+ grpc_client_instrumentor.instrument()\n \n \"\"\"\n", "issue": "Typos in gRPC instrumentation example codes\n**Describe your environment**\r\n\r\n* Platform: any\r\n* Python version: any\r\n* Library version: main HEAD as of Aug 24th\r\n\r\n**Steps to reproduce**\r\nSee [documents for gRPC instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html)\r\n\r\n**What is the expected behavior?**\r\nFor client instrumentation example, I would see either of the followings:\r\n\r\n1. `GrpcInstrumentorClient().instrument()` is called without return value assignment.\r\n2. Get the return value of `GrpcInstrumentorClient()` and call `instrument()` method of it.\r\n\r\n**What is the actual behavior?**\r\n\r\nI see the following example codes.\r\n\r\nIn the [Usage Client](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html#usage-client) section:\r\n\r\n```python\r\ninstrumentor = GrpcInstrumentorClient().instrument()\r\n```\r\n\r\nAnd in the [GrpcInstrumentorClient class document](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/grpc/grpc.html#usage-client):\r\n\r\n```python\r\ngrpc_client_instrumentor = GrpcInstrumentorClient()\r\ngrpc.client_instrumentor.instrument()\r\n```\r\n\r\nwhere 2nd line is calling non existing package. It should be typo of the variable defined in 1st line.\r\n\r\n**Additional context**\r\n\r\nI was trying implement the code to fix #373 and found this issue, so I created separate issue here. I'll create a PR for this shortly.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint:disable=no-name-in-module\n# pylint:disable=relative-beyond-top-level\n# pylint:disable=import-error\n# pylint:disable=no-self-use\n\"\"\"\nUsage Client\n------------\n.. 
code-block:: python\n\n import logging\n\n import grpc\n\n from opentelemetry import trace\n from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n )\n\n try:\n from .gen import helloworld_pb2, helloworld_pb2_grpc\n except ImportError:\n from gen import helloworld_pb2, helloworld_pb2_grpc\n\n trace.set_tracer_provider(TracerProvider())\n trace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n )\n\n instrumentor = GrpcInstrumentorClient().instrument()\n\n def run():\n with grpc.insecure_channel(\"localhost:50051\") as channel:\n\n stub = helloworld_pb2_grpc.GreeterStub(channel)\n response = stub.SayHello(helloworld_pb2.HelloRequest(name=\"YOU\"))\n\n print(\"Greeter client received: \" + response.message)\n\n\n if __name__ == \"__main__\":\n logging.basicConfig()\n run()\n\nUsage Server\n------------\n.. code-block:: python\n\n import logging\n from concurrent import futures\n\n import grpc\n\n from opentelemetry import trace\n from opentelemetry.instrumentation.grpc import GrpcInstrumentorServer\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n )\n\n try:\n from .gen import helloworld_pb2, helloworld_pb2_grpc\n except ImportError:\n from gen import helloworld_pb2, helloworld_pb2_grpc\n\n trace.set_tracer_provider(TracerProvider())\n trace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n )\n\n grpc_server_instrumentor = GrpcInstrumentorServer()\n grpc_server_instrumentor.instrument()\n\n class Greeter(helloworld_pb2_grpc.GreeterServicer):\n def SayHello(self, request, context):\n return helloworld_pb2.HelloReply(message=\"Hello, %s!\" % request.name)\n\n\n def serve():\n\n server = grpc.server(futures.ThreadPoolExecutor())\n\n helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\n server.add_insecure_port(\"[::]:50051\")\n server.start()\n server.wait_for_termination()\n\n\n if __name__ == \"__main__\":\n logging.basicConfig()\n serve()\n\nYou can also add the instrumentor manually, rather than using\n:py:class:`~opentelemetry.instrumentation.grpc.GrpcInstrumentorServer`:\n\n.. 
code-block:: python\n\n from opentelemetry.instrumentation.grpc import server_interceptor\n\n server = grpc.server(futures.ThreadPoolExecutor(),\n interceptors = [server_interceptor()])\n\n\"\"\"\nfrom typing import Collection\n\nimport grpc # pylint:disable=import-self\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.grpc.grpcext import intercept_channel\nfrom opentelemetry.instrumentation.grpc.package import _instruments\nfrom opentelemetry.instrumentation.grpc.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\n\n# pylint:disable=import-outside-toplevel\n# pylint:disable=import-self\n# pylint:disable=unused-argument\n\n\nclass GrpcInstrumentorServer(BaseInstrumentor):\n \"\"\"\n Globally instrument the grpc server.\n\n Usage::\n\n grpc_server_instrumentor = GrpcInstrumentorServer()\n grpc_server_instrumentor.instrument()\n\n \"\"\"\n\n # pylint:disable=attribute-defined-outside-init, redefined-outer-name\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_func = grpc.server\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n def server(*args, **kwargs):\n if \"interceptors\" in kwargs:\n # add our interceptor as the first\n kwargs[\"interceptors\"].insert(\n 0, server_interceptor(tracer_provider=tracer_provider)\n )\n else:\n kwargs[\"interceptors\"] = [\n server_interceptor(tracer_provider=tracer_provider)\n ]\n return self._original_func(*args, **kwargs)\n\n grpc.server = server\n\n def _uninstrument(self, **kwargs):\n grpc.server = self._original_func\n\n\nclass GrpcInstrumentorClient(BaseInstrumentor):\n \"\"\"\n Globally instrument the grpc client\n\n Usage::\n\n grpc_client_instrumentor = GrpcInstrumentorClient()\n grpc.client_instrumentor.instrument()\n\n \"\"\"\n\n # Figures out which channel type we need to wrap\n def _which_channel(self, kwargs):\n # handle legacy argument\n if \"channel_type\" in kwargs:\n if kwargs.get(\"channel_type\") == \"secure\":\n return (\"secure_channel\",)\n return (\"insecure_channel\",)\n\n # handle modern arguments\n types = []\n for ctype in (\"secure_channel\", \"insecure_channel\"):\n if kwargs.get(ctype, True):\n types.append(ctype)\n\n return tuple(types)\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n for ctype in self._which_channel(kwargs):\n _wrap(\n \"grpc\",\n ctype,\n self.wrapper_fn,\n )\n\n def _uninstrument(self, **kwargs):\n for ctype in self._which_channel(kwargs):\n unwrap(grpc, ctype)\n\n def wrapper_fn(self, original_func, instance, args, kwargs):\n channel = original_func(*args, **kwargs)\n tracer_provider = kwargs.get(\"tracer_provider\")\n return intercept_channel(\n channel,\n client_interceptor(tracer_provider=tracer_provider),\n )\n\n\ndef client_interceptor(tracer_provider=None):\n \"\"\"Create a gRPC client channel interceptor.\n\n Args:\n tracer: The tracer to use to create client-side spans.\n\n Returns:\n An invocation-side interceptor object.\n \"\"\"\n from . 
import _client\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n return _client.OpenTelemetryClientInterceptor(tracer)\n\n\ndef server_interceptor(tracer_provider=None):\n \"\"\"Create a gRPC server interceptor.\n\n Args:\n tracer: The tracer to use to create server-side spans.\n\n Returns:\n A service-side interceptor object.\n \"\"\"\n from . import _server\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n return _server.OpenTelemetryServerInterceptor(tracer)\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint:disable=no-name-in-module\n# pylint:disable=relative-beyond-top-level\n# pylint:disable=import-error\n# pylint:disable=no-self-use\n\"\"\"\nUsage Client\n------------\n.. code-block:: python\n\n import logging\n\n import grpc\n\n from opentelemetry import trace\n from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n )\n\n try:\n from .gen import helloworld_pb2, helloworld_pb2_grpc\n except ImportError:\n from gen import helloworld_pb2, helloworld_pb2_grpc\n\n trace.set_tracer_provider(TracerProvider())\n trace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n )\n\n grpc_client_instrumentor = GrpcInstrumentorClient()\n grpc_client_instrumentor.instrument()\n\n def run():\n with grpc.insecure_channel(\"localhost:50051\") as channel:\n\n stub = helloworld_pb2_grpc.GreeterStub(channel)\n response = stub.SayHello(helloworld_pb2.HelloRequest(name=\"YOU\"))\n\n print(\"Greeter client received: \" + response.message)\n\n\n if __name__ == \"__main__\":\n logging.basicConfig()\n run()\n\nUsage Server\n------------\n.. 
code-block:: python\n\n import logging\n from concurrent import futures\n\n import grpc\n\n from opentelemetry import trace\n from opentelemetry.instrumentation.grpc import GrpcInstrumentorServer\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n )\n\n try:\n from .gen import helloworld_pb2, helloworld_pb2_grpc\n except ImportError:\n from gen import helloworld_pb2, helloworld_pb2_grpc\n\n trace.set_tracer_provider(TracerProvider())\n trace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n )\n\n grpc_server_instrumentor = GrpcInstrumentorServer()\n grpc_server_instrumentor.instrument()\n\n class Greeter(helloworld_pb2_grpc.GreeterServicer):\n def SayHello(self, request, context):\n return helloworld_pb2.HelloReply(message=\"Hello, %s!\" % request.name)\n\n\n def serve():\n\n server = grpc.server(futures.ThreadPoolExecutor())\n\n helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\n server.add_insecure_port(\"[::]:50051\")\n server.start()\n server.wait_for_termination()\n\n\n if __name__ == \"__main__\":\n logging.basicConfig()\n serve()\n\nYou can also add the instrumentor manually, rather than using\n:py:class:`~opentelemetry.instrumentation.grpc.GrpcInstrumentorServer`:\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.grpc import server_interceptor\n\n server = grpc.server(futures.ThreadPoolExecutor(),\n interceptors = [server_interceptor()])\n\n\"\"\"\nfrom typing import Collection\n\nimport grpc # pylint:disable=import-self\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.grpc.grpcext import intercept_channel\nfrom opentelemetry.instrumentation.grpc.package import _instruments\nfrom opentelemetry.instrumentation.grpc.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\n\n# pylint:disable=import-outside-toplevel\n# pylint:disable=import-self\n# pylint:disable=unused-argument\n\n\nclass GrpcInstrumentorServer(BaseInstrumentor):\n \"\"\"\n Globally instrument the grpc server.\n\n Usage::\n\n grpc_server_instrumentor = GrpcInstrumentorServer()\n grpc_server_instrumentor.instrument()\n\n \"\"\"\n\n # pylint:disable=attribute-defined-outside-init, redefined-outer-name\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_func = grpc.server\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n def server(*args, **kwargs):\n if \"interceptors\" in kwargs:\n # add our interceptor as the first\n kwargs[\"interceptors\"].insert(\n 0, server_interceptor(tracer_provider=tracer_provider)\n )\n else:\n kwargs[\"interceptors\"] = [\n server_interceptor(tracer_provider=tracer_provider)\n ]\n return self._original_func(*args, **kwargs)\n\n grpc.server = server\n\n def _uninstrument(self, **kwargs):\n grpc.server = self._original_func\n\n\nclass GrpcInstrumentorClient(BaseInstrumentor):\n \"\"\"\n Globally instrument the grpc client\n\n Usage::\n\n grpc_client_instrumentor = GrpcInstrumentorClient()\n grpc_client_instrumentor.instrument()\n\n \"\"\"\n\n # Figures out which channel type we need to wrap\n def _which_channel(self, kwargs):\n # handle legacy argument\n if \"channel_type\" in kwargs:\n if kwargs.get(\"channel_type\") == \"secure\":\n return (\"secure_channel\",)\n 
return (\"insecure_channel\",)\n\n # handle modern arguments\n types = []\n for ctype in (\"secure_channel\", \"insecure_channel\"):\n if kwargs.get(ctype, True):\n types.append(ctype)\n\n return tuple(types)\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n for ctype in self._which_channel(kwargs):\n _wrap(\n \"grpc\",\n ctype,\n self.wrapper_fn,\n )\n\n def _uninstrument(self, **kwargs):\n for ctype in self._which_channel(kwargs):\n unwrap(grpc, ctype)\n\n def wrapper_fn(self, original_func, instance, args, kwargs):\n channel = original_func(*args, **kwargs)\n tracer_provider = kwargs.get(\"tracer_provider\")\n return intercept_channel(\n channel,\n client_interceptor(tracer_provider=tracer_provider),\n )\n\n\ndef client_interceptor(tracer_provider=None):\n \"\"\"Create a gRPC client channel interceptor.\n\n Args:\n tracer: The tracer to use to create client-side spans.\n\n Returns:\n An invocation-side interceptor object.\n \"\"\"\n from . import _client\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n return _client.OpenTelemetryClientInterceptor(tracer)\n\n\ndef server_interceptor(tracer_provider=None):\n \"\"\"Create a gRPC server interceptor.\n\n Args:\n tracer: The tracer to use to create server-side spans.\n\n Returns:\n A service-side interceptor object.\n \"\"\"\n from . import _server\n\n tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n return _server.OpenTelemetryServerInterceptor(tracer)\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/__init__.py"}]}
| 2,893 | 238 |
gh_patches_debug_32112 | rasdani/github-patches | git_diff | aio-libs__aiohttp-2253 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test only asyncio loop by default.
aiohttp uses a parametrized `loop` fixture which runs the whole test suite several times: once for asyncio itself and again for uvloop and tokio if they are installed.
That's good for testing aiohttp itself but bad for `pytest-aiohttp` users.
For example, the test suites for my jobs take about 15 minutes; we don't want to double that time (we use uvloop, sure).
We could solve the problem by pushing a custom `loop` fixture into our `conftest.py`, but that is not an obvious or robust solution.
I suggest moving `pytest_configure` from `pytest_plugin.py` into `tests/conftest.py` and using only the asyncio loop for tests by default.
As an option, we could add support for a command line `--aiohttp-loops` option and `aiohttp-loops` in `pytest.ini` to help people run their tests on specific asyncio loop implementations.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/pytest_plugin.py`
Content:
```
1 import asyncio
2 import collections
3 import contextlib
4 import tempfile
5 import warnings
6
7 import pytest
8 from py import path
9
10 from aiohttp.web import Application
11
12 from .test_utils import unused_port as _unused_port
13 from .test_utils import (BaseTestServer, RawTestServer, TestClient, TestServer,
14 loop_context, setup_test_loop, teardown_test_loop)
15
16
17 try:
18 import uvloop
19 except: # pragma: no cover
20 uvloop = None
21
22 try:
23 import tokio
24 except: # pragma: no cover
25 tokio = None
26
27
28 def pytest_addoption(parser):
29 parser.addoption(
30 '--fast', action='store_true', default=False,
31 help='run tests faster by disabling extra checks')
32 parser.addoption(
33 '--loop', action='append', default=[],
34 help='run tests with specific loop: pyloop, uvloop, tokio')
35 parser.addoption(
36 '--enable-loop-debug', action='store_true', default=False,
37 help='enable event loop debug mode')
38
39
40 @pytest.fixture
41 def fast(request):
42 """ --fast config option """
43 return request.config.getoption('--fast') # pragma: no cover
44
45
46 @contextlib.contextmanager
47 def _runtime_warning_context():
48 """
49 Context manager which checks for RuntimeWarnings, specifically to
50 avoid "coroutine 'X' was never awaited" warnings being missed.
51
52 If RuntimeWarnings occur in the context a RuntimeError is raised.
53 """
54 with warnings.catch_warnings(record=True) as _warnings:
55 yield
56 rw = ['{w.filename}:{w.lineno}:{w.message}'.format(w=w)
57 for w in _warnings if w.category == RuntimeWarning]
58 if rw:
59 raise RuntimeError('{} Runtime Warning{},\n{}'.format(
60 len(rw),
61 '' if len(rw) == 1 else 's',
62 '\n'.join(rw)
63 ))
64
65
66 @contextlib.contextmanager
67 def _passthrough_loop_context(loop, fast=False):
68 """
69 setups and tears down a loop unless one is passed in via the loop
70 argument when it's passed straight through.
71 """
72 if loop:
73 # loop already exists, pass it straight through
74 yield loop
75 else:
76 # this shadows loop_context's standard behavior
77 loop = setup_test_loop()
78 yield loop
79 teardown_test_loop(loop, fast=fast)
80
81
82 def pytest_pycollect_makeitem(collector, name, obj):
83 """
84 Fix pytest collecting for coroutines.
85 """
86 if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):
87 return list(collector._genfunctions(name, obj))
88
89
90 def pytest_pyfunc_call(pyfuncitem):
91 """
92 Run coroutines in an event loop instead of a normal function call.
93 """
94 fast = pyfuncitem.config.getoption("--fast")
95 if asyncio.iscoroutinefunction(pyfuncitem.function):
96 existing_loop = pyfuncitem.funcargs.get('loop', None)
97 with _runtime_warning_context():
98 with _passthrough_loop_context(existing_loop, fast=fast) as _loop:
99 testargs = {arg: pyfuncitem.funcargs[arg]
100 for arg in pyfuncitem._fixtureinfo.argnames}
101
102 task = _loop.create_task(pyfuncitem.obj(**testargs))
103 _loop.run_until_complete(task)
104
105 return True
106
107
108 def pytest_configure(config):
109 loops = config.getoption('--loop')
110
111 factories = {'pyloop': asyncio.new_event_loop}
112
113 if uvloop is not None: # pragma: no cover
114 factories['uvloop'] = uvloop.new_event_loop
115
116 if tokio is not None: # pragma: no cover
117 factories['tokio'] = tokio.new_event_loop
118
119 LOOP_FACTORIES.clear()
120 LOOP_FACTORY_IDS.clear()
121
122 if loops:
123 for names in (name.split(',') for name in loops):
124 for name in names:
125 name = name.strip()
126 if name not in factories:
127 raise ValueError(
128 "Unknown loop '%s', available loops: %s" % (
129 name, list(factories.keys())))
130
131 LOOP_FACTORIES.append(factories[name])
132 LOOP_FACTORY_IDS.append(name)
133 else:
134 LOOP_FACTORIES.append(asyncio.new_event_loop)
135 LOOP_FACTORY_IDS.append('pyloop')
136
137 if uvloop is not None: # pragma: no cover
138 LOOP_FACTORIES.append(uvloop.new_event_loop)
139 LOOP_FACTORY_IDS.append('uvloop')
140
141 if tokio is not None:
142 LOOP_FACTORIES.append(tokio.new_event_loop)
143 LOOP_FACTORY_IDS.append('tokio')
144
145 asyncio.set_event_loop(None)
146
147
148 LOOP_FACTORIES = []
149 LOOP_FACTORY_IDS = []
150
151
152 @pytest.fixture(params=LOOP_FACTORIES, ids=LOOP_FACTORY_IDS)
153 def loop(request):
154 """Return an instance of the event loop."""
155 fast = request.config.getoption('--fast')
156 debug = request.config.getoption('--enable-loop-debug')
157
158 with loop_context(request.param, fast=fast) as _loop:
159 if debug:
160 _loop.set_debug(True) # pragma: no cover
161 yield _loop
162
163
164 @pytest.fixture
165 def unused_port():
166 """Return a port that is unused on the current host."""
167 return _unused_port
168
169
170 @pytest.yield_fixture
171 def test_server(loop):
172 """Factory to create a TestServer instance, given an app.
173
174 test_server(app, **kwargs)
175 """
176 servers = []
177
178 @asyncio.coroutine
179 def go(app, **kwargs):
180 server = TestServer(app)
181 yield from server.start_server(loop=loop, **kwargs)
182 servers.append(server)
183 return server
184
185 yield go
186
187 @asyncio.coroutine
188 def finalize():
189 while servers:
190 yield from servers.pop().close()
191
192 loop.run_until_complete(finalize())
193
194
195 @pytest.yield_fixture
196 def raw_test_server(loop):
197 """Factory to create a RawTestServer instance, given a web handler.
198
199 raw_test_server(handler, **kwargs)
200 """
201 servers = []
202
203 @asyncio.coroutine
204 def go(handler, **kwargs):
205 server = RawTestServer(handler)
206 yield from server.start_server(loop=loop, **kwargs)
207 servers.append(server)
208 return server
209
210 yield go
211
212 @asyncio.coroutine
213 def finalize():
214 while servers:
215 yield from servers.pop().close()
216
217 loop.run_until_complete(finalize())
218
219
220 @pytest.yield_fixture
221 def test_client(loop):
222 """Factory to create a TestClient instance.
223
224 test_client(app, **kwargs)
225 test_client(server, **kwargs)
226 test_client(raw_server, **kwargs)
227 """
228 clients = []
229
230 @asyncio.coroutine
231 def go(__param, *args, server_kwargs={}, **kwargs):
232
233 if isinstance(__param, collections.Callable) and \
234 not isinstance(__param, (Application, BaseTestServer)):
235 __param = __param(loop, *args, **kwargs)
236 kwargs = {}
237 else:
238 assert not args, "args should be empty"
239
240 if isinstance(__param, Application):
241 server = TestServer(__param, loop=loop, **server_kwargs)
242 client = TestClient(server, loop=loop, **kwargs)
243 elif isinstance(__param, BaseTestServer):
244 client = TestClient(__param, loop=loop, **kwargs)
245 else:
246 raise ValueError("Unknown argument type: %r" % type(__param))
247
248 yield from client.start_server()
249 clients.append(client)
250 return client
251
252 yield go
253
254 @asyncio.coroutine
255 def finalize():
256 while clients:
257 yield from clients.pop().close()
258
259 loop.run_until_complete(finalize())
260
261
262 @pytest.fixture
263 def shorttmpdir():
264 """Provides a temporary directory with a shorter file system path than the
265 tmpdir fixture.
266 """
267 tmpdir = path.local(tempfile.mkdtemp())
268 yield tmpdir
269 tmpdir.remove(rec=1)
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/aiohttp/pytest_plugin.py b/aiohttp/pytest_plugin.py
--- a/aiohttp/pytest_plugin.py
+++ b/aiohttp/pytest_plugin.py
@@ -30,8 +30,8 @@
'--fast', action='store_true', default=False,
help='run tests faster by disabling extra checks')
parser.addoption(
- '--loop', action='append', default=[],
- help='run tests with specific loop: pyloop, uvloop, tokio')
+ '--loop', action='store', default='pyloop',
+ help='run tests with specific loop: pyloop, uvloop, tokio or all')
parser.addoption(
'--enable-loop-debug', action='store_true', default=False,
help='enable event loop debug mode')
@@ -119,29 +119,19 @@
LOOP_FACTORIES.clear()
LOOP_FACTORY_IDS.clear()
- if loops:
- for names in (name.split(',') for name in loops):
- for name in names:
- name = name.strip()
- if name not in factories:
- raise ValueError(
- "Unknown loop '%s', available loops: %s" % (
- name, list(factories.keys())))
-
- LOOP_FACTORIES.append(factories[name])
- LOOP_FACTORY_IDS.append(name)
- else:
- LOOP_FACTORIES.append(asyncio.new_event_loop)
- LOOP_FACTORY_IDS.append('pyloop')
-
- if uvloop is not None: # pragma: no cover
- LOOP_FACTORIES.append(uvloop.new_event_loop)
- LOOP_FACTORY_IDS.append('uvloop')
-
- if tokio is not None:
- LOOP_FACTORIES.append(tokio.new_event_loop)
- LOOP_FACTORY_IDS.append('tokio')
-
+ if loops == 'all':
+ loops = 'pyloop,uvloop?,tokio?'
+
+ for name in loops.split(','):
+ required = not name.endswith('?')
+ name = name.strip(' ?')
+ if name in factories:
+ LOOP_FACTORIES.append(factories[name])
+ LOOP_FACTORY_IDS.append(name)
+ elif required:
+ raise ValueError(
+ "Unknown loop '%s', available loops: %s" % (
+ name, list(factories.keys())))
asyncio.set_event_loop(None)
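For readers skimming the patch, here is a minimal standalone sketch of the loop-selection rule it introduces: a trailing `?` marks a loop as optional, so it is silently skipped when its factory is unavailable instead of raising. The helper name `select_loop_factories` and the toy `factories` dict are illustrative stand-ins, not part of the patch itself.

```python
import asyncio

def select_loop_factories(loops, factories):
    # Same parsing rule as the patched pytest_configure: 'all' expands to the
    # full list, names ending in '?' are optional, unknown required names fail.
    if loops == 'all':
        loops = 'pyloop,uvloop?,tokio?'
    selected = []
    for name in loops.split(','):
        required = not name.endswith('?')
        name = name.strip(' ?')
        if name in factories:
            selected.append(name)
        elif required:
            raise ValueError(
                "Unknown loop %r, available loops: %s" % (name, list(factories)))
    return selected

factories = {'pyloop': asyncio.new_event_loop}
print(select_loop_factories('pyloop', factories))  # ['pyloop'] -- the new default
print(select_loop_factories('all', factories))     # ['pyloop'] -- optional loops skipped
```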
|
{"golden_diff": "diff --git a/aiohttp/pytest_plugin.py b/aiohttp/pytest_plugin.py\n--- a/aiohttp/pytest_plugin.py\n+++ b/aiohttp/pytest_plugin.py\n@@ -30,8 +30,8 @@\n '--fast', action='store_true', default=False,\n help='run tests faster by disabling extra checks')\n parser.addoption(\n- '--loop', action='append', default=[],\n- help='run tests with specific loop: pyloop, uvloop, tokio')\n+ '--loop', action='store', default='pyloop',\n+ help='run tests with specific loop: pyloop, uvloop, tokio or all')\n parser.addoption(\n '--enable-loop-debug', action='store_true', default=False,\n help='enable event loop debug mode')\n@@ -119,29 +119,19 @@\n LOOP_FACTORIES.clear()\n LOOP_FACTORY_IDS.clear()\n \n- if loops:\n- for names in (name.split(',') for name in loops):\n- for name in names:\n- name = name.strip()\n- if name not in factories:\n- raise ValueError(\n- \"Unknown loop '%s', available loops: %s\" % (\n- name, list(factories.keys())))\n-\n- LOOP_FACTORIES.append(factories[name])\n- LOOP_FACTORY_IDS.append(name)\n- else:\n- LOOP_FACTORIES.append(asyncio.new_event_loop)\n- LOOP_FACTORY_IDS.append('pyloop')\n-\n- if uvloop is not None: # pragma: no cover\n- LOOP_FACTORIES.append(uvloop.new_event_loop)\n- LOOP_FACTORY_IDS.append('uvloop')\n-\n- if tokio is not None:\n- LOOP_FACTORIES.append(tokio.new_event_loop)\n- LOOP_FACTORY_IDS.append('tokio')\n-\n+ if loops == 'all':\n+ loops = 'pyloop,uvloop?,tokio?'\n+\n+ for name in loops.split(','):\n+ required = not name.endswith('?')\n+ name = name.strip(' ?')\n+ if name in factories:\n+ LOOP_FACTORIES.append(factories[name])\n+ LOOP_FACTORY_IDS.append(name)\n+ elif required:\n+ raise ValueError(\n+ \"Unknown loop '%s', available loops: %s\" % (\n+ name, list(factories.keys())))\n asyncio.set_event_loop(None)\n", "issue": "Test only asyncio loop by default.\naiohttp uses parametrized `loop` fixture which runs all test suite several times: one for asyncio itself and another ones for uvloop and tokio if they are installed.\r\nIt's good for aiohttp testing itself but bad for `pytest-aiohttp` users.\r\nFor example test suites for my jobs take about 15 mins, we don't like to double the time (we use uvloop, sure).\r\nWe could solve the problem by pushing custom `loop` fixture into our `conftest.py` but it is not obvious and robust solution.\r\n\r\nI suggest moving `pytest_configure` from `pytest_plugin.py` into `tests/conftest.py` and using only asyncio loop for tests by default.\r\nAs an option we could add support for command line `--aiohttp-loops` option and `aiohttp-loops` in `pytest.ini` for helping people to run their tests on specific asyncio loop implementations.\n", "before_files": [{"content": "import asyncio\nimport collections\nimport contextlib\nimport tempfile\nimport warnings\n\nimport pytest\nfrom py import path\n\nfrom aiohttp.web import Application\n\nfrom .test_utils import unused_port as _unused_port\nfrom .test_utils import (BaseTestServer, RawTestServer, TestClient, TestServer,\n loop_context, setup_test_loop, teardown_test_loop)\n\n\ntry:\n import uvloop\nexcept: # pragma: no cover\n uvloop = None\n\ntry:\n import tokio\nexcept: # pragma: no cover\n tokio = None\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n '--fast', action='store_true', default=False,\n help='run tests faster by disabling extra checks')\n parser.addoption(\n '--loop', action='append', default=[],\n help='run tests with specific loop: pyloop, uvloop, tokio')\n parser.addoption(\n '--enable-loop-debug', action='store_true', default=False,\n help='enable 
event loop debug mode')\n\n\[email protected]\ndef fast(request):\n \"\"\" --fast config option \"\"\"\n return request.config.getoption('--fast') # pragma: no cover\n\n\[email protected]\ndef _runtime_warning_context():\n \"\"\"\n Context manager which checks for RuntimeWarnings, specifically to\n avoid \"coroutine 'X' was never awaited\" warnings being missed.\n\n If RuntimeWarnings occur in the context a RuntimeError is raised.\n \"\"\"\n with warnings.catch_warnings(record=True) as _warnings:\n yield\n rw = ['{w.filename}:{w.lineno}:{w.message}'.format(w=w)\n for w in _warnings if w.category == RuntimeWarning]\n if rw:\n raise RuntimeError('{} Runtime Warning{},\\n{}'.format(\n len(rw),\n '' if len(rw) == 1 else 's',\n '\\n'.join(rw)\n ))\n\n\[email protected]\ndef _passthrough_loop_context(loop, fast=False):\n \"\"\"\n setups and tears down a loop unless one is passed in via the loop\n argument when it's passed straight through.\n \"\"\"\n if loop:\n # loop already exists, pass it straight through\n yield loop\n else:\n # this shadows loop_context's standard behavior\n loop = setup_test_loop()\n yield loop\n teardown_test_loop(loop, fast=fast)\n\n\ndef pytest_pycollect_makeitem(collector, name, obj):\n \"\"\"\n Fix pytest collecting for coroutines.\n \"\"\"\n if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):\n return list(collector._genfunctions(name, obj))\n\n\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"\n Run coroutines in an event loop instead of a normal function call.\n \"\"\"\n fast = pyfuncitem.config.getoption(\"--fast\")\n if asyncio.iscoroutinefunction(pyfuncitem.function):\n existing_loop = pyfuncitem.funcargs.get('loop', None)\n with _runtime_warning_context():\n with _passthrough_loop_context(existing_loop, fast=fast) as _loop:\n testargs = {arg: pyfuncitem.funcargs[arg]\n for arg in pyfuncitem._fixtureinfo.argnames}\n\n task = _loop.create_task(pyfuncitem.obj(**testargs))\n _loop.run_until_complete(task)\n\n return True\n\n\ndef pytest_configure(config):\n loops = config.getoption('--loop')\n\n factories = {'pyloop': asyncio.new_event_loop}\n\n if uvloop is not None: # pragma: no cover\n factories['uvloop'] = uvloop.new_event_loop\n\n if tokio is not None: # pragma: no cover\n factories['tokio'] = tokio.new_event_loop\n\n LOOP_FACTORIES.clear()\n LOOP_FACTORY_IDS.clear()\n\n if loops:\n for names in (name.split(',') for name in loops):\n for name in names:\n name = name.strip()\n if name not in factories:\n raise ValueError(\n \"Unknown loop '%s', available loops: %s\" % (\n name, list(factories.keys())))\n\n LOOP_FACTORIES.append(factories[name])\n LOOP_FACTORY_IDS.append(name)\n else:\n LOOP_FACTORIES.append(asyncio.new_event_loop)\n LOOP_FACTORY_IDS.append('pyloop')\n\n if uvloop is not None: # pragma: no cover\n LOOP_FACTORIES.append(uvloop.new_event_loop)\n LOOP_FACTORY_IDS.append('uvloop')\n\n if tokio is not None:\n LOOP_FACTORIES.append(tokio.new_event_loop)\n LOOP_FACTORY_IDS.append('tokio')\n\n asyncio.set_event_loop(None)\n\n\nLOOP_FACTORIES = []\nLOOP_FACTORY_IDS = []\n\n\[email protected](params=LOOP_FACTORIES, ids=LOOP_FACTORY_IDS)\ndef loop(request):\n \"\"\"Return an instance of the event loop.\"\"\"\n fast = request.config.getoption('--fast')\n debug = request.config.getoption('--enable-loop-debug')\n\n with loop_context(request.param, fast=fast) as _loop:\n if debug:\n _loop.set_debug(True) # pragma: no cover\n yield _loop\n\n\[email protected]\ndef unused_port():\n \"\"\"Return a port that is unused on the current host.\"\"\"\n 
return _unused_port\n\n\[email protected]_fixture\ndef test_server(loop):\n \"\"\"Factory to create a TestServer instance, given an app.\n\n test_server(app, **kwargs)\n \"\"\"\n servers = []\n\n @asyncio.coroutine\n def go(app, **kwargs):\n server = TestServer(app)\n yield from server.start_server(loop=loop, **kwargs)\n servers.append(server)\n return server\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while servers:\n yield from servers.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email protected]_fixture\ndef raw_test_server(loop):\n \"\"\"Factory to create a RawTestServer instance, given a web handler.\n\n raw_test_server(handler, **kwargs)\n \"\"\"\n servers = []\n\n @asyncio.coroutine\n def go(handler, **kwargs):\n server = RawTestServer(handler)\n yield from server.start_server(loop=loop, **kwargs)\n servers.append(server)\n return server\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while servers:\n yield from servers.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email protected]_fixture\ndef test_client(loop):\n \"\"\"Factory to create a TestClient instance.\n\n test_client(app, **kwargs)\n test_client(server, **kwargs)\n test_client(raw_server, **kwargs)\n \"\"\"\n clients = []\n\n @asyncio.coroutine\n def go(__param, *args, server_kwargs={}, **kwargs):\n\n if isinstance(__param, collections.Callable) and \\\n not isinstance(__param, (Application, BaseTestServer)):\n __param = __param(loop, *args, **kwargs)\n kwargs = {}\n else:\n assert not args, \"args should be empty\"\n\n if isinstance(__param, Application):\n server = TestServer(__param, loop=loop, **server_kwargs)\n client = TestClient(server, loop=loop, **kwargs)\n elif isinstance(__param, BaseTestServer):\n client = TestClient(__param, loop=loop, **kwargs)\n else:\n raise ValueError(\"Unknown argument type: %r\" % type(__param))\n\n yield from client.start_server()\n clients.append(client)\n return client\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while clients:\n yield from clients.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email protected]\ndef shorttmpdir():\n \"\"\"Provides a temporary directory with a shorter file system path than the\n tmpdir fixture.\n \"\"\"\n tmpdir = path.local(tempfile.mkdtemp())\n yield tmpdir\n tmpdir.remove(rec=1)\n", "path": "aiohttp/pytest_plugin.py"}], "after_files": [{"content": "import asyncio\nimport collections\nimport contextlib\nimport tempfile\nimport warnings\n\nimport pytest\nfrom py import path\n\nfrom aiohttp.web import Application\n\nfrom .test_utils import unused_port as _unused_port\nfrom .test_utils import (BaseTestServer, RawTestServer, TestClient, TestServer,\n loop_context, setup_test_loop, teardown_test_loop)\n\n\ntry:\n import uvloop\nexcept: # pragma: no cover\n uvloop = None\n\ntry:\n import tokio\nexcept: # pragma: no cover\n tokio = None\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n '--fast', action='store_true', default=False,\n help='run tests faster by disabling extra checks')\n parser.addoption(\n '--loop', action='store', default='pyloop',\n help='run tests with specific loop: pyloop, uvloop, tokio or all')\n parser.addoption(\n '--enable-loop-debug', action='store_true', default=False,\n help='enable event loop debug mode')\n\n\[email protected]\ndef fast(request):\n \"\"\" --fast config option \"\"\"\n return request.config.getoption('--fast') # pragma: no cover\n\n\[email protected]\ndef _runtime_warning_context():\n \"\"\"\n Context manager which checks for 
RuntimeWarnings, specifically to\n avoid \"coroutine 'X' was never awaited\" warnings being missed.\n\n If RuntimeWarnings occur in the context a RuntimeError is raised.\n \"\"\"\n with warnings.catch_warnings(record=True) as _warnings:\n yield\n rw = ['{w.filename}:{w.lineno}:{w.message}'.format(w=w)\n for w in _warnings if w.category == RuntimeWarning]\n if rw:\n raise RuntimeError('{} Runtime Warning{},\\n{}'.format(\n len(rw),\n '' if len(rw) == 1 else 's',\n '\\n'.join(rw)\n ))\n\n\[email protected]\ndef _passthrough_loop_context(loop, fast=False):\n \"\"\"\n setups and tears down a loop unless one is passed in via the loop\n argument when it's passed straight through.\n \"\"\"\n if loop:\n # loop already exists, pass it straight through\n yield loop\n else:\n # this shadows loop_context's standard behavior\n loop = setup_test_loop()\n yield loop\n teardown_test_loop(loop, fast=fast)\n\n\ndef pytest_pycollect_makeitem(collector, name, obj):\n \"\"\"\n Fix pytest collecting for coroutines.\n \"\"\"\n if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):\n return list(collector._genfunctions(name, obj))\n\n\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"\n Run coroutines in an event loop instead of a normal function call.\n \"\"\"\n fast = pyfuncitem.config.getoption(\"--fast\")\n if asyncio.iscoroutinefunction(pyfuncitem.function):\n existing_loop = pyfuncitem.funcargs.get('loop', None)\n with _runtime_warning_context():\n with _passthrough_loop_context(existing_loop, fast=fast) as _loop:\n testargs = {arg: pyfuncitem.funcargs[arg]\n for arg in pyfuncitem._fixtureinfo.argnames}\n\n task = _loop.create_task(pyfuncitem.obj(**testargs))\n _loop.run_until_complete(task)\n\n return True\n\n\ndef pytest_configure(config):\n loops = config.getoption('--loop')\n\n factories = {'pyloop': asyncio.new_event_loop}\n\n if uvloop is not None: # pragma: no cover\n factories['uvloop'] = uvloop.new_event_loop\n\n if tokio is not None: # pragma: no cover\n factories['tokio'] = tokio.new_event_loop\n\n LOOP_FACTORIES.clear()\n LOOP_FACTORY_IDS.clear()\n\n if loops == 'all':\n loops = 'pyloop,uvloop?,tokio?'\n\n for name in loops.split(','):\n required = not name.endswith('?')\n name = name.strip(' ?')\n if name in factories:\n LOOP_FACTORIES.append(factories[name])\n LOOP_FACTORY_IDS.append(name)\n elif required:\n raise ValueError(\n \"Unknown loop '%s', available loops: %s\" % (\n name, list(factories.keys())))\n asyncio.set_event_loop(None)\n\n\nLOOP_FACTORIES = []\nLOOP_FACTORY_IDS = []\n\n\[email protected](params=LOOP_FACTORIES, ids=LOOP_FACTORY_IDS)\ndef loop(request):\n \"\"\"Return an instance of the event loop.\"\"\"\n fast = request.config.getoption('--fast')\n debug = request.config.getoption('--enable-loop-debug')\n\n with loop_context(request.param, fast=fast) as _loop:\n if debug:\n _loop.set_debug(True) # pragma: no cover\n yield _loop\n\n\[email protected]\ndef unused_port():\n \"\"\"Return a port that is unused on the current host.\"\"\"\n return _unused_port\n\n\[email protected]_fixture\ndef test_server(loop):\n \"\"\"Factory to create a TestServer instance, given an app.\n\n test_server(app, **kwargs)\n \"\"\"\n servers = []\n\n @asyncio.coroutine\n def go(app, **kwargs):\n server = TestServer(app)\n yield from server.start_server(loop=loop, **kwargs)\n servers.append(server)\n return server\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while servers:\n yield from servers.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email 
protected]_fixture\ndef raw_test_server(loop):\n \"\"\"Factory to create a RawTestServer instance, given a web handler.\n\n raw_test_server(handler, **kwargs)\n \"\"\"\n servers = []\n\n @asyncio.coroutine\n def go(handler, **kwargs):\n server = RawTestServer(handler)\n yield from server.start_server(loop=loop, **kwargs)\n servers.append(server)\n return server\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while servers:\n yield from servers.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email protected]_fixture\ndef test_client(loop):\n \"\"\"Factory to create a TestClient instance.\n\n test_client(app, **kwargs)\n test_client(server, **kwargs)\n test_client(raw_server, **kwargs)\n \"\"\"\n clients = []\n\n @asyncio.coroutine\n def go(__param, *args, server_kwargs={}, **kwargs):\n\n if isinstance(__param, collections.Callable) and \\\n not isinstance(__param, (Application, BaseTestServer)):\n __param = __param(loop, *args, **kwargs)\n kwargs = {}\n else:\n assert not args, \"args should be empty\"\n\n if isinstance(__param, Application):\n server = TestServer(__param, loop=loop, **server_kwargs)\n client = TestClient(server, loop=loop, **kwargs)\n elif isinstance(__param, BaseTestServer):\n client = TestClient(__param, loop=loop, **kwargs)\n else:\n raise ValueError(\"Unknown argument type: %r\" % type(__param))\n\n yield from client.start_server()\n clients.append(client)\n return client\n\n yield go\n\n @asyncio.coroutine\n def finalize():\n while clients:\n yield from clients.pop().close()\n\n loop.run_until_complete(finalize())\n\n\[email protected]\ndef shorttmpdir():\n \"\"\"Provides a temporary directory with a shorter file system path than the\n tmpdir fixture.\n \"\"\"\n tmpdir = path.local(tempfile.mkdtemp())\n yield tmpdir\n tmpdir.remove(rec=1)\n", "path": "aiohttp/pytest_plugin.py"}]}
| 2,873 | 510 |
gh_patches_debug_17787 | rasdani/github-patches | git_diff | encode__uvicorn-1077 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gunicorn keeps booting workers when exceptions raised at startup events
Hi, I have the same issue when deploying FastAPI using gunicorn + uvicorn: it keeps booting workers when exceptions are raised at startup events.
**main.py**
```python
from fastapi import FastAPI
app = FastAPI()
@app.on_event("startup")
async def startup():
raise Exception("error")
@app.get("/")
async def hello():
return {"msg": "hello world"}
```
**gunicorn.conf.py**
```python
import json
import os
worker_class = "uvicorn.workers.UvicornWorker"
host = os.getenv("HOST", "0.0.0.0")
port = os.getenv("PORT", "8000")
bind_env = os.getenv("BIND", None)
use_loglevel = os.getenv("LOG_LEVEL", "info")
if bind_env:
use_bind = bind_env
else:
use_bind = f"{host}:{port}"
accesslog_var = os.getenv("ACCESS_LOG", "-")
use_accesslog = accesslog_var or None
errorlog_var = os.getenv("ERROR_LOG", "-")
use_errorlog = errorlog_var or None
graceful_timeout_str = os.getenv("GRACEFUL_TIMEOUT", "120")
timeout_str = os.getenv("TIMEOUT", "120")
keepalive_str = os.getenv("KEEP_ALIVE", "5")
# Gunicorn config variables
loglevel = use_loglevel
bind = use_bind
errorlog = use_errorlog
accesslog = use_accesslog
graceful_timeout = int(graceful_timeout_str)
timeout = int(timeout_str)
keepalive = int(keepalive_str)
# For debugging and testing
log_data = {
"loglevel": loglevel,
"bind": bind,
"graceful_timeout": graceful_timeout,
"timeout": timeout,
"keepalive": keepalive,
"errorlog": errorlog,
"accesslog": accesslog,
}
print(json.dumps(log_data))
```
**gunicorn log**
```text
{"loglevel": "info", "bind": "0.0.0.0:8000", "graceful_timeout": 120, "timeout": 120, "keepalive": 5, "errorlog": "-", "accesslog": "-", "host": "0.0.0.0", "port": "8000"}
[2021-06-01 21:13:05 +0800] [40946] [INFO] Starting gunicorn 20.0.4
[2021-06-01 21:13:05 +0800] [40946] [INFO] Listening at: http://0.0.0.0:8000 (40946)
[2021-06-01 21:13:05 +0800] [40946] [INFO] Using worker: uvicorn.workers.UvicornWorker
[2021-06-01 21:13:05 +0800] [40949] [INFO] Booting worker with pid: 40949
[2021-06-01 21:13:05 +0800] [40949] [INFO] Started server process [40949]
[2021-06-01 21:13:05 +0800] [40949] [INFO] Waiting for application startup.
[2021-06-01 21:13:05 +0800] [40949] [ERROR] Traceback (most recent call last):
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 526, in lifespan
async for item in self.lifespan_context(app):
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 467, in default_lifespan
await self.startup()
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 502, in startup
await handler()
File "./app/main.py", line 16, in startup
raise Exception("error")
Exception: error
[2021-06-01 21:13:05 +0800] [40949] [ERROR] Application startup failed. Exiting.
[2021-06-01 21:13:05 +0800] [40949] [INFO] Worker exiting (pid: 40949)
[2021-06-01 21:13:05 +0800] [40950] [INFO] Booting worker with pid: 40950
[2021-06-01 21:13:06 +0800] [40950] [INFO] Started server process [40950]
[2021-06-01 21:13:06 +0800] [40950] [INFO] Waiting for application startup.
[2021-06-01 21:13:06 +0800] [40950] [ERROR] Traceback (most recent call last):
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 526, in lifespan
async for item in self.lifespan_context(app):
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 467, in default_lifespan
await self.startup()
File "./envs/test/lib/python3.7/site-packages/starlette/routing.py", line 502, in startup
await handler()
File "./app/main.py", line 16, in startup
raise Exception("error")
Exception: error
[2021-06-01 21:13:06 +0800] [40950] [ERROR] Application startup failed. Exiting.
```
I hope gunicorn would exit when exceptions are raised, but it keeps booting workers. Can someone help me? Thanks.
_Originally posted by @fanchunke1991 in https://github.com/encode/uvicorn/issues/1031#issuecomment-852121760_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/workers.py`
Content:
```
1 import asyncio
2 import logging
3 import signal
4 from typing import Any
5
6 from gunicorn.workers.base import Worker
7
8 from uvicorn.config import Config
9 from uvicorn.main import Server
10
11
12 class UvicornWorker(Worker):
13 """
14 A worker class for Gunicorn that interfaces with an ASGI consumer callable,
15 rather than a WSGI callable.
16 """
17
18 CONFIG_KWARGS = {"loop": "auto", "http": "auto"}
19
20 def __init__(self, *args: Any, **kwargs: Any) -> None:
21 super(UvicornWorker, self).__init__(*args, **kwargs)
22
23 logger = logging.getLogger("uvicorn.error")
24 logger.handlers = self.log.error_log.handlers
25 logger.setLevel(self.log.error_log.level)
26 logger.propagate = False
27
28 logger = logging.getLogger("uvicorn.access")
29 logger.handlers = self.log.access_log.handlers
30 logger.setLevel(self.log.access_log.level)
31 logger.propagate = False
32
33 config_kwargs = {
34 "app": None,
35 "log_config": None,
36 "timeout_keep_alive": self.cfg.keepalive,
37 "timeout_notify": self.timeout,
38 "callback_notify": self.callback_notify,
39 "limit_max_requests": self.max_requests,
40 "forwarded_allow_ips": self.cfg.forwarded_allow_ips,
41 }
42
43 if self.cfg.is_ssl:
44 ssl_kwargs = {
45 "ssl_keyfile": self.cfg.ssl_options.get("keyfile"),
46 "ssl_certfile": self.cfg.ssl_options.get("certfile"),
47 "ssl_keyfile_password": self.cfg.ssl_options.get("password"),
48 "ssl_version": self.cfg.ssl_options.get("ssl_version"),
49 "ssl_cert_reqs": self.cfg.ssl_options.get("cert_reqs"),
50 "ssl_ca_certs": self.cfg.ssl_options.get("ca_certs"),
51 "ssl_ciphers": self.cfg.ssl_options.get("ciphers"),
52 }
53 config_kwargs.update(ssl_kwargs)
54
55 if self.cfg.settings["backlog"].value:
56 config_kwargs["backlog"] = self.cfg.settings["backlog"].value
57
58 config_kwargs.update(self.CONFIG_KWARGS)
59
60 self.config = Config(**config_kwargs)
61
62 def init_process(self) -> None:
63 self.config.setup_event_loop()
64 super(UvicornWorker, self).init_process()
65
66 def init_signals(self) -> None:
67 # Reset signals so Gunicorn doesn't swallow subprocess return codes
68 # other signals are set up by Server.install_signal_handlers()
69 # See: https://github.com/encode/uvicorn/issues/894
70 for s in self.SIGNALS:
71 signal.signal(s, signal.SIG_DFL)
72
73 def run(self) -> None:
74 self.config.app = self.wsgi
75 server = Server(config=self.config)
76 loop = asyncio.get_event_loop()
77 loop.run_until_complete(server.serve(sockets=self.sockets))
78
79 async def callback_notify(self) -> None:
80 self.notify()
81
82
83 class UvicornH11Worker(UvicornWorker):
84 CONFIG_KWARGS = {"loop": "asyncio", "http": "h11"}
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/workers.py b/uvicorn/workers.py
--- a/uvicorn/workers.py
+++ b/uvicorn/workers.py
@@ -1,8 +1,10 @@
import asyncio
import logging
import signal
+import sys
from typing import Any
+from gunicorn.arbiter import Arbiter
from gunicorn.workers.base import Worker
from uvicorn.config import Config
@@ -75,6 +77,11 @@
server = Server(config=self.config)
loop = asyncio.get_event_loop()
loop.run_until_complete(server.serve(sockets=self.sockets))
+ # Exit with status 3 when worker starts failed, so Gunicorn
+ # can shut it down to avoid infinite start/stop cycles.
+ # See: https://github.com/encode/uvicorn/issues/1066
+ if not server.started:
+ sys.exit(Arbiter.WORKER_BOOT_ERROR)
async def callback_notify(self) -> None:
self.notify()
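A brief sketch of the exit-code convention the patch relies on: gunicorn's master stops re-spawning a worker that exits with `WORKER_BOOT_ERROR` (status 3, as the comment in the patch notes). The helper below is an illustrative assumption, not gunicorn internals or the actual worker code.

```python
import sys

WORKER_BOOT_ERROR = 3  # matches gunicorn.arbiter.Arbiter.WORKER_BOOT_ERROR per the patch comment

def exit_if_startup_failed(server_started: bool) -> None:
    # Mirrors the patched UvicornWorker.run(): if the ASGI lifespan startup
    # failed, exit with the boot-error status so the master gives up instead
    # of respawning the worker forever.
    if not server_started:
        sys.exit(WORKER_BOOT_ERROR)

exit_if_startup_failed(True)    # startup succeeded: returns normally
# exit_if_startup_failed(False) # would terminate the process with status 3
```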
|
{"golden_diff": "diff --git a/uvicorn/workers.py b/uvicorn/workers.py\n--- a/uvicorn/workers.py\n+++ b/uvicorn/workers.py\n@@ -1,8 +1,10 @@\n import asyncio\n import logging\n import signal\n+import sys\n from typing import Any\n \n+from gunicorn.arbiter import Arbiter\n from gunicorn.workers.base import Worker\n \n from uvicorn.config import Config\n@@ -75,6 +77,11 @@\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n+ # Exit with status 3 when worker starts failed, so Gunicorn\n+ # can shut it down to avoid infinite start/stop cycles.\n+ # See: https://github.com/encode/uvicorn/issues/1066\n+ if not server.started:\n+ sys.exit(Arbiter.WORKER_BOOT_ERROR)\n \n async def callback_notify(self) -> None:\n self.notify()\n", "issue": "gunicorn keeps booting workers when exceptions raised at startup events\nHi, I have the same issues when deploy FastAPI using gunicorn + uvicorn\uff0cit keeps booting workers when exceptions raised at startup events.\r\n\r\n**main.py**\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected]_event(\"startup\")\r\nasync def startup():\r\n raise Exception(\"error\")\r\n\r\n\r\[email protected](\"/\")\r\nasync def hello():\r\n return {\"msg\": \"hello world\"}\r\n```\r\n\r\n**gunicorn.conf.py**\r\n\r\n```python\r\nimport json\r\nimport os\r\n\r\nworker_class = \"uvicorn.workers.UvicornWorker\"\r\n\r\nhost = os.getenv(\"HOST\", \"0.0.0.0\")\r\nport = os.getenv(\"PORT\", \"8000\")\r\nbind_env = os.getenv(\"BIND\", None)\r\nuse_loglevel = os.getenv(\"LOG_LEVEL\", \"info\")\r\nif bind_env:\r\n use_bind = bind_env\r\nelse:\r\n use_bind = f\"{host}:{port}\"\r\n\r\n\r\naccesslog_var = os.getenv(\"ACCESS_LOG\", \"-\")\r\nuse_accesslog = accesslog_var or None\r\nerrorlog_var = os.getenv(\"ERROR_LOG\", \"-\")\r\nuse_errorlog = errorlog_var or None\r\ngraceful_timeout_str = os.getenv(\"GRACEFUL_TIMEOUT\", \"120\")\r\ntimeout_str = os.getenv(\"TIMEOUT\", \"120\")\r\nkeepalive_str = os.getenv(\"KEEP_ALIVE\", \"5\")\r\n\r\n# Gunicorn config variables\r\nloglevel = use_loglevel\r\nbind = use_bind\r\nerrorlog = use_errorlog\r\naccesslog = use_accesslog\r\ngraceful_timeout = int(graceful_timeout_str)\r\ntimeout = int(timeout_str)\r\nkeepalive = int(keepalive_str)\r\n\r\n# For debugging and testing\r\nlog_data = {\r\n \"loglevel\": loglevel,\r\n \"bind\": bind,\r\n \"graceful_timeout\": graceful_timeout,\r\n \"timeout\": timeout,\r\n \"keepalive\": keepalive,\r\n \"errorlog\": errorlog,\r\n \"accesslog\": accesslog,\r\n}\r\nprint(json.dumps(log_data))\r\n```\r\n\r\n**gunicorn log**\r\n\r\n```text\r\n{\"loglevel\": \"info\", \"bind\": \"0.0.0.0:8000\", \"graceful_timeout\": 120, \"timeout\": 120, \"keepalive\": 5, \"errorlog\": \"-\", \"accesslog\": \"-\", \"host\": \"0.0.0.0\", \"port\": \"8000\"}\r\n[2021-06-01 21:13:05 +0800] [40946] [INFO] Starting gunicorn 20.0.4\r\n[2021-06-01 21:13:05 +0800] [40946] [INFO] Listening at: http://0.0.0.0:8000 (40946)\r\n[2021-06-01 21:13:05 +0800] [40946] [INFO] Using worker: uvicorn.workers.UvicornWorker\r\n[2021-06-01 21:13:05 +0800] [40949] [INFO] Booting worker with pid: 40949\r\n[2021-06-01 21:13:05 +0800] [40949] [INFO] Started server process [40949]\r\n[2021-06-01 21:13:05 +0800] [40949] [INFO] Waiting for application startup.\r\n[2021-06-01 21:13:05 +0800] [40949] [ERROR] Traceback (most recent call last):\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 526, in lifespan\r\n async for 
item in self.lifespan_context(app):\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 467, in default_lifespan\r\n await self.startup()\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 502, in startup\r\n await handler()\r\n File \"./app/main.py\", line 16, in startup\r\n raise Exception(\"error\")\r\nException: error\r\n\r\n[2021-06-01 21:13:05 +0800] [40949] [ERROR] Application startup failed. Exiting.\r\n[2021-06-01 21:13:05 +0800] [40949] [INFO] Worker exiting (pid: 40949)\r\n[2021-06-01 21:13:05 +0800] [40950] [INFO] Booting worker with pid: 40950\r\n[2021-06-01 21:13:06 +0800] [40950] [INFO] Started server process [40950]\r\n[2021-06-01 21:13:06 +0800] [40950] [INFO] Waiting for application startup.\r\n[2021-06-01 21:13:06 +0800] [40950] [ERROR] Traceback (most recent call last):\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 526, in lifespan\r\n async for item in self.lifespan_context(app):\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 467, in default_lifespan\r\n await self.startup()\r\n File \"./envs/test/lib/python3.7/site-packages/starlette/routing.py\", line 502, in startup\r\n await handler()\r\n File \"./app/main.py\", line 16, in startup\r\n raise Exception(\"error\")\r\nException: error\r\n\r\n[2021-06-01 21:13:06 +0800] [40950] [ERROR] Application startup failed. Exiting.\r\n```\r\n\r\nI hope the gunicorn would exit when exceptions raised, but it keeps booting workers. Can someone helps me, thanks?\r\n\r\n_Originally posted by @fanchunke1991 in https://github.com/encode/uvicorn/issues/1031#issuecomment-852121760_\n", "before_files": [{"content": "import asyncio\nimport logging\nimport signal\nfrom typing import Any\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"auto\", \"http\": \"auto\"}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n logger.propagate = False\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n logger.propagate = False\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_keyfile_password\": self.cfg.ssl_options.get(\"password\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n 
self.config = Config(**config_kwargs)\n\n def init_process(self) -> None:\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self) -> None:\n # Reset signals so Gunicorn doesn't swallow subprocess return codes\n # other signals are set up by Server.install_signal_handlers()\n # See: https://github.com/encode/uvicorn/issues/894\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n\n def run(self) -> None:\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n\n async def callback_notify(self) -> None:\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport signal\nimport sys\nfrom typing import Any\n\nfrom gunicorn.arbiter import Arbiter\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"auto\", \"http\": \"auto\"}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n logger.propagate = False\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n logger.propagate = False\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_keyfile_password\": self.cfg.ssl_options.get(\"password\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self) -> None:\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self) -> None:\n # Reset signals so Gunicorn doesn't swallow subprocess return codes\n # other signals are set up by Server.install_signal_handlers()\n # See: https://github.com/encode/uvicorn/issues/894\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n\n def run(self) -> None:\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n # Exit with status 3 when worker starts failed, so Gunicorn\n # can shut it down to avoid infinite start/stop cycles.\n # See: https://github.com/encode/uvicorn/issues/1066\n if not 
server.started:\n sys.exit(Arbiter.WORKER_BOOT_ERROR)\n\n async def callback_notify(self) -> None:\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}]}
| 2,591 | 223 |
gh_patches_debug_11454 | rasdani/github-patches | git_diff | beeware__toga-1617 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WHEN TYPING "-" IN THE NUMBERINPUT, WIDGET FAILS.
"""
TESTE
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class TESTE(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
# WIDGETS ###############################
self.number = toga.NumberInput()
self.pushButton = toga.Button('AHHHH')
########################################
# BOX ####################################################
main_box = toga.Box(style=Pack(direction=COLUMN))
main_box.add(self.number, self.pushButton)
#########################################################
# EVENT #####################################################
self.pushButton.on_press = self.printar
##############################################################
# WINDOW #####################################################
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
##############################################################
def printar(self, widget):
brasil = float(self.number.value)
print(brasil)
def main():
return TESTE()
https://user-images.githubusercontent.com/75274707/195914116-84981cc4-62d4-423c-a51d-0b77b4f6948a.mp4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/android/toga_android/widgets/numberinput.py`
Content:
```
1 from decimal import Decimal
2
3 from travertino.size import at_least
4
5 from ..libs.android.text import InputType, TextWatcher
6 from ..libs.android.util import TypedValue
7 from ..libs.android.view import Gravity, View__MeasureSpec
8 from ..libs.android.widget import EditText
9 from .base import Widget, align
10
11
12 def decimal_from_string(s):
13 """If s is the empty string, return `None`. Otherwise, convert to a `Decimal`,
14 allowing any exceptions to bubble up."""
15 if not s:
16 return None
17 return Decimal(s)
18
19
20 def string_from_decimal(d):
21 '''Implement the inverse of `decimal_from_string()`. This way, Toga's
22 `NumericInput` can pass us a `None` or `Decimal`, and we can always place
23 a String in the Android `EditText`.'''
24 if d is None:
25 return ""
26 return str(d)
27
28
29 class TogaNumberInputWatcher(TextWatcher):
30 def __init__(self, impl):
31 super().__init__()
32 self.interface = impl.interface
33
34 def beforeTextChanged(self, _charSequence, _start, _count, _after):
35 pass
36
37 def afterTextChanged(self, editable):
38 # Toga `NumberInput` stores the value as a property on the `interface`.
39 self.interface._value = decimal_from_string(editable.toString())
40 # Call the user on_change callback, if it exists.
41 if self.interface.on_change:
42 self.interface.on_change(widget=self.interface)
43
44 def onTextChanged(self, _charSequence, _start, _before, _count):
45 pass
46
47
48 class NumberInput(Widget):
49 def create(self):
50 self.native = EditText(self._native_activity)
51 self.native.addTextChangedListener(TogaNumberInputWatcher(self))
52
53 # A `NumberInput` in Toga supports signed decimal numbers.
54 self.native.setInputType(
55 InputType.TYPE_CLASS_NUMBER
56 | InputType.TYPE_NUMBER_FLAG_DECIMAL
57 | InputType.TYPE_NUMBER_FLAG_SIGNED
58 )
59
60 def set_readonly(self, value):
61 self.native.setFocusable(not value)
62
63 def set_placeholder(self, value):
64 # Android EditText's setHint() requires a Python string.
65 self.native.setHint(value if value is not None else "")
66
67 def set_alignment(self, value):
68 self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))
69
70 def set_font(self, font):
71 if font:
72 font_impl = font.bind(self.interface.factory)
73 self.native.setTextSize(TypedValue.COMPLEX_UNIT_SP, font_impl.get_size())
74 self.native.setTypeface(font_impl.get_typeface(), font_impl.get_style())
75
76 def set_value(self, value):
77 # Store a string in the Android widget. The `afterTextChanged` method
78 # will call the user on_change handler.
79 self.native.setText(string_from_decimal(value))
80
81 def set_step(self, step):
82 self.interface.factory.not_implemented("NumberInput.set_step()")
83
84 def set_max_value(self, value):
85 self.interface.factory.not_implemented("NumberInput.set_max_value()")
86
87 def set_min_value(self, value):
88 self.interface.factory.not_implemented("NumberInput.set_min_value()")
89
90 def set_on_change(self, handler):
91 # No special handling required.
92 pass
93
94 def rehint(self):
95 # On Android, EditText's measure() throws NullPointerException if the widget has no
96 # LayoutParams.
97 if not self.native.getLayoutParams():
98 return
99 self.native.measure(
100 View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED
101 )
102 self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
103 self.interface.intrinsic.height = self.native.getMeasuredHeight()
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/android/toga_android/widgets/numberinput.py b/src/android/toga_android/widgets/numberinput.py
--- a/src/android/toga_android/widgets/numberinput.py
+++ b/src/android/toga_android/widgets/numberinput.py
@@ -1,4 +1,4 @@
-from decimal import Decimal
+from decimal import Decimal, InvalidOperation
from travertino.size import at_least
@@ -10,11 +10,11 @@
def decimal_from_string(s):
- """If s is the empty string, return `None`. Otherwise, convert to a `Decimal`,
- allowing any exceptions to bubble up."""
- if not s:
+ """Convert s to a `Decimal`, returning `None` if it's not a valid number."""
+ try:
+ return Decimal(s)
+ except InvalidOperation:
return None
- return Decimal(s)
def string_from_decimal(d):
|
{"golden_diff": "diff --git a/src/android/toga_android/widgets/numberinput.py b/src/android/toga_android/widgets/numberinput.py\n--- a/src/android/toga_android/widgets/numberinput.py\n+++ b/src/android/toga_android/widgets/numberinput.py\n@@ -1,4 +1,4 @@\n-from decimal import Decimal\n+from decimal import Decimal, InvalidOperation\n \n from travertino.size import at_least\n \n@@ -10,11 +10,11 @@\n \n \n def decimal_from_string(s):\n- \"\"\"If s is the empty string, return `None`. Otherwise, convert to a `Decimal`,\n- allowing any exceptions to bubble up.\"\"\"\n- if not s:\n+ \"\"\"Convert s to a `Decimal`, returning `None` if it's not a valid number.\"\"\"\n+ try:\n+ return Decimal(s)\n+ except InvalidOperation:\n return None\n- return Decimal(s)\n \n \n def string_from_decimal(d):\n", "issue": "WHEN TYPING \"-\" IN THE NUMBERINPUT, WIDGET FAILS.\n\"\"\"\r\nTESTE\r\n\"\"\"\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\n\r\nclass TESTE(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n\r\n # WIDGETS ###############################\r\n self.number = toga.NumberInput()\r\n self.pushButton = toga.Button('AHHHH')\r\n ########################################\r\n\r\n # BOX ####################################################\r\n main_box = toga.Box(style=Pack(direction=COLUMN))\r\n main_box.add(self.number, self.pushButton)\r\n #########################################################\r\n\r\n # EVENT #####################################################\r\n self.pushButton.on_press = self.printar\r\n ##############################################################\r\n\r\n # WINDOW #####################################################\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = main_box\r\n self.main_window.show()\r\n ##############################################################\r\n\r\n def printar(self, widget):\r\n brasil = float(self.number.value)\r\n print(brasil)\r\n\r\ndef main():\r\n return TESTE()\r\n\r\nhttps://user-images.githubusercontent.com/75274707/195914116-84981cc4-62d4-423c-a51d-0b77b4f6948a.mp4\r\n\r\n\n", "before_files": [{"content": "from decimal import Decimal\n\nfrom travertino.size import at_least\n\nfrom ..libs.android.text import InputType, TextWatcher\nfrom ..libs.android.util import TypedValue\nfrom ..libs.android.view import Gravity, View__MeasureSpec\nfrom ..libs.android.widget import EditText\nfrom .base import Widget, align\n\n\ndef decimal_from_string(s):\n \"\"\"If s is the empty string, return `None`. Otherwise, convert to a `Decimal`,\n allowing any exceptions to bubble up.\"\"\"\n if not s:\n return None\n return Decimal(s)\n\n\ndef string_from_decimal(d):\n '''Implement the inverse of `decimal_from_string()`. 
This way, Toga's\n `NumericInput` can pass us a `None` or `Decimal`, and we can always place\n a String in the Android `EditText`.'''\n if d is None:\n return \"\"\n return str(d)\n\n\nclass TogaNumberInputWatcher(TextWatcher):\n def __init__(self, impl):\n super().__init__()\n self.interface = impl.interface\n\n def beforeTextChanged(self, _charSequence, _start, _count, _after):\n pass\n\n def afterTextChanged(self, editable):\n # Toga `NumberInput` stores the value as a property on the `interface`.\n self.interface._value = decimal_from_string(editable.toString())\n # Call the user on_change callback, if it exists.\n if self.interface.on_change:\n self.interface.on_change(widget=self.interface)\n\n def onTextChanged(self, _charSequence, _start, _before, _count):\n pass\n\n\nclass NumberInput(Widget):\n def create(self):\n self.native = EditText(self._native_activity)\n self.native.addTextChangedListener(TogaNumberInputWatcher(self))\n\n # A `NumberInput` in Toga supports signed decimal numbers.\n self.native.setInputType(\n InputType.TYPE_CLASS_NUMBER\n | InputType.TYPE_NUMBER_FLAG_DECIMAL\n | InputType.TYPE_NUMBER_FLAG_SIGNED\n )\n\n def set_readonly(self, value):\n self.native.setFocusable(not value)\n\n def set_placeholder(self, value):\n # Android EditText's setHint() requires a Python string.\n self.native.setHint(value if value is not None else \"\")\n\n def set_alignment(self, value):\n self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))\n\n def set_font(self, font):\n if font:\n font_impl = font.bind(self.interface.factory)\n self.native.setTextSize(TypedValue.COMPLEX_UNIT_SP, font_impl.get_size())\n self.native.setTypeface(font_impl.get_typeface(), font_impl.get_style())\n\n def set_value(self, value):\n # Store a string in the Android widget. The `afterTextChanged` method\n # will call the user on_change handler.\n self.native.setText(string_from_decimal(value))\n\n def set_step(self, step):\n self.interface.factory.not_implemented(\"NumberInput.set_step()\")\n\n def set_max_value(self, value):\n self.interface.factory.not_implemented(\"NumberInput.set_max_value()\")\n\n def set_min_value(self, value):\n self.interface.factory.not_implemented(\"NumberInput.set_min_value()\")\n\n def set_on_change(self, handler):\n # No special handling required.\n pass\n\n def rehint(self):\n # On Android, EditText's measure() throws NullPointerException if the widget has no\n # LayoutParams.\n if not self.native.getLayoutParams():\n return\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n", "path": "src/android/toga_android/widgets/numberinput.py"}], "after_files": [{"content": "from decimal import Decimal, InvalidOperation\n\nfrom travertino.size import at_least\n\nfrom ..libs.android.text import InputType, TextWatcher\nfrom ..libs.android.util import TypedValue\nfrom ..libs.android.view import Gravity, View__MeasureSpec\nfrom ..libs.android.widget import EditText\nfrom .base import Widget, align\n\n\ndef decimal_from_string(s):\n \"\"\"Convert s to a `Decimal`, returning `None` if it's not a valid number.\"\"\"\n try:\n return Decimal(s)\n except InvalidOperation:\n return None\n\n\ndef string_from_decimal(d):\n '''Implement the inverse of `decimal_from_string()`. 
This way, Toga's\n `NumericInput` can pass us a `None` or `Decimal`, and we can always place\n a String in the Android `EditText`.'''\n if d is None:\n return \"\"\n return str(d)\n\n\nclass TogaNumberInputWatcher(TextWatcher):\n def __init__(self, impl):\n super().__init__()\n self.interface = impl.interface\n\n def beforeTextChanged(self, _charSequence, _start, _count, _after):\n pass\n\n def afterTextChanged(self, editable):\n # Toga `NumberInput` stores the value as a property on the `interface`.\n self.interface._value = decimal_from_string(editable.toString())\n # Call the user on_change callback, if it exists.\n if self.interface.on_change:\n self.interface.on_change(widget=self.interface)\n\n def onTextChanged(self, _charSequence, _start, _before, _count):\n pass\n\n\nclass NumberInput(Widget):\n def create(self):\n self.native = EditText(self._native_activity)\n self.native.addTextChangedListener(TogaNumberInputWatcher(self))\n\n # A `NumberInput` in Toga supports signed decimal numbers.\n self.native.setInputType(\n InputType.TYPE_CLASS_NUMBER\n | InputType.TYPE_NUMBER_FLAG_DECIMAL\n | InputType.TYPE_NUMBER_FLAG_SIGNED\n )\n\n def set_readonly(self, value):\n self.native.setFocusable(not value)\n\n def set_placeholder(self, value):\n # Android EditText's setHint() requires a Python string.\n self.native.setHint(value if value is not None else \"\")\n\n def set_alignment(self, value):\n self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))\n\n def set_font(self, font):\n if font:\n font_impl = font.bind(self.interface.factory)\n self.native.setTextSize(TypedValue.COMPLEX_UNIT_SP, font_impl.get_size())\n self.native.setTypeface(font_impl.get_typeface(), font_impl.get_style())\n\n def set_value(self, value):\n # Store a string in the Android widget. The `afterTextChanged` method\n # will call the user on_change handler.\n self.native.setText(string_from_decimal(value))\n\n def set_step(self, step):\n self.interface.factory.not_implemented(\"NumberInput.set_step()\")\n\n def set_max_value(self, value):\n self.interface.factory.not_implemented(\"NumberInput.set_max_value()\")\n\n def set_min_value(self, value):\n self.interface.factory.not_implemented(\"NumberInput.set_min_value()\")\n\n def set_on_change(self, handler):\n # No special handling required.\n pass\n\n def rehint(self):\n # On Android, EditText's measure() throws NullPointerException if the widget has no\n # LayoutParams.\n if not self.native.getLayoutParams():\n return\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n", "path": "src/android/toga_android/widgets/numberinput.py"}]}
| 1,576 | 197 |
gh_patches_debug_22749
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5133
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add stripe test key for testing purposes on the heroku deployment
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
Stripe test key needs to be added for testing purposes on the heroku deployment.
**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
Update the `populate_db.py` file to add it in the settings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `populate_db.py`
Content:
```
1 from app import current_app
2 from app.models import db
3 from app.api.helpers.db import get_or_create # , save_to_db
4
5 # Admin message settings
6 from app.api.helpers.system_mails import MAILS
7 from app.models.message_setting import MessageSettings
8
9 # Event Role-Service Permissions
10 from app.models.role import Role
11 from app.models.service import Service
12 from app.models.permission import Permission
13
14 from app.models.track import Track
15 from app.models.session import Session
16 from app.models.speaker import Speaker
17 from app.models.sponsor import Sponsor
18 from app.models.microlocation import Microlocation
19
20 from app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR
21
22 # Admin Panel Permissions
23 from app.models.panel_permission import PanelPermission
24 from app.models.custom_system_role import CustomSysRole
25
26 from app.models.setting import Setting
27 from app.models.image_size import ImageSizes
28 from app.models.module import Module
29
30 # EventTopic
31 from app.models.event_topic import EventTopic
32
33 # EventType
34 from app.models.event_type import EventType
35
36 # EventLocation
37 from app.models.event_location import EventLocation
38
39 # User Permissions
40 from app.models.user_permission import UserPermission
41 SALES = 'sales'
42
43
44 def create_roles():
45 get_or_create(Role, name=ORGANIZER, title_name='Organizer')
46 get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')
47 get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')
48 get_or_create(Role, name=MODERATOR, title_name='Moderator')
49 get_or_create(Role, name=ATTENDEE, title_name='Attendee')
50 get_or_create(Role, name=REGISTRAR, title_name='Registrar')
51
52
53 def create_services():
54 track = Track.get_service_name()
55 session = Session.get_service_name()
56 speaker = Speaker.get_service_name()
57 sponsor = Sponsor.get_service_name()
58 microlocation = Microlocation.get_service_name()
59
60 get_or_create(Service, name=track)
61 get_or_create(Service, name=session)
62 get_or_create(Service, name=speaker)
63 get_or_create(Service, name=sponsor)
64 get_or_create(Service, name=microlocation)
65
66
67 def create_settings():
68 get_or_create(Setting, app_name='Open Event')
69
70
71 def create_event_image_sizes():
72 get_or_create(
73 ImageSizes, type='event-image', full_width=1300,
74 full_height=500, full_aspect=True, full_quality=80,
75 icon_width=75, icon_height=30, icon_aspect=True,
76 icon_quality=80, thumbnail_width=500, thumbnail_height=200,
77 thumbnail_aspect=True, thumbnail_quality=80, logo_width=500,
78 logo_height=200
79 )
80
81
82 def create_speaker_image_sizes():
83 get_or_create(
84 ImageSizes, type='speaker-image', icon_size_width_height=35, icon_size_quality=80,
85 small_size_width_height=50, small_size_quality=80,
86 thumbnail_size_width_height=500, thumbnail_quality=80
87 )
88
89
90 def create_modules():
91 get_or_create(Module, donation_include=False)
92
93
94 def create_event_topics():
95 event_topic = ['Health & Wellness', 'Home & Lifestyle',
96 'Charity & Causes', 'Other', 'Religion & Spirituality',
97 'Community & Culture', 'Government & Politics',
98 'Government & Politics', 'Auto, Boat & Air',
99 'Travel & Outdoor', 'Hobbies & Special Interest',
100 'Sports & Fitness', 'Business & Professional',
101 'Music', 'Seasonal & Holiday',
102 'Film, Media & Entertainment', 'Family & Education',
103 'Science & Technology', 'Performing & Visual Arts',
104 'Food & Drink', 'Family & Education']
105 for topic in event_topic:
106 get_or_create(EventTopic, name=topic)
107
108
109 def create_event_types():
110 event_type = ['Camp, Treat & Retreat', 'Dinner or Gala',
111 'Other', 'Concert or Performance', 'Conference',
112 'Seminar or Talk', 'Convention',
113 'Festival or Fair', 'Tour',
114 'Screening', 'Game or Competition',
115 'Party or Social Gathering', 'Race or Endurance Event',
116 'Meeting or Networking Event', 'Attraction',
117 'Class, Training, or Workshop', 'Appearance or Signing',
118 'Tournament', 'Rally']
119 for type_ in event_type:
120 get_or_create(EventType, name=type_)
121
122
123 def create_event_locations():
124 event_location = ['India', 'Singapore', 'Berlin', 'New York', 'Hong Kong']
125 for loc_ in event_location:
126 get_or_create(EventLocation, name=loc_)
127
128
129 def create_permissions():
130 orgr = Role.query.get(1)
131 coorgr = Role.query.get(2)
132 track_orgr = Role.query.get(3)
133 mod = Role.query.get(4)
134 attend = Role.query.get(5)
135 regist = Role.query.get(6)
136
137 track = Service.query.get(1)
138 session = Service.query.get(2)
139 speaker = Service.query.get(3)
140 sponsor = Service.query.get(4)
141 microlocation = Service.query.get(5)
142
143 # For ORGANIZER
144 # All four permissions set to True
145 services = [track, session, speaker, sponsor, microlocation]
146 roles = [attend, regist]
147 for service in services:
148 perm, _ = get_or_create(Permission, role=orgr, service=service)
149 db.session.add(perm)
150
151 # For COORGANIZER
152 for service in services:
153 perm, _ = get_or_create(Permission, role=coorgr, service=service)
154 perm.can_create, perm.can_delete = False, False
155 db.session.add(perm)
156
157 # For TRACK_ORGANIZER
158 for service in services:
159 perm, _ = get_or_create(Permission, role=track_orgr, service=service)
160 if not service == track:
161 perm.can_create, perm.can_update, perm.can_delete = False, False, False
162 db.session.add(perm)
163
164 # For MODERATOR
165 for service in services:
166 perm, _ = get_or_create(Permission, role=mod, service=service)
167 perm.can_create, perm.can_update, perm.can_delete = False, False, False
168 db.session.add(perm)
169
170 # For ATTENDEE and REGISTRAR
171 services = [track, session, speaker, sponsor, microlocation]
172 roles = [attend, regist]
173 for role in roles:
174 for service in services:
175 perm, _ = get_or_create(Permission, role=role, service=service)
176 perm.can_create, perm.can_update, perm.can_delete = False, False, False
177 db.session.add(perm)
178
179
180 def create_custom_sys_roles():
181 role, _ = get_or_create(CustomSysRole, name='Sales Admin')
182 db.session.add(role)
183 role, _ = get_or_create(CustomSysRole, name='Marketer')
184 db.session.add(role)
185
186
187 def create_panel_permissions():
188 sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()
189 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)
190 db.session.add(perm)
191 marketer = CustomSysRole.query.filter_by(name='Marketer').first()
192 perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)
193 db.session.add(perm)
194
195
196 def create_user_permissions():
197 # Publish Event
198 user_perm, _ = get_or_create(UserPermission, name='publish_event',
199 description='Publish event (make event live)')
200 user_perm.verified_user = True
201 db.session.add(user_perm)
202
203 # Create Event
204 user_perm, _ = get_or_create(UserPermission, name='create_event',
205 description='Create event')
206 user_perm.verified_user, user_perm.unverified_user = True, False
207 db.session.add(user_perm)
208
209
210 def create_admin_message_settings():
211 default_mails = ["Next Event",
212 "Session Schedule Change",
213 "User email",
214 "Invitation For Papers",
215 "After Event",
216 "Ticket(s) Purchased",
217 "Session Accept or Reject",
218 "Event Published",
219 "Event Export Failed",
220 "Event Exported",
221 "Event Role Invitation",
222 "New Session Proposal"]
223 for mail in MAILS:
224 if mail in default_mails:
225 get_or_create(MessageSettings, action=mail, mail_status=True,
226 notification_status=True, user_control_status=True)
227 else:
228 get_or_create(
229 MessageSettings, action=mail, mail_status=False,
230 notification_status=False, user_control_status=False
231 )
232
233
234 def populate():
235 """
236 Create defined Roles, Services and Permissions.
237 """
238 print('Creating roles...')
239 create_roles()
240 print('Creating services...')
241 create_services()
242 print('Creating permissions...')
243 create_permissions()
244 print('Creating custom system roles...')
245 create_custom_sys_roles()
246 print('Creating admin panel permissions...')
247 create_panel_permissions()
248 print('Creating user permissions...')
249 create_user_permissions()
250 print('Creating settings...')
251 create_settings()
252 print('Creating modules...')
253 create_modules()
254 print('Creating event image size...')
255 create_event_image_sizes()
256 print('Creating speaker image size...')
257 create_speaker_image_sizes()
258 print('Creating Event Topics...')
259 create_event_topics()
260 print('Creating Event Types...')
261 create_event_types()
262 print('Creating Event Locations...')
263 create_event_locations()
264 print('Creating admin message settings...')
265 create_admin_message_settings()
266
267
268 def populate_without_print():
269 """
270 Create defined Roles, Services and Permissions.
271 """
272 create_roles()
273 create_services()
274 create_permissions()
275 create_custom_sys_roles()
276 create_panel_permissions()
277 create_user_permissions()
278 create_settings()
279 create_modules()
280 create_event_image_sizes()
281 create_speaker_image_sizes()
282 create_event_topics()
283 create_event_types()
284 create_event_locations()
285 create_admin_message_settings()
286
287 db.session.commit()
288
289
290 if __name__ == '__main__':
291 with current_app.app_context():
292 populate()
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/populate_db.py b/populate_db.py
--- a/populate_db.py
+++ b/populate_db.py
@@ -1,6 +1,7 @@
from app import current_app
from app.models import db
from app.api.helpers.db import get_or_create # , save_to_db
+from envparse import env
# Admin message settings
from app.api.helpers.system_mails import MAILS
@@ -67,6 +68,21 @@
def create_settings():
get_or_create(Setting, app_name='Open Event')
+ if current_app.config['DEVELOPMENT']:
+ # get the stripe keys from the env file and save it in the settings.
+ env.read_envfile()
+ stripe_secret_key = env('STRIPE_SECRET_KEY', default=None)
+ stripe_publishable_key = env('STRIPE_PUBLISHABLE_KEY', default=None)
+ stripe_client_id = env('STRIPE_CLIENT_ID', default=None)
+
+ if stripe_client_id and stripe_secret_key and stripe_publishable_key:
+ setting, _ = get_or_create(Setting, app_name='Open Event')
+ setting.stripe_client_id = stripe_client_id
+ setting.stripe_publishable_key = stripe_publishable_key
+ setting.stripe_secret_key = stripe_secret_key
+ db.session.add(setting)
+ db.session.commit()
+
def create_event_image_sizes():
get_or_create(
|
{"golden_diff": "diff --git a/populate_db.py b/populate_db.py\n--- a/populate_db.py\n+++ b/populate_db.py\n@@ -1,6 +1,7 @@\n from app import current_app\n from app.models import db\n from app.api.helpers.db import get_or_create # , save_to_db\n+from envparse import env\n \n # Admin message settings\n from app.api.helpers.system_mails import MAILS\n@@ -67,6 +68,21 @@\n def create_settings():\n get_or_create(Setting, app_name='Open Event')\n \n+ if current_app.config['DEVELOPMENT']:\n+ # get the stripe keys from the env file and save it in the settings.\n+ env.read_envfile()\n+ stripe_secret_key = env('STRIPE_SECRET_KEY', default=None)\n+ stripe_publishable_key = env('STRIPE_PUBLISHABLE_KEY', default=None)\n+ stripe_client_id = env('STRIPE_CLIENT_ID', default=None)\n+\n+ if stripe_client_id and stripe_secret_key and stripe_publishable_key:\n+ setting, _ = get_or_create(Setting, app_name='Open Event')\n+ setting.stripe_client_id = stripe_client_id\n+ setting.stripe_publishable_key = stripe_publishable_key\n+ setting.stripe_secret_key = stripe_secret_key\n+ db.session.add(setting)\n+ db.session.commit()\n+\n \n def create_event_image_sizes():\n get_or_create(\n", "issue": "Add stripe test key for testing purposes on the heroku deployment\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nStripe test key needs to be added for testing purposes on the heroku deployment.\r\n\r\n**Describe the solution you'd like**\r\n<!-- A clear and concise description of what you want to happen. -->\r\nUpdate the `populate_db.py` file to add it in the settings. \r\n\n", "before_files": [{"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\nfrom app.models.image_size import ImageSizes\nfrom app.models.module import Module\n\n# EventTopic\nfrom app.models.event_topic import EventTopic\n\n# EventType\nfrom app.models.event_type import EventType\n\n# EventLocation\nfrom app.models.event_location import EventLocation\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = 
Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n\ndef create_event_image_sizes():\n get_or_create(\n ImageSizes, type='event-image', full_width=1300,\n full_height=500, full_aspect=True, full_quality=80,\n icon_width=75, icon_height=30, icon_aspect=True,\n icon_quality=80, thumbnail_width=500, thumbnail_height=200,\n thumbnail_aspect=True, thumbnail_quality=80, logo_width=500,\n logo_height=200\n )\n\n\ndef create_speaker_image_sizes():\n get_or_create(\n ImageSizes, type='speaker-image', icon_size_width_height=35, icon_size_quality=80,\n small_size_width_height=50, small_size_quality=80,\n thumbnail_size_width_height=500, thumbnail_quality=80\n )\n\n\ndef create_modules():\n get_or_create(Module, donation_include=False)\n\n\ndef create_event_topics():\n event_topic = ['Health & Wellness', 'Home & Lifestyle',\n 'Charity & Causes', 'Other', 'Religion & Spirituality',\n 'Community & Culture', 'Government & Politics',\n 'Government & Politics', 'Auto, Boat & Air',\n 'Travel & Outdoor', 'Hobbies & Special Interest',\n 'Sports & Fitness', 'Business & Professional',\n 'Music', 'Seasonal & Holiday',\n 'Film, Media & Entertainment', 'Family & Education',\n 'Science & Technology', 'Performing & Visual Arts',\n 'Food & Drink', 'Family & Education']\n for topic in event_topic:\n get_or_create(EventTopic, name=topic)\n\n\ndef create_event_types():\n event_type = ['Camp, Treat & Retreat', 'Dinner or Gala',\n 'Other', 'Concert or Performance', 'Conference',\n 'Seminar or Talk', 'Convention',\n 'Festival or Fair', 'Tour',\n 'Screening', 'Game or Competition',\n 'Party or Social Gathering', 'Race or Endurance Event',\n 'Meeting or Networking Event', 'Attraction',\n 'Class, Training, or Workshop', 'Appearance or Signing',\n 'Tournament', 'Rally']\n for type_ in event_type:\n get_or_create(EventType, name=type_)\n\n\ndef create_event_locations():\n event_location = ['India', 'Singapore', 'Berlin', 'New York', 'Hong Kong']\n for loc_ in event_location:\n get_or_create(EventLocation, name=loc_)\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n attend = Role.query.get(5)\n regist = Role.query.get(6)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for service in services:\n perm, _ = get_or_create(Permission, role=orgr, service=service)\n db.session.add(perm)\n\n # For COORGANIZER\n for service in services:\n perm, _ = get_or_create(Permission, role=coorgr, service=service)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n for service in services:\n perm, _ = get_or_create(Permission, role=track_orgr, service=service)\n if not service == track:\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n # For MODERATOR\n for service in services:\n perm, _ = get_or_create(Permission, role=mod, service=service)\n 
perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n # For ATTENDEE and REGISTRAR\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for role in roles:\n for service in services:\n perm, _ = get_or_create(Permission, role=role, service=service)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, False\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=True,\n notification_status=True, user_control_status=True)\n else:\n get_or_create(\n MessageSettings, action=mail, mail_status=False,\n notification_status=False, user_control_status=False\n )\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating modules...')\n create_modules()\n print('Creating event image size...')\n create_event_image_sizes()\n print('Creating speaker image size...')\n create_speaker_image_sizes()\n print('Creating Event Topics...')\n create_event_topics()\n print('Creating Event Types...')\n create_event_types()\n print('Creating Event Locations...')\n create_event_locations()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n create_settings()\n create_modules()\n create_event_image_sizes()\n create_speaker_image_sizes()\n create_event_topics()\n create_event_types()\n create_event_locations()\n create_admin_message_settings()\n\n 
db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}], "after_files": [{"content": "from app import current_app\nfrom app.models import db\nfrom app.api.helpers.db import get_or_create # , save_to_db\nfrom envparse import env\n\n# Admin message settings\nfrom app.api.helpers.system_mails import MAILS\nfrom app.models.message_setting import MessageSettings\n\n# Event Role-Service Permissions\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.permission import Permission\n\nfrom app.models.track import Track\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.sponsor import Sponsor\nfrom app.models.microlocation import Microlocation\n\nfrom app.models.user import ORGANIZER, COORGANIZER, TRACK_ORGANIZER, MODERATOR, ATTENDEE, REGISTRAR\n\n# Admin Panel Permissions\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.custom_system_role import CustomSysRole\n\nfrom app.models.setting import Setting\nfrom app.models.image_size import ImageSizes\nfrom app.models.module import Module\n\n# EventTopic\nfrom app.models.event_topic import EventTopic\n\n# EventType\nfrom app.models.event_type import EventType\n\n# EventLocation\nfrom app.models.event_location import EventLocation\n\n# User Permissions\nfrom app.models.user_permission import UserPermission\nSALES = 'sales'\n\n\ndef create_roles():\n get_or_create(Role, name=ORGANIZER, title_name='Organizer')\n get_or_create(Role, name=COORGANIZER, title_name='Co-organizer')\n get_or_create(Role, name=TRACK_ORGANIZER, title_name='Track Organizer')\n get_or_create(Role, name=MODERATOR, title_name='Moderator')\n get_or_create(Role, name=ATTENDEE, title_name='Attendee')\n get_or_create(Role, name=REGISTRAR, title_name='Registrar')\n\n\ndef create_services():\n track = Track.get_service_name()\n session = Session.get_service_name()\n speaker = Speaker.get_service_name()\n sponsor = Sponsor.get_service_name()\n microlocation = Microlocation.get_service_name()\n\n get_or_create(Service, name=track)\n get_or_create(Service, name=session)\n get_or_create(Service, name=speaker)\n get_or_create(Service, name=sponsor)\n get_or_create(Service, name=microlocation)\n\n\ndef create_settings():\n get_or_create(Setting, app_name='Open Event')\n\n if current_app.config['DEVELOPMENT']:\n # get the stripe keys from the env file and save it in the settings.\n env.read_envfile()\n stripe_secret_key = env('STRIPE_SECRET_KEY', default=None)\n stripe_publishable_key = env('STRIPE_PUBLISHABLE_KEY', default=None)\n stripe_client_id = env('STRIPE_CLIENT_ID', default=None)\n\n if stripe_client_id and stripe_secret_key and stripe_publishable_key:\n setting, _ = get_or_create(Setting, app_name='Open Event')\n setting.stripe_client_id = stripe_client_id\n setting.stripe_publishable_key = stripe_publishable_key\n setting.stripe_secret_key = stripe_secret_key\n db.session.add(setting)\n db.session.commit()\n\n\ndef create_event_image_sizes():\n get_or_create(\n ImageSizes, type='event-image', full_width=1300,\n full_height=500, full_aspect=True, full_quality=80,\n icon_width=75, icon_height=30, icon_aspect=True,\n icon_quality=80, thumbnail_width=500, thumbnail_height=200,\n thumbnail_aspect=True, thumbnail_quality=80, logo_width=500,\n logo_height=200\n )\n\n\ndef create_speaker_image_sizes():\n get_or_create(\n ImageSizes, type='speaker-image', icon_size_width_height=35, icon_size_quality=80,\n 
small_size_width_height=50, small_size_quality=80,\n thumbnail_size_width_height=500, thumbnail_quality=80\n )\n\n\ndef create_modules():\n get_or_create(Module, donation_include=False)\n\n\ndef create_event_topics():\n event_topic = ['Health & Wellness', 'Home & Lifestyle',\n 'Charity & Causes', 'Other', 'Religion & Spirituality',\n 'Community & Culture', 'Government & Politics',\n 'Government & Politics', 'Auto, Boat & Air',\n 'Travel & Outdoor', 'Hobbies & Special Interest',\n 'Sports & Fitness', 'Business & Professional',\n 'Music', 'Seasonal & Holiday',\n 'Film, Media & Entertainment', 'Family & Education',\n 'Science & Technology', 'Performing & Visual Arts',\n 'Food & Drink', 'Family & Education']\n for topic in event_topic:\n get_or_create(EventTopic, name=topic)\n\n\ndef create_event_types():\n event_type = ['Camp, Treat & Retreat', 'Dinner or Gala',\n 'Other', 'Concert or Performance', 'Conference',\n 'Seminar or Talk', 'Convention',\n 'Festival or Fair', 'Tour',\n 'Screening', 'Game or Competition',\n 'Party or Social Gathering', 'Race or Endurance Event',\n 'Meeting or Networking Event', 'Attraction',\n 'Class, Training, or Workshop', 'Appearance or Signing',\n 'Tournament', 'Rally']\n for type_ in event_type:\n get_or_create(EventType, name=type_)\n\n\ndef create_event_locations():\n event_location = ['India', 'Singapore', 'Berlin', 'New York', 'Hong Kong']\n for loc_ in event_location:\n get_or_create(EventLocation, name=loc_)\n\n\ndef create_permissions():\n orgr = Role.query.get(1)\n coorgr = Role.query.get(2)\n track_orgr = Role.query.get(3)\n mod = Role.query.get(4)\n attend = Role.query.get(5)\n regist = Role.query.get(6)\n\n track = Service.query.get(1)\n session = Service.query.get(2)\n speaker = Service.query.get(3)\n sponsor = Service.query.get(4)\n microlocation = Service.query.get(5)\n\n # For ORGANIZER\n # All four permissions set to True\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for service in services:\n perm, _ = get_or_create(Permission, role=orgr, service=service)\n db.session.add(perm)\n\n # For COORGANIZER\n for service in services:\n perm, _ = get_or_create(Permission, role=coorgr, service=service)\n perm.can_create, perm.can_delete = False, False\n db.session.add(perm)\n\n # For TRACK_ORGANIZER\n for service in services:\n perm, _ = get_or_create(Permission, role=track_orgr, service=service)\n if not service == track:\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n # For MODERATOR\n for service in services:\n perm, _ = get_or_create(Permission, role=mod, service=service)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n # For ATTENDEE and REGISTRAR\n services = [track, session, speaker, sponsor, microlocation]\n roles = [attend, regist]\n for role in roles:\n for service in services:\n perm, _ = get_or_create(Permission, role=role, service=service)\n perm.can_create, perm.can_update, perm.can_delete = False, False, False\n db.session.add(perm)\n\n\ndef create_custom_sys_roles():\n role, _ = get_or_create(CustomSysRole, name='Sales Admin')\n db.session.add(role)\n role, _ = get_or_create(CustomSysRole, name='Marketer')\n db.session.add(role)\n\n\ndef create_panel_permissions():\n sales_admin = CustomSysRole.query.filter_by(name='Sales Admin').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=sales_admin)\n db.session.add(perm)\n marketer = 
CustomSysRole.query.filter_by(name='Marketer').first()\n perm, _ = get_or_create(PanelPermission, panel_name=SALES, role=marketer)\n db.session.add(perm)\n\n\ndef create_user_permissions():\n # Publish Event\n user_perm, _ = get_or_create(UserPermission, name='publish_event',\n description='Publish event (make event live)')\n user_perm.verified_user = True\n db.session.add(user_perm)\n\n # Create Event\n user_perm, _ = get_or_create(UserPermission, name='create_event',\n description='Create event')\n user_perm.verified_user, user_perm.unverified_user = True, False\n db.session.add(user_perm)\n\n\ndef create_admin_message_settings():\n default_mails = [\"Next Event\",\n \"Session Schedule Change\",\n \"User email\",\n \"Invitation For Papers\",\n \"After Event\",\n \"Ticket(s) Purchased\",\n \"Session Accept or Reject\",\n \"Event Published\",\n \"Event Export Failed\",\n \"Event Exported\",\n \"Event Role Invitation\",\n \"New Session Proposal\"]\n for mail in MAILS:\n if mail in default_mails:\n get_or_create(MessageSettings, action=mail, mail_status=True,\n notification_status=True, user_control_status=True)\n else:\n get_or_create(\n MessageSettings, action=mail, mail_status=False,\n notification_status=False, user_control_status=False\n )\n\n\ndef populate():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n print('Creating roles...')\n create_roles()\n print('Creating services...')\n create_services()\n print('Creating permissions...')\n create_permissions()\n print('Creating custom system roles...')\n create_custom_sys_roles()\n print('Creating admin panel permissions...')\n create_panel_permissions()\n print('Creating user permissions...')\n create_user_permissions()\n print('Creating settings...')\n create_settings()\n print('Creating modules...')\n create_modules()\n print('Creating event image size...')\n create_event_image_sizes()\n print('Creating speaker image size...')\n create_speaker_image_sizes()\n print('Creating Event Topics...')\n create_event_topics()\n print('Creating Event Types...')\n create_event_types()\n print('Creating Event Locations...')\n create_event_locations()\n print('Creating admin message settings...')\n create_admin_message_settings()\n\n\ndef populate_without_print():\n \"\"\"\n Create defined Roles, Services and Permissions.\n \"\"\"\n create_roles()\n create_services()\n create_permissions()\n create_custom_sys_roles()\n create_panel_permissions()\n create_user_permissions()\n create_settings()\n create_modules()\n create_event_image_sizes()\n create_speaker_image_sizes()\n create_event_topics()\n create_event_types()\n create_event_locations()\n create_admin_message_settings()\n\n db.session.commit()\n\n\nif __name__ == '__main__':\n with current_app.app_context():\n populate()\n", "path": "populate_db.py"}]}
| 3,352 | 309 |
gh_patches_debug_11123
|
rasdani/github-patches
|
git_diff
|
kivy__python-for-android-662
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
.jam files not installed
The user-config.jam in https://github.com/kivy/python-for-android/tree/master/pythonforandroid/recipes/boost does not show up in the installed p4a recipes folder /home/paul/.local/lib/python2.7/site-packages/pythonforandroid/recipes/boost/
Perhaps .jam files have to be added to this array as well: https://github.com/kived/python-for-android/commit/93fcf656e2aafc6a75ee06dab3e471e1eb509d87
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1
2 from setuptools import setup, find_packages
3 from os import walk
4 from os.path import join, dirname, sep
5 import os
6 import glob
7
8 # NOTE: All package data should also be set in MANIFEST.in
9
10 packages = find_packages()
11
12 package_data = {'': ['*.tmpl',
13 '*.patch', ], }
14
15 data_files = []
16
17 # By specifying every file manually, package_data will be able to
18 # include them in binary distributions. Note that we have to add
19 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
20 # work.
21 def recursively_include(results, directory, patterns):
22 for root, subfolders, files in walk(directory):
23 for fn in files:
24 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):
25 continue
26 filename = join(root, fn)
27 directory = 'pythonforandroid'
28 if directory not in results:
29 results[directory] = []
30 results[directory].append(join(*filename.split(sep)[1:]))
31
32 recursively_include(package_data, 'pythonforandroid/recipes',
33 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
34 '*.mk', ])
35 recursively_include(package_data, 'pythonforandroid/bootstraps',
36 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
37 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
38 recursively_include(package_data, 'pythonforandroid/bootstraps',
39 ['sdl-config', ])
40 recursively_include(package_data, 'pythonforandroid',
41 ['liblink', 'biglink', 'liblink.sh'])
42
43 setup(name='python-for-android',
44 version='0.3',
45 description='Android APK packager for Python scripts and apps',
46 author='The Kivy team',
47 author_email='[email protected]',
48 url='https://github.com/kivy/python-for-android',
49 license='MIT',
50 install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',
51 'six'],
52 entry_points={
53 'console_scripts': [
54 'python-for-android = pythonforandroid.toolchain:main',
55 'p4a = pythonforandroid.toolchain:main',
56 ],
57 'distutils.commands': [
58 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',
59 ],
60 },
61 classifiers = [
62 'Development Status :: 3 - Alpha',
63 'Intended Audience :: Developers',
64 'License :: OSI Approved :: MIT License',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: OS Independent',
67 'Operating System :: POSIX :: Linux',
68 'Operating System :: MacOS :: MacOS X',
69 'Programming Language :: C',
70 'Programming Language :: Python :: 2',
71 'Programming Language :: Python :: 3',
72 'Topic :: Software Development',
73 'Topic :: Utilities',
74 ],
75 packages=packages,
76 package_data=package_data,
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,7 @@
recursively_include(package_data, 'pythonforandroid/recipes',
['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
- '*.mk', ])
+ '*.mk', '*.jam', ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
'*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n \n recursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n- '*.mk', ])\n+ '*.mk', '*.jam', ])\n recursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\n", "issue": ".jam files not installed\nThe user-config.jam in https://github.com/kivy/python-for-android/tree/master/pythonforandroid/recipes/boost does not show up in the installed p4a recipes folder /home/paul/.local/lib/python2.7/site-packages/pythonforandroid/recipes/boost/\n\nPerhaps .jam files have to be added to this array as well: https://github.com/kived/python-for-android/commit/93fcf656e2aafc6a75ee06dab3e471e1eb509d87\n\n", "before_files": [{"content": "\nfrom setuptools import setup, find_packages\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport glob\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nsetup(name='python-for-android',\n version='0.3',\n description='Android APK packager for Python scripts and apps',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android', \n license='MIT', \n install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',\n 'six'],\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.toolchain:main',\n 'p4a = pythonforandroid.toolchain:main',\n ],\n 'distutils.commands': [\n 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\nfrom setuptools import setup, find_packages\nfrom os 
import walk\nfrom os.path import join, dirname, sep\nimport os\nimport glob\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nsetup(name='python-for-android',\n version='0.3',\n description='Android APK packager for Python scripts and apps',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android', \n license='MIT', \n install_requires=['appdirs', 'colorama>0.3', 'sh', 'jinja2', 'argparse',\n 'six'],\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.toolchain:main',\n 'p4a = pythonforandroid.toolchain:main',\n ],\n 'distutils.commands': [\n 'bdist_apk = pythonforandroid.bdist_apk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
| 1,177 | 137 |
gh_patches_debug_6908
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-2966
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Confusing message or behavior in SCM when url can't be deduced
It raises:
```python
if not origin:
raise ConanException("Repo origin cannot be deduced by 'auto', using source folder")
```
But it doesn't use the source folder, as execution is aborted. Maybe change the message? Or allow to proceed and create the package locally, even if the remote URL is not defined?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/cmd/export.py`
Content:
```
1 """ manages the movement of conanfiles and associated files from the user space
2 to the local store, as an initial step before building or uploading to remotes
3 """
4
5 import os
6 import shutil
7
8 from conans.client.cmd.export_linter import conan_linter
9 from conans.client.file_copier import FileCopier
10 from conans.client.loader_parse import load_conanfile_class
11 from conans.client.output import ScopedOutput
12 from conans.client.source import get_scm
13 from conans.errors import ConanException
14 from conans.model.conan_file import create_exports, create_exports_sources
15 from conans.model.manifest import FileTreeManifest
16 from conans.model.ref import ConanFileReference
17 from conans.paths import CONAN_MANIFEST, CONANFILE
18 from conans.util.files import save, rmdir, is_dirty, set_dirty, mkdir
19 from conans.util.log import logger
20 from conans.search.search import search_recipes
21
22
23 def export_alias(reference, target_reference, client_cache):
24 conanfile = """
25 from conans import ConanFile
26
27 class AliasConanfile(ConanFile):
28 alias = "%s"
29 """ % str(target_reference)
30
31 export_path = client_cache.export(reference)
32 mkdir(export_path)
33 save(os.path.join(export_path, CONANFILE), conanfile)
34 mkdir(client_cache.export_sources(reference))
35 digest = FileTreeManifest.create(export_path)
36 digest.save(export_path)
37
38
39 def cmd_export(conanfile_path, name, version, user, channel, keep_source,
40 output, client_cache):
41 """ Export the recipe
42 param conanfile_path: the original source directory of the user containing a
43 conanfile.py
44 param user: user under this package will be exported
45 param channel: string (stable, testing,...)
46 """
47 logger.debug("Exporting %s" % conanfile_path)
48
49 conan_linter(conanfile_path, output)
50 conanfile = _load_export_conanfile(conanfile_path, output, name, version)
51 conan_ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)
52 conan_ref_str = str(conan_ref)
53 # Maybe a platform check could be added, but depends on disk partition
54 refs = search_recipes(client_cache, conan_ref_str, ignorecase=True)
55 if refs and conan_ref not in refs:
56 raise ConanException("Cannot export package with same name but different case\n"
57 "You exported '%s' but already existing '%s'"
58 % (conan_ref_str, " ".join(str(s) for s in refs)))
59 output = ScopedOutput(str(conan_ref), output)
60 with client_cache.conanfile_write_lock(conan_ref):
61 _export_conanfile(conanfile_path, output, client_cache, conanfile, conan_ref, keep_source)
62
63
64 def _load_export_conanfile(conanfile_path, output, name, version):
65 conanfile = load_conanfile_class(conanfile_path)
66
67 for field in ["url", "license", "description"]:
68 field_value = getattr(conanfile, field, None)
69 if not field_value:
70 output.warn("Conanfile doesn't have '%s'.\n"
71 "It is recommended to add it as attribute" % field)
72
73 try:
74 # Exports is the only object field, we need to do this, because conan export needs it
75 conanfile.exports = create_exports(conanfile)
76 conanfile.exports_sources = create_exports_sources(conanfile)
77 except Exception as e: # re-raise with file name
78 raise ConanException("%s: %s" % (conanfile_path, str(e)))
79
80 # check name and version were specified
81 if not conanfile.name:
82 if name:
83 conanfile.name = name
84 else:
85 raise ConanException("conanfile didn't specify name")
86 elif name and name != conanfile.name:
87 raise ConanException("Package recipe exported with name %s!=%s" % (name, conanfile.name))
88
89 if not conanfile.version:
90 if version:
91 conanfile.version = version
92 else:
93 raise ConanException("conanfile didn't specify version")
94 elif version and version != conanfile.version:
95 raise ConanException("Package recipe exported with version %s!=%s"
96 % (version, conanfile.version))
97
98 return conanfile
99
100
101 def _capture_export_scm_data(conanfile, src_path, destination_folder, output, paths, conan_ref):
102
103 scm_src_file = paths.scm_folder(conan_ref)
104 if os.path.exists(scm_src_file):
105 os.unlink(scm_src_file)
106
107 scm = get_scm(conanfile, src_path)
108
109 if not scm or not (scm.capture_origin or scm.capture_revision):
110 return
111
112 if scm.url == "auto":
113 origin = scm.get_remote_url()
114 if not origin:
115 raise ConanException("Repo origin cannot be deduced by 'auto', using source folder")
116 output.success("Repo origin deduced by 'auto': %s" % origin)
117 scm.url = origin
118 if scm.revision == "auto":
119 scm.revision = scm.get_revision()
120 output.success("Revision deduced by 'auto': %s" % scm.revision)
121
122 # Generate the scm_folder.txt file pointing to the src_path
123 save(scm_src_file, src_path.replace("\\", "/"))
124 scm.replace_in_file(os.path.join(destination_folder, "conanfile.py"))
125
126
127 def _export_conanfile(conanfile_path, output, paths, conanfile, conan_ref, keep_source):
128
129 exports_folder = paths.export(conan_ref)
130 exports_source_folder = paths.export_sources(conan_ref, conanfile.short_paths)
131 previous_digest = _init_export_folder(exports_folder, exports_source_folder)
132 _execute_export(conanfile_path, conanfile, exports_folder, exports_source_folder, output)
133 shutil.copy2(conanfile_path, os.path.join(exports_folder, CONANFILE))
134
135 _capture_export_scm_data(conanfile, os.path.dirname(conanfile_path), exports_folder,
136 output, paths, conan_ref)
137
138 digest = FileTreeManifest.create(exports_folder, exports_source_folder)
139
140 if previous_digest and previous_digest == digest:
141 output.info("The stored package has not changed")
142 modified_recipe = False
143 digest = previous_digest # Use the old one, keep old timestamp
144 else:
145 output.success('A new %s version was exported' % CONANFILE)
146 output.info('Folder: %s' % exports_folder)
147 modified_recipe = True
148 digest.save(exports_folder)
149
150 source = paths.source(conan_ref, conanfile.short_paths)
151 remove = False
152 if is_dirty(source):
153 output.info("Source folder is corrupted, forcing removal")
154 remove = True
155 elif modified_recipe and not keep_source and os.path.exists(source):
156 output.info("Package recipe modified in export, forcing source folder removal")
157 output.info("Use the --keep-source, -k option to skip it")
158 remove = True
159 if remove:
160 output.info("Removing 'source' folder, this can take a while for big packages")
161 try:
162 # remove only the internal
163 rmdir(source)
164 except BaseException as e:
165 output.error("Unable to delete source folder. "
166 "Will be marked as corrupted for deletion")
167 output.warn(str(e))
168 set_dirty(source)
169
170
171 def _init_export_folder(destination_folder, destination_src_folder):
172 previous_digest = None
173 try:
174 if os.path.exists(destination_folder):
175 if os.path.exists(os.path.join(destination_folder, CONAN_MANIFEST)):
176 previous_digest = FileTreeManifest.load(destination_folder)
177 # Maybe here we want to invalidate cache
178 rmdir(destination_folder)
179 os.makedirs(destination_folder)
180 except Exception as e:
181 raise ConanException("Unable to create folder %s\n%s" % (destination_folder, str(e)))
182 try:
183 if os.path.exists(destination_src_folder):
184 rmdir(destination_src_folder)
185 os.makedirs(destination_src_folder)
186 except Exception as e:
187 raise ConanException("Unable to create folder %s\n%s" % (destination_src_folder, str(e)))
188 return previous_digest
189
190
191 def _execute_export(conanfile_path, conanfile, destination_folder, destination_source_folder,
192 output):
193
194 origin_folder = os.path.dirname(conanfile_path)
195
196 def classify_patterns(patterns):
197 patterns = patterns or []
198 included, excluded = [], []
199 for p in patterns:
200 if p.startswith("!"):
201 excluded.append(p[1:])
202 else:
203 included.append(p)
204 return included, excluded
205
206 included_exports, excluded_exports = classify_patterns(conanfile.exports)
207 included_sources, excluded_sources = classify_patterns(conanfile.exports_sources)
208
209 try:
210 os.unlink(os.path.join(origin_folder, CONANFILE + 'c'))
211 except OSError:
212 pass
213
214 copier = FileCopier(origin_folder, destination_folder)
215 for pattern in included_exports:
216 copier(pattern, links=True, excludes=excluded_exports)
217 copier = FileCopier(origin_folder, destination_source_folder)
218 for pattern in included_sources:
219 copier(pattern, links=True, excludes=excluded_sources)
220 package_output = ScopedOutput("%s export" % output.scope, output)
221 copier.report(package_output)
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/cmd/export.py b/conans/client/cmd/export.py
--- a/conans/client/cmd/export.py
+++ b/conans/client/cmd/export.py
@@ -112,7 +112,7 @@
if scm.url == "auto":
origin = scm.get_remote_url()
if not origin:
- raise ConanException("Repo origin cannot be deduced by 'auto', using source folder")
+ raise ConanException("Repo origin cannot be deduced by 'auto'")
output.success("Repo origin deduced by 'auto': %s" % origin)
scm.url = origin
if scm.revision == "auto":
|
{"golden_diff": "diff --git a/conans/client/cmd/export.py b/conans/client/cmd/export.py\n--- a/conans/client/cmd/export.py\n+++ b/conans/client/cmd/export.py\n@@ -112,7 +112,7 @@\n if scm.url == \"auto\":\n origin = scm.get_remote_url()\n if not origin:\n- raise ConanException(\"Repo origin cannot be deduced by 'auto', using source folder\")\n+ raise ConanException(\"Repo origin cannot be deduced by 'auto'\")\n output.success(\"Repo origin deduced by 'auto': %s\" % origin)\n scm.url = origin\n if scm.revision == \"auto\":\n", "issue": "Confusing message or behavior in SCM when url can't be deduced\nIt raises:\r\n\r\n```python\r\nif not origin:\r\n raise ConanException(\"Repo origin cannot be deduced by 'auto', using source folder\")\r\n```\r\n\r\nBut it doesn't use the source folder, as execution is aborted. Maybe change the message? Or allow to proceed and create the package locally, even if the remote URL is not defined?\r\n\r\n\n", "before_files": [{"content": "\"\"\" manages the movement of conanfiles and associated files from the user space\nto the local store, as an initial step before building or uploading to remotes\n\"\"\"\n\nimport os\nimport shutil\n\nfrom conans.client.cmd.export_linter import conan_linter\nfrom conans.client.file_copier import FileCopier\nfrom conans.client.loader_parse import load_conanfile_class\nfrom conans.client.output import ScopedOutput\nfrom conans.client.source import get_scm\nfrom conans.errors import ConanException\nfrom conans.model.conan_file import create_exports, create_exports_sources\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import ConanFileReference\nfrom conans.paths import CONAN_MANIFEST, CONANFILE\nfrom conans.util.files import save, rmdir, is_dirty, set_dirty, mkdir\nfrom conans.util.log import logger\nfrom conans.search.search import search_recipes\n\n\ndef export_alias(reference, target_reference, client_cache):\n conanfile = \"\"\"\nfrom conans import ConanFile\n\nclass AliasConanfile(ConanFile):\n alias = \"%s\"\n\"\"\" % str(target_reference)\n\n export_path = client_cache.export(reference)\n mkdir(export_path)\n save(os.path.join(export_path, CONANFILE), conanfile)\n mkdir(client_cache.export_sources(reference))\n digest = FileTreeManifest.create(export_path)\n digest.save(export_path)\n\n\ndef cmd_export(conanfile_path, name, version, user, channel, keep_source,\n output, client_cache):\n \"\"\" Export the recipe\n param conanfile_path: the original source directory of the user containing a\n conanfile.py\n param user: user under this package will be exported\n param channel: string (stable, testing,...)\n \"\"\"\n logger.debug(\"Exporting %s\" % conanfile_path)\n\n conan_linter(conanfile_path, output)\n conanfile = _load_export_conanfile(conanfile_path, output, name, version)\n conan_ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)\n conan_ref_str = str(conan_ref)\n # Maybe a platform check could be added, but depends on disk partition\n refs = search_recipes(client_cache, conan_ref_str, ignorecase=True)\n if refs and conan_ref not in refs:\n raise ConanException(\"Cannot export package with same name but different case\\n\"\n \"You exported '%s' but already existing '%s'\"\n % (conan_ref_str, \" \".join(str(s) for s in refs)))\n output = ScopedOutput(str(conan_ref), output)\n with client_cache.conanfile_write_lock(conan_ref):\n _export_conanfile(conanfile_path, output, client_cache, conanfile, conan_ref, keep_source)\n\n\ndef _load_export_conanfile(conanfile_path, output, 
name, version):\n conanfile = load_conanfile_class(conanfile_path)\n\n for field in [\"url\", \"license\", \"description\"]:\n field_value = getattr(conanfile, field, None)\n if not field_value:\n output.warn(\"Conanfile doesn't have '%s'.\\n\"\n \"It is recommended to add it as attribute\" % field)\n\n try:\n # Exports is the only object field, we need to do this, because conan export needs it\n conanfile.exports = create_exports(conanfile)\n conanfile.exports_sources = create_exports_sources(conanfile)\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n # check name and version were specified\n if not conanfile.name:\n if name:\n conanfile.name = name\n else:\n raise ConanException(\"conanfile didn't specify name\")\n elif name and name != conanfile.name:\n raise ConanException(\"Package recipe exported with name %s!=%s\" % (name, conanfile.name))\n\n if not conanfile.version:\n if version:\n conanfile.version = version\n else:\n raise ConanException(\"conanfile didn't specify version\")\n elif version and version != conanfile.version:\n raise ConanException(\"Package recipe exported with version %s!=%s\"\n % (version, conanfile.version))\n\n return conanfile\n\n\ndef _capture_export_scm_data(conanfile, src_path, destination_folder, output, paths, conan_ref):\n\n scm_src_file = paths.scm_folder(conan_ref)\n if os.path.exists(scm_src_file):\n os.unlink(scm_src_file)\n\n scm = get_scm(conanfile, src_path)\n\n if not scm or not (scm.capture_origin or scm.capture_revision):\n return\n\n if scm.url == \"auto\":\n origin = scm.get_remote_url()\n if not origin:\n raise ConanException(\"Repo origin cannot be deduced by 'auto', using source folder\")\n output.success(\"Repo origin deduced by 'auto': %s\" % origin)\n scm.url = origin\n if scm.revision == \"auto\":\n scm.revision = scm.get_revision()\n output.success(\"Revision deduced by 'auto': %s\" % scm.revision)\n\n # Generate the scm_folder.txt file pointing to the src_path\n save(scm_src_file, src_path.replace(\"\\\\\", \"/\"))\n scm.replace_in_file(os.path.join(destination_folder, \"conanfile.py\"))\n\n\ndef _export_conanfile(conanfile_path, output, paths, conanfile, conan_ref, keep_source):\n\n exports_folder = paths.export(conan_ref)\n exports_source_folder = paths.export_sources(conan_ref, conanfile.short_paths)\n previous_digest = _init_export_folder(exports_folder, exports_source_folder)\n _execute_export(conanfile_path, conanfile, exports_folder, exports_source_folder, output)\n shutil.copy2(conanfile_path, os.path.join(exports_folder, CONANFILE))\n\n _capture_export_scm_data(conanfile, os.path.dirname(conanfile_path), exports_folder,\n output, paths, conan_ref)\n\n digest = FileTreeManifest.create(exports_folder, exports_source_folder)\n\n if previous_digest and previous_digest == digest:\n output.info(\"The stored package has not changed\")\n modified_recipe = False\n digest = previous_digest # Use the old one, keep old timestamp\n else:\n output.success('A new %s version was exported' % CONANFILE)\n output.info('Folder: %s' % exports_folder)\n modified_recipe = True\n digest.save(exports_folder)\n\n source = paths.source(conan_ref, conanfile.short_paths)\n remove = False\n if is_dirty(source):\n output.info(\"Source folder is corrupted, forcing removal\")\n remove = True\n elif modified_recipe and not keep_source and os.path.exists(source):\n output.info(\"Package recipe modified in export, forcing source folder removal\")\n output.info(\"Use the --keep-source, -k 
option to skip it\")\n remove = True\n if remove:\n output.info(\"Removing 'source' folder, this can take a while for big packages\")\n try:\n # remove only the internal\n rmdir(source)\n except BaseException as e:\n output.error(\"Unable to delete source folder. \"\n \"Will be marked as corrupted for deletion\")\n output.warn(str(e))\n set_dirty(source)\n\n\ndef _init_export_folder(destination_folder, destination_src_folder):\n previous_digest = None\n try:\n if os.path.exists(destination_folder):\n if os.path.exists(os.path.join(destination_folder, CONAN_MANIFEST)):\n previous_digest = FileTreeManifest.load(destination_folder)\n # Maybe here we want to invalidate cache\n rmdir(destination_folder)\n os.makedirs(destination_folder)\n except Exception as e:\n raise ConanException(\"Unable to create folder %s\\n%s\" % (destination_folder, str(e)))\n try:\n if os.path.exists(destination_src_folder):\n rmdir(destination_src_folder)\n os.makedirs(destination_src_folder)\n except Exception as e:\n raise ConanException(\"Unable to create folder %s\\n%s\" % (destination_src_folder, str(e)))\n return previous_digest\n\n\ndef _execute_export(conanfile_path, conanfile, destination_folder, destination_source_folder,\n output):\n\n origin_folder = os.path.dirname(conanfile_path)\n\n def classify_patterns(patterns):\n patterns = patterns or []\n included, excluded = [], []\n for p in patterns:\n if p.startswith(\"!\"):\n excluded.append(p[1:])\n else:\n included.append(p)\n return included, excluded\n\n included_exports, excluded_exports = classify_patterns(conanfile.exports)\n included_sources, excluded_sources = classify_patterns(conanfile.exports_sources)\n\n try:\n os.unlink(os.path.join(origin_folder, CONANFILE + 'c'))\n except OSError:\n pass\n\n copier = FileCopier(origin_folder, destination_folder)\n for pattern in included_exports:\n copier(pattern, links=True, excludes=excluded_exports)\n copier = FileCopier(origin_folder, destination_source_folder)\n for pattern in included_sources:\n copier(pattern, links=True, excludes=excluded_sources)\n package_output = ScopedOutput(\"%s export\" % output.scope, output)\n copier.report(package_output)\n", "path": "conans/client/cmd/export.py"}], "after_files": [{"content": "\"\"\" manages the movement of conanfiles and associated files from the user space\nto the local store, as an initial step before building or uploading to remotes\n\"\"\"\n\nimport os\nimport shutil\n\nfrom conans.client.cmd.export_linter import conan_linter\nfrom conans.client.file_copier import FileCopier\nfrom conans.client.loader_parse import load_conanfile_class\nfrom conans.client.output import ScopedOutput\nfrom conans.client.source import get_scm\nfrom conans.errors import ConanException\nfrom conans.model.conan_file import create_exports, create_exports_sources\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import ConanFileReference\nfrom conans.paths import CONAN_MANIFEST, CONANFILE\nfrom conans.util.files import save, rmdir, is_dirty, set_dirty, mkdir\nfrom conans.util.log import logger\nfrom conans.search.search import search_recipes\n\n\ndef export_alias(reference, target_reference, client_cache):\n conanfile = \"\"\"\nfrom conans import ConanFile\n\nclass AliasConanfile(ConanFile):\n alias = \"%s\"\n\"\"\" % str(target_reference)\n\n export_path = client_cache.export(reference)\n mkdir(export_path)\n save(os.path.join(export_path, CONANFILE), conanfile)\n mkdir(client_cache.export_sources(reference))\n digest = 
FileTreeManifest.create(export_path)\n digest.save(export_path)\n\n\ndef cmd_export(conanfile_path, name, version, user, channel, keep_source,\n output, client_cache):\n \"\"\" Export the recipe\n param conanfile_path: the original source directory of the user containing a\n conanfile.py\n param user: user under this package will be exported\n param channel: string (stable, testing,...)\n \"\"\"\n logger.debug(\"Exporting %s\" % conanfile_path)\n\n conan_linter(conanfile_path, output)\n conanfile = _load_export_conanfile(conanfile_path, output, name, version)\n conan_ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)\n conan_ref_str = str(conan_ref)\n # Maybe a platform check could be added, but depends on disk partition\n refs = search_recipes(client_cache, conan_ref_str, ignorecase=True)\n if refs and conan_ref not in refs:\n raise ConanException(\"Cannot export package with same name but different case\\n\"\n \"You exported '%s' but already existing '%s'\"\n % (conan_ref_str, \" \".join(str(s) for s in refs)))\n output = ScopedOutput(str(conan_ref), output)\n with client_cache.conanfile_write_lock(conan_ref):\n _export_conanfile(conanfile_path, output, client_cache, conanfile, conan_ref, keep_source)\n\n\ndef _load_export_conanfile(conanfile_path, output, name, version):\n conanfile = load_conanfile_class(conanfile_path)\n\n for field in [\"url\", \"license\", \"description\"]:\n field_value = getattr(conanfile, field, None)\n if not field_value:\n output.warn(\"Conanfile doesn't have '%s'.\\n\"\n \"It is recommended to add it as attribute\" % field)\n\n try:\n # Exports is the only object field, we need to do this, because conan export needs it\n conanfile.exports = create_exports(conanfile)\n conanfile.exports_sources = create_exports_sources(conanfile)\n except Exception as e: # re-raise with file name\n raise ConanException(\"%s: %s\" % (conanfile_path, str(e)))\n\n # check name and version were specified\n if not conanfile.name:\n if name:\n conanfile.name = name\n else:\n raise ConanException(\"conanfile didn't specify name\")\n elif name and name != conanfile.name:\n raise ConanException(\"Package recipe exported with name %s!=%s\" % (name, conanfile.name))\n\n if not conanfile.version:\n if version:\n conanfile.version = version\n else:\n raise ConanException(\"conanfile didn't specify version\")\n elif version and version != conanfile.version:\n raise ConanException(\"Package recipe exported with version %s!=%s\"\n % (version, conanfile.version))\n\n return conanfile\n\n\ndef _capture_export_scm_data(conanfile, src_path, destination_folder, output, paths, conan_ref):\n\n scm_src_file = paths.scm_folder(conan_ref)\n if os.path.exists(scm_src_file):\n os.unlink(scm_src_file)\n\n scm = get_scm(conanfile, src_path)\n\n if not scm or not (scm.capture_origin or scm.capture_revision):\n return\n\n if scm.url == \"auto\":\n origin = scm.get_remote_url()\n if not origin:\n raise ConanException(\"Repo origin cannot be deduced by 'auto'\")\n output.success(\"Repo origin deduced by 'auto': %s\" % origin)\n scm.url = origin\n if scm.revision == \"auto\":\n scm.revision = scm.get_revision()\n output.success(\"Revision deduced by 'auto': %s\" % scm.revision)\n\n # Generate the scm_folder.txt file pointing to the src_path\n save(scm_src_file, src_path.replace(\"\\\\\", \"/\"))\n scm.replace_in_file(os.path.join(destination_folder, \"conanfile.py\"))\n\n\ndef _export_conanfile(conanfile_path, output, paths, conanfile, conan_ref, keep_source):\n\n exports_folder = 
paths.export(conan_ref)\n exports_source_folder = paths.export_sources(conan_ref, conanfile.short_paths)\n previous_digest = _init_export_folder(exports_folder, exports_source_folder)\n _execute_export(conanfile_path, conanfile, exports_folder, exports_source_folder, output)\n shutil.copy2(conanfile_path, os.path.join(exports_folder, CONANFILE))\n\n _capture_export_scm_data(conanfile, os.path.dirname(conanfile_path), exports_folder,\n output, paths, conan_ref)\n\n digest = FileTreeManifest.create(exports_folder, exports_source_folder)\n\n if previous_digest and previous_digest == digest:\n output.info(\"The stored package has not changed\")\n modified_recipe = False\n digest = previous_digest # Use the old one, keep old timestamp\n else:\n output.success('A new %s version was exported' % CONANFILE)\n output.info('Folder: %s' % exports_folder)\n modified_recipe = True\n digest.save(exports_folder)\n\n source = paths.source(conan_ref, conanfile.short_paths)\n remove = False\n if is_dirty(source):\n output.info(\"Source folder is corrupted, forcing removal\")\n remove = True\n elif modified_recipe and not keep_source and os.path.exists(source):\n output.info(\"Package recipe modified in export, forcing source folder removal\")\n output.info(\"Use the --keep-source, -k option to skip it\")\n remove = True\n if remove:\n output.info(\"Removing 'source' folder, this can take a while for big packages\")\n try:\n # remove only the internal\n rmdir(source)\n except BaseException as e:\n output.error(\"Unable to delete source folder. \"\n \"Will be marked as corrupted for deletion\")\n output.warn(str(e))\n set_dirty(source)\n\n\ndef _init_export_folder(destination_folder, destination_src_folder):\n previous_digest = None\n try:\n if os.path.exists(destination_folder):\n if os.path.exists(os.path.join(destination_folder, CONAN_MANIFEST)):\n previous_digest = FileTreeManifest.load(destination_folder)\n # Maybe here we want to invalidate cache\n rmdir(destination_folder)\n os.makedirs(destination_folder)\n except Exception as e:\n raise ConanException(\"Unable to create folder %s\\n%s\" % (destination_folder, str(e)))\n try:\n if os.path.exists(destination_src_folder):\n rmdir(destination_src_folder)\n os.makedirs(destination_src_folder)\n except Exception as e:\n raise ConanException(\"Unable to create folder %s\\n%s\" % (destination_src_folder, str(e)))\n return previous_digest\n\n\ndef _execute_export(conanfile_path, conanfile, destination_folder, destination_source_folder,\n output):\n\n origin_folder = os.path.dirname(conanfile_path)\n\n def classify_patterns(patterns):\n patterns = patterns or []\n included, excluded = [], []\n for p in patterns:\n if p.startswith(\"!\"):\n excluded.append(p[1:])\n else:\n included.append(p)\n return included, excluded\n\n included_exports, excluded_exports = classify_patterns(conanfile.exports)\n included_sources, excluded_sources = classify_patterns(conanfile.exports_sources)\n\n try:\n os.unlink(os.path.join(origin_folder, CONANFILE + 'c'))\n except OSError:\n pass\n\n copier = FileCopier(origin_folder, destination_folder)\n for pattern in included_exports:\n copier(pattern, links=True, excludes=excluded_exports)\n copier = FileCopier(origin_folder, destination_source_folder)\n for pattern in included_sources:\n copier(pattern, links=True, excludes=excluded_sources)\n package_output = ScopedOutput(\"%s export\" % output.scope, output)\n copier.report(package_output)\n", "path": "conans/client/cmd/export.py"}]}
| 2,938 | 140 |
gh_patches_debug_13305
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1470
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing error message: Oops. That didn't work. The following error occured:
**Describe the bug**: No error message appear
```
>> python3 manage.py elasticapm test
Trying to send a test error to APM Server using these settings:
SERVICE_NAME: DJANGO_DEVELOPMENT
SECRET_TOKEN: xxx
SERVER: http://localhost:8200
Oops. That didn't work. The following error occured:
Fetched APM Server version 7.17.0
```
**To Reproduce**
Not sure how to reproduce this.
Where can I find a detailed error message?
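One way to surface more detail while investigating is to enable debug logging for the agent's loggers (a standard Django `LOGGING` configuration, shown only as a debugging aid; the logger names follow the `elasticapm.*` loggers used in the file below):
```python
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {
        # captures e.g. "elasticapm.transport.http", where connection errors are logged
        "elasticapm": {"handlers": ["console"], "level": "DEBUG"},
    },
}
```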
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/transport/http.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # BSD 3-Clause License
4 #
5 # Copyright (c) 2019, Elasticsearch BV
6 # All rights reserved.
7 #
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions are met:
10 #
11 # * Redistributions of source code must retain the above copyright notice, this
12 # list of conditions and the following disclaimer.
13 #
14 # * Redistributions in binary form must reproduce the above copyright notice,
15 # this list of conditions and the following disclaimer in the documentation
16 # and/or other materials provided with the distribution.
17 #
18 # * Neither the name of the copyright holder nor the names of its
19 # contributors may be used to endorse or promote products derived from
20 # this software without specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 import hashlib
34 import json
35 import re
36 import ssl
37 import urllib.parse
38 from urllib.request import getproxies_environment, proxy_bypass_environment
39
40 import urllib3
41 from urllib3.exceptions import MaxRetryError, TimeoutError
42
43 from elasticapm.transport.exceptions import TransportException
44 from elasticapm.transport.http_base import HTTPTransportBase
45 from elasticapm.utils import json_encoder, read_pem_file
46 from elasticapm.utils.logging import get_logger
47
48 try:
49 import certifi
50 except ImportError:
51 certifi = None
52
53 logger = get_logger("elasticapm.transport.http")
54
55
56 class Transport(HTTPTransportBase):
57 def __init__(self, url: str, *args, **kwargs) -> None:
58 super(Transport, self).__init__(url, *args, **kwargs)
59 pool_kwargs = {"cert_reqs": "CERT_REQUIRED", "ca_certs": self.ca_certs, "block": True}
60 if url.startswith("https"):
61 if self._server_cert:
62 pool_kwargs.update(
63 {"assert_fingerprint": self.cert_fingerprint, "assert_hostname": False, "cert_reqs": ssl.CERT_NONE}
64 )
65 del pool_kwargs["ca_certs"]
66 elif not self._verify_server_cert:
67 pool_kwargs["cert_reqs"] = ssl.CERT_NONE
68 pool_kwargs["assert_hostname"] = False
69 self._pool_kwargs = pool_kwargs
70 self._http = None
71 self._url = url
72
73 def send(self, data):
74 response = None
75
76 headers = self._headers.copy() if self._headers else {}
77 headers.update(self.auth_headers)
78
79 url = self._url
80 try:
81 try:
82 response = self.http.urlopen(
83 "POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
84 )
85 logger.debug("Sent request, url=%s size=%.2fkb status=%s", url, len(data) / 1024.0, response.status)
86 except Exception as e:
87 print_trace = True
88 if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):
89 message = "Connection to APM Server timed out " "(url: %s, timeout: %s seconds)" % (
90 self._url,
91 self._timeout,
92 )
93 print_trace = False
94 else:
95 message = "Unable to reach APM Server: %s (url: %s)" % (e, self._url)
96 raise TransportException(message, data, print_trace=print_trace)
97 body = response.read()
98 if response.status >= 400:
99 if response.status == 429: # rate-limited
100 message = "Temporarily rate limited: "
101 print_trace = False
102 else:
103 message = "HTTP %s: " % response.status
104 print_trace = True
105 message += body.decode("utf8", errors="replace")[:10000]
106 raise TransportException(message, data, print_trace=print_trace)
107 return response.getheader("Location")
108 finally:
109 if response:
110 response.close()
111
112 @property
113 def http(self) -> urllib3.PoolManager:
114 if not self._http:
115 url_parts = urllib.parse.urlparse(self._url)
116 proxies = getproxies_environment()
117 proxy_url = proxies.get("https", proxies.get("http", None))
118 if proxy_url and not proxy_bypass_environment(url_parts.netloc):
119 self._http = urllib3.ProxyManager(proxy_url, **self._pool_kwargs)
120 else:
121 self._http = urllib3.PoolManager(**self._pool_kwargs)
122 return self._http
123
124 def handle_fork(self) -> None:
125 # reset http pool to avoid sharing connections with the parent process
126 self._http = None
127
128 def get_config(self, current_version=None, keys=None):
129 """
130 Gets configuration from a remote APM Server
131
132 :param current_version: version of the current configuration
133 :param keys: a JSON-serializable dict to identify this instance, e.g.
134 {
135 "service": {
136 "name": "foo",
137 "environment": "bar"
138 }
139 }
140 :return: a three-tuple of new version, config dictionary and validity in seconds.
141 Any element of the tuple can be None.
142 """
143 url = self._config_url
144 data = json_encoder.dumps(keys).encode("utf-8")
145 headers = self._headers.copy()
146 headers[b"Content-Type"] = "application/json"
147 headers.pop(b"Content-Encoding", None) # remove gzip content-encoding header
148 headers.update(self.auth_headers)
149 max_age = 300
150 if current_version:
151 headers["If-None-Match"] = current_version
152 try:
153 response = self.http.urlopen(
154 "POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
155 )
156 except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
157 logger.debug("HTTP error while fetching remote config: %s", str(e))
158 return current_version, None, max_age
159 body = response.read()
160 if "Cache-Control" in response.headers:
161 try:
162 max_age = int(next(re.finditer(r"max-age=(\d+)", response.headers["Cache-Control"])).groups()[0])
163 except StopIteration:
164 logger.debug("Could not parse Cache-Control header: %s", response.headers["Cache-Control"])
165 if response.status == 304:
166 # config is unchanged, return
167 logger.debug("Configuration unchanged")
168 return current_version, None, max_age
169 elif response.status >= 400:
170 return None, None, max_age
171
172 if not body:
173 logger.debug("APM Server answered with empty body and status code %s", response.status)
174 return current_version, None, max_age
175 body = body.decode("utf-8")
176 try:
177 data = json_encoder.loads(body)
178 return response.headers.get("Etag"), data, max_age
179 except json.JSONDecodeError:
180 logger.warning("Failed decoding APM Server response as JSON: %s", body)
181 return current_version, None, max_age
182
183 def _process_queue(self):
184 if not self.client.server_version:
185 self.fetch_server_info()
186 super()._process_queue()
187
188 def fetch_server_info(self):
189 headers = self._headers.copy() if self._headers else {}
190 headers.update(self.auth_headers)
191 headers["accept"] = "text/plain"
192 try:
193 response = self.http.urlopen("GET", self._server_info_url, headers=headers, timeout=self._timeout)
194 body = response.data
195 data = json_encoder.loads(body.decode("utf8"))
196 version = data["version"]
197 logger.info("Fetched APM Server version %s", version)
198 self.client.server_version = version_string_to_tuple(version)
199 except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
200 logger.warning("HTTP error while fetching server information: %s", str(e))
201 except json.JSONDecodeError as e:
202 logger.warning("JSON decoding error while fetching server information: %s", str(e))
203 except (KeyError, TypeError):
204 logger.warning("No version key found in server response: %s", response.data)
205
206 @property
207 def cert_fingerprint(self):
208 if self._server_cert:
209 with open(self._server_cert, "rb") as f:
210 cert_data = read_pem_file(f)
211 digest = hashlib.sha256()
212 digest.update(cert_data)
213 return digest.hexdigest()
214 return None
215
216 @property
217 def auth_headers(self):
218 headers = super(Transport, self).auth_headers
219 return {k.encode("ascii"): v.encode("ascii") for k, v in headers.items()}
220
221 @property
222 def ca_certs(self):
223 """
224 Return location of certificate store. If it is available and not disabled via setting,
225 this will return the location of the certifi certificate store.
226 """
227 return certifi.where() if (certifi and self.client.config.use_certifi) else None
228
229
230 def version_string_to_tuple(version):
231 if version:
232 version_parts = re.split(r"[.\-]", version)
233 return tuple(int(p) if p.isdigit() else p for p in version_parts)
234 return ()
235
236
237 # left for backwards compatibility
238 AsyncTransport = Transport
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py
--- a/elasticapm/transport/http.py
+++ b/elasticapm/transport/http.py
@@ -194,7 +194,7 @@
body = response.data
data = json_encoder.loads(body.decode("utf8"))
version = data["version"]
- logger.info("Fetched APM Server version %s", version)
+ logger.debug("Fetched APM Server version %s", version)
self.client.server_version = version_string_to_tuple(version)
except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
logger.warning("HTTP error while fetching server information: %s", str(e))
|
{"golden_diff": "diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py\n--- a/elasticapm/transport/http.py\n+++ b/elasticapm/transport/http.py\n@@ -194,7 +194,7 @@\n body = response.data\n data = json_encoder.loads(body.decode(\"utf8\"))\n version = data[\"version\"]\n- logger.info(\"Fetched APM Server version %s\", version)\n+ logger.debug(\"Fetched APM Server version %s\", version)\n self.client.server_version = version_string_to_tuple(version)\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.warning(\"HTTP error while fetching server information: %s\", str(e))\n", "issue": "Missing error message: Oops. That didn't work. The following error occured:\n**Describe the bug**: No error message appear\r\n\r\n```\r\n>> python3 manage.py elasticapm test\r\n\r\nTrying to send a test error to APM Server using these settings:\r\n\r\nSERVICE_NAME:\tDJANGO_DEVELOPMENT\r\nSECRET_TOKEN:\txxx\r\nSERVER:\t\thttp://localhost:8200\r\n\r\nOops. That didn't work. The following error occured: \r\n\r\nFetched APM Server version 7.17.0\r\n```\r\n\r\n**To Reproduce**\r\n\r\nNot sure how to reproduce this. \r\nWhere can I find a detailed error message?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport hashlib\nimport json\nimport re\nimport ssl\nimport urllib.parse\nfrom urllib.request import getproxies_environment, proxy_bypass_environment\n\nimport urllib3\nfrom urllib3.exceptions import MaxRetryError, TimeoutError\n\nfrom elasticapm.transport.exceptions import TransportException\nfrom elasticapm.transport.http_base import HTTPTransportBase\nfrom elasticapm.utils import json_encoder, read_pem_file\nfrom elasticapm.utils.logging import get_logger\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\nlogger = get_logger(\"elasticapm.transport.http\")\n\n\nclass Transport(HTTPTransportBase):\n def __init__(self, url: str, *args, **kwargs) -> None:\n super(Transport, self).__init__(url, *args, **kwargs)\n pool_kwargs = {\"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": self.ca_certs, \"block\": True}\n if url.startswith(\"https\"):\n if self._server_cert:\n pool_kwargs.update(\n {\"assert_fingerprint\": self.cert_fingerprint, \"assert_hostname\": False, \"cert_reqs\": ssl.CERT_NONE}\n )\n del pool_kwargs[\"ca_certs\"]\n elif not self._verify_server_cert:\n pool_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n pool_kwargs[\"assert_hostname\"] = False\n self._pool_kwargs = pool_kwargs\n self._http = None\n self._url = url\n\n def send(self, data):\n response = None\n\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n\n url = self._url\n try:\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n logger.debug(\"Sent request, url=%s size=%.2fkb status=%s\", url, len(data) / 1024.0, response.status)\n except Exception as e:\n print_trace = True\n if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):\n message = \"Connection to APM Server timed out \" \"(url: %s, timeout: %s seconds)\" % (\n self._url,\n self._timeout,\n )\n print_trace = False\n else:\n message = \"Unable to reach APM Server: %s (url: %s)\" % (e, self._url)\n raise TransportException(message, data, print_trace=print_trace)\n body = response.read()\n if response.status >= 400:\n if response.status == 429: # rate-limited\n message = \"Temporarily rate limited: \"\n print_trace = False\n else:\n message = \"HTTP %s: \" % response.status\n print_trace = True\n message += body.decode(\"utf8\", errors=\"replace\")[:10000]\n raise TransportException(message, data, print_trace=print_trace)\n return response.getheader(\"Location\")\n finally:\n if response:\n response.close()\n\n @property\n def http(self) -> urllib3.PoolManager:\n if not self._http:\n url_parts = urllib.parse.urlparse(self._url)\n proxies = getproxies_environment()\n proxy_url = proxies.get(\"https\", proxies.get(\"http\", None))\n if proxy_url and not proxy_bypass_environment(url_parts.netloc):\n self._http = urllib3.ProxyManager(proxy_url, **self._pool_kwargs)\n else:\n self._http = urllib3.PoolManager(**self._pool_kwargs)\n return self._http\n\n def handle_fork(self) -> None:\n # reset http 
pool to avoid sharing connections with the parent process\n self._http = None\n\n def get_config(self, current_version=None, keys=None):\n \"\"\"\n Gets configuration from a remote APM Server\n\n :param current_version: version of the current configuration\n :param keys: a JSON-serializable dict to identify this instance, e.g.\n {\n \"service\": {\n \"name\": \"foo\",\n \"environment\": \"bar\"\n }\n }\n :return: a three-tuple of new version, config dictionary and validity in seconds.\n Any element of the tuple can be None.\n \"\"\"\n url = self._config_url\n data = json_encoder.dumps(keys).encode(\"utf-8\")\n headers = self._headers.copy()\n headers[b\"Content-Type\"] = \"application/json\"\n headers.pop(b\"Content-Encoding\", None) # remove gzip content-encoding header\n headers.update(self.auth_headers)\n max_age = 300\n if current_version:\n headers[\"If-None-Match\"] = current_version\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.debug(\"HTTP error while fetching remote config: %s\", str(e))\n return current_version, None, max_age\n body = response.read()\n if \"Cache-Control\" in response.headers:\n try:\n max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response.headers[\"Cache-Control\"])).groups()[0])\n except StopIteration:\n logger.debug(\"Could not parse Cache-Control header: %s\", response.headers[\"Cache-Control\"])\n if response.status == 304:\n # config is unchanged, return\n logger.debug(\"Configuration unchanged\")\n return current_version, None, max_age\n elif response.status >= 400:\n return None, None, max_age\n\n if not body:\n logger.debug(\"APM Server answered with empty body and status code %s\", response.status)\n return current_version, None, max_age\n body = body.decode(\"utf-8\")\n try:\n data = json_encoder.loads(body)\n return response.headers.get(\"Etag\"), data, max_age\n except json.JSONDecodeError:\n logger.warning(\"Failed decoding APM Server response as JSON: %s\", body)\n return current_version, None, max_age\n\n def _process_queue(self):\n if not self.client.server_version:\n self.fetch_server_info()\n super()._process_queue()\n\n def fetch_server_info(self):\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n headers[\"accept\"] = \"text/plain\"\n try:\n response = self.http.urlopen(\"GET\", self._server_info_url, headers=headers, timeout=self._timeout)\n body = response.data\n data = json_encoder.loads(body.decode(\"utf8\"))\n version = data[\"version\"]\n logger.info(\"Fetched APM Server version %s\", version)\n self.client.server_version = version_string_to_tuple(version)\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.warning(\"HTTP error while fetching server information: %s\", str(e))\n except json.JSONDecodeError as e:\n logger.warning(\"JSON decoding error while fetching server information: %s\", str(e))\n except (KeyError, TypeError):\n logger.warning(\"No version key found in server response: %s\", response.data)\n\n @property\n def cert_fingerprint(self):\n if self._server_cert:\n with open(self._server_cert, \"rb\") as f:\n cert_data = read_pem_file(f)\n digest = hashlib.sha256()\n digest.update(cert_data)\n return digest.hexdigest()\n return None\n\n @property\n def auth_headers(self):\n headers = super(Transport, self).auth_headers\n return {k.encode(\"ascii\"): 
v.encode(\"ascii\") for k, v in headers.items()}\n\n @property\n def ca_certs(self):\n \"\"\"\n Return location of certificate store. If it is available and not disabled via setting,\n this will return the location of the certifi certificate store.\n \"\"\"\n return certifi.where() if (certifi and self.client.config.use_certifi) else None\n\n\ndef version_string_to_tuple(version):\n if version:\n version_parts = re.split(r\"[.\\-]\", version)\n return tuple(int(p) if p.isdigit() else p for p in version_parts)\n return ()\n\n\n# left for backwards compatibility\nAsyncTransport = Transport\n", "path": "elasticapm/transport/http.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport hashlib\nimport json\nimport re\nimport ssl\nimport urllib.parse\nfrom urllib.request import getproxies_environment, proxy_bypass_environment\n\nimport urllib3\nfrom urllib3.exceptions import MaxRetryError, TimeoutError\n\nfrom elasticapm.transport.exceptions import TransportException\nfrom elasticapm.transport.http_base import HTTPTransportBase\nfrom elasticapm.utils import json_encoder, read_pem_file\nfrom elasticapm.utils.logging import get_logger\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\nlogger = get_logger(\"elasticapm.transport.http\")\n\n\nclass Transport(HTTPTransportBase):\n def __init__(self, url: str, *args, **kwargs) -> None:\n super(Transport, self).__init__(url, *args, **kwargs)\n pool_kwargs = {\"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": self.ca_certs, \"block\": True}\n if url.startswith(\"https\"):\n if self._server_cert:\n pool_kwargs.update(\n {\"assert_fingerprint\": self.cert_fingerprint, \"assert_hostname\": False, \"cert_reqs\": ssl.CERT_NONE}\n )\n del pool_kwargs[\"ca_certs\"]\n elif not self._verify_server_cert:\n pool_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n pool_kwargs[\"assert_hostname\"] = False\n self._pool_kwargs = pool_kwargs\n self._http = None\n self._url = url\n\n def 
send(self, data):\n response = None\n\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n\n url = self._url\n try:\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n logger.debug(\"Sent request, url=%s size=%.2fkb status=%s\", url, len(data) / 1024.0, response.status)\n except Exception as e:\n print_trace = True\n if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):\n message = \"Connection to APM Server timed out \" \"(url: %s, timeout: %s seconds)\" % (\n self._url,\n self._timeout,\n )\n print_trace = False\n else:\n message = \"Unable to reach APM Server: %s (url: %s)\" % (e, self._url)\n raise TransportException(message, data, print_trace=print_trace)\n body = response.read()\n if response.status >= 400:\n if response.status == 429: # rate-limited\n message = \"Temporarily rate limited: \"\n print_trace = False\n else:\n message = \"HTTP %s: \" % response.status\n print_trace = True\n message += body.decode(\"utf8\", errors=\"replace\")[:10000]\n raise TransportException(message, data, print_trace=print_trace)\n return response.getheader(\"Location\")\n finally:\n if response:\n response.close()\n\n @property\n def http(self) -> urllib3.PoolManager:\n if not self._http:\n url_parts = urllib.parse.urlparse(self._url)\n proxies = getproxies_environment()\n proxy_url = proxies.get(\"https\", proxies.get(\"http\", None))\n if proxy_url and not proxy_bypass_environment(url_parts.netloc):\n self._http = urllib3.ProxyManager(proxy_url, **self._pool_kwargs)\n else:\n self._http = urllib3.PoolManager(**self._pool_kwargs)\n return self._http\n\n def handle_fork(self) -> None:\n # reset http pool to avoid sharing connections with the parent process\n self._http = None\n\n def get_config(self, current_version=None, keys=None):\n \"\"\"\n Gets configuration from a remote APM Server\n\n :param current_version: version of the current configuration\n :param keys: a JSON-serializable dict to identify this instance, e.g.\n {\n \"service\": {\n \"name\": \"foo\",\n \"environment\": \"bar\"\n }\n }\n :return: a three-tuple of new version, config dictionary and validity in seconds.\n Any element of the tuple can be None.\n \"\"\"\n url = self._config_url\n data = json_encoder.dumps(keys).encode(\"utf-8\")\n headers = self._headers.copy()\n headers[b\"Content-Type\"] = \"application/json\"\n headers.pop(b\"Content-Encoding\", None) # remove gzip content-encoding header\n headers.update(self.auth_headers)\n max_age = 300\n if current_version:\n headers[\"If-None-Match\"] = current_version\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.debug(\"HTTP error while fetching remote config: %s\", str(e))\n return current_version, None, max_age\n body = response.read()\n if \"Cache-Control\" in response.headers:\n try:\n max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response.headers[\"Cache-Control\"])).groups()[0])\n except StopIteration:\n logger.debug(\"Could not parse Cache-Control header: %s\", response.headers[\"Cache-Control\"])\n if response.status == 304:\n # config is unchanged, return\n logger.debug(\"Configuration unchanged\")\n return current_version, None, max_age\n elif response.status >= 400:\n return None, None, max_age\n\n if not body:\n logger.debug(\"APM Server answered with 
empty body and status code %s\", response.status)\n return current_version, None, max_age\n body = body.decode(\"utf-8\")\n try:\n data = json_encoder.loads(body)\n return response.headers.get(\"Etag\"), data, max_age\n except json.JSONDecodeError:\n logger.warning(\"Failed decoding APM Server response as JSON: %s\", body)\n return current_version, None, max_age\n\n def _process_queue(self):\n if not self.client.server_version:\n self.fetch_server_info()\n super()._process_queue()\n\n def fetch_server_info(self):\n headers = self._headers.copy() if self._headers else {}\n headers.update(self.auth_headers)\n headers[\"accept\"] = \"text/plain\"\n try:\n response = self.http.urlopen(\"GET\", self._server_info_url, headers=headers, timeout=self._timeout)\n body = response.data\n data = json_encoder.loads(body.decode(\"utf8\"))\n version = data[\"version\"]\n logger.debug(\"Fetched APM Server version %s\", version)\n self.client.server_version = version_string_to_tuple(version)\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.warning(\"HTTP error while fetching server information: %s\", str(e))\n except json.JSONDecodeError as e:\n logger.warning(\"JSON decoding error while fetching server information: %s\", str(e))\n except (KeyError, TypeError):\n logger.warning(\"No version key found in server response: %s\", response.data)\n\n @property\n def cert_fingerprint(self):\n if self._server_cert:\n with open(self._server_cert, \"rb\") as f:\n cert_data = read_pem_file(f)\n digest = hashlib.sha256()\n digest.update(cert_data)\n return digest.hexdigest()\n return None\n\n @property\n def auth_headers(self):\n headers = super(Transport, self).auth_headers\n return {k.encode(\"ascii\"): v.encode(\"ascii\") for k, v in headers.items()}\n\n @property\n def ca_certs(self):\n \"\"\"\n Return location of certificate store. If it is available and not disabled via setting,\n this will return the location of the certifi certificate store.\n \"\"\"\n return certifi.where() if (certifi and self.client.config.use_certifi) else None\n\n\ndef version_string_to_tuple(version):\n if version:\n version_parts = re.split(r\"[.\\-]\", version)\n return tuple(int(p) if p.isdigit() else p for p in version_parts)\n return ()\n\n\n# left for backwards compatibility\nAsyncTransport = Transport\n", "path": "elasticapm/transport/http.py"}]}
| 3,158 | 163 |
gh_patches_debug_21145
|
rasdani/github-patches
|
git_diff
|
adap__flower-2889
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If gRPC server is running behind a load balancer, clients may not be able to connect
### Describe the bug
Flower is using the `peer()` method from `grpc.ServicerContext` in order to identify unique flower clients. However, in some situations (like when running the gRPC server behind a load balancer or proxy) different clients can have the same peer identifier (i.e. the same `IP:PORT`), as HTTP/2 connections are multiplexed.
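The effect can be reduced to a registry keyed by `grpc.ServicerContext.peer()` strings — a toy sketch (not Flower code) in which both clients arrive with the proxy's address:
```python
registered = {}

def register(peer: str) -> bool:
    # peer is the string grpc.ServicerContext.peer() would return, e.g. "ipv4:10.0.0.7:80"
    if peer in registered:
        return False
    registered[peer] = object()
    return True

assert register("ipv4:10.0.0.7:80") is True    # first client, seen via the load balancer
assert register("ipv4:10.0.0.7:80") is False   # second client presents the same ip:port
```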
### Steps/Code to Reproduce
This can be tested with a simple [Traefik](https://doc.traefik.io/traefik/) setup as follows (https://doc.traefik.io/traefik/user-guides/grpc/):
`traefik.yaml`:
```
entryPoints:
web:
address: :80
providers:
file:
filename: grpc.yaml
api: {}
```
`grpc.yaml`:
```
http:
routers:
to-grpc:
entryPoints:
- "web"
service: "srv-grpc"
rule: "HostRegexp(`.*`)"
services:
srv-grpc:
loadBalancer:
servers:
- url: h2c://127.0.0.1:5000
```
The Traefik server can be executed as follows:
```
./traefik --configFile=traefik.yaml
```
Then we can launch a Flower server with the following code:
```
import flwr as fl
def acc_wavg(metrics):
n = sum([i for i, _ in metrics])
acc = sum([i * metric["accuracy"] / n for i, metric in metrics])
return {"accuracy": acc}
strategy = fl.server.strategy.FedAvg(
min_available_clients=2,
evaluate_metrics_aggregation_fn=acc_wavg
)
fl.server.start_server(
server_address="0.0.0.0:5000",
config=fl.server.ServerConfig(num_rounds=3),
strategy=strategy,
)
```
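The clients referenced in the logs below were started with `fl.client.start_numpy_client`; a minimal stand-in client that exercises the same connection path (no real training, address placeholder to be replaced by the Traefik entrypoint) might be:
```python
import flwr as fl

class DummyClient(fl.client.NumPyClient):
    def get_parameters(self, config):
        return []
    def fit(self, parameters, config):
        return [], 1, {}
    def evaluate(self, parameters, config):
        return 0.0, 1, {"accuracy": 0.0}

fl.client.start_numpy_client(server_address="<traefik-host>:80", client=DummyClient())
```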
### Expected Results
When we connect two clients to the loadbalancer IP, on port 80, the first one connects and keeps waiting:
```
INFO flwr 2023-06-28 10:03:14,848 | grpc.py:50 | Opened insecure gRPC connection (no certificates were passed)
DEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.IDLE
DEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.CONNECTING
DEBUG flwr 2023-06-28 10:03:14,875 | connection.py:39 | ChannelConnectivity.READY
```
However, the second one is not connected:
```
DEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.IDLE
DEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.CONNECTING
DEBUG flwr 2023-06-28 09:37:32,578 | connection.py:39 | ChannelConnectivity.READY
DEBUG flwr 2023-06-28 09:37:32,784 | connection.py:113 | gRPC channel closed
Traceback (most recent call last):
File "/home/alvaro/w/prj/fl/fl.py", line 68, in <module>
fl.client.start_numpy_client(
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py", line 252, in start_numpy_client
start_client(
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py", line 174, in start_client
server_message = receive()
^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/grpc_client/connection.py", line 105, in <lambda>
receive: Callable[[], ServerMessage] = lambda: next(server_message_iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py", line 475, in __next__
return self._next()
^^^^^^^^^^^^
File "/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py", line 879, in _next
raise StopIteration()
StopIteration
```
This is because the second client connection is discarded [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/client_manager.py#L149), as it is coming from the same gRPC peer (i.e. the load balancer) due to this code [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py#L91). Because of the above, I assume that the `ip:port` tuple is not unique for gRPC clients and therefore cannot be used to identify a unique Flower client (as a matter of fact, the `ip:port` are those of the load balancer, not those of the actual client).
As a test and quick hack we have moved to using UUIDs to identify the clients [here](https://github.com/AI4EOSC/flower/commit/b215d9f3cce1ad8806e296db4fe105a8b7f5c6c9). To my understanding, it is safe to do it that way, as the peer identifier is not used elsewhere, but I do not know if something else is planned for it.
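For illustration, a minimal sketch of that idea (a hypothetical helper mirroring the linked commit, not a quote of it) is to mint a fresh identifier per `Join()` call instead of reusing `context.peer()`:

```python
import uuid

def make_connection_id() -> str:
    # Behind a proxy, context.peer() can report the proxy's ip:port for every
    # client, so use a per-connection UUID as the client identifier instead.
    return uuid.uuid4().hex
```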
### Actual Results
Two or more clients should be able to connect when running behind a load balancer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py`
Content:
```
1 # Copyright 2020 Flower Labs GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Servicer for FlowerService.
16
17 Relevant knowledge for reading this modules code:
18 - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
19 """
20
21
22 from typing import Callable, Iterator
23
24 import grpc
25 from iterators import TimeoutIterator
26
27 from flwr.proto import transport_pb2_grpc # pylint: disable=E0611
28 from flwr.proto.transport_pb2 import ( # pylint: disable=E0611
29 ClientMessage,
30 ServerMessage,
31 )
32 from flwr.server.client_manager import ClientManager
33 from flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper
34 from flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy
35
36
37 def default_bridge_factory() -> GrpcBridge:
38 """Return GrpcBridge instance."""
39 return GrpcBridge()
40
41
42 def default_grpc_client_proxy_factory(cid: str, bridge: GrpcBridge) -> GrpcClientProxy:
43 """Return GrpcClientProxy instance."""
44 return GrpcClientProxy(cid=cid, bridge=bridge)
45
46
47 def register_client_proxy(
48 client_manager: ClientManager,
49 client_proxy: GrpcClientProxy,
50 context: grpc.ServicerContext,
51 ) -> bool:
52 """Try registering GrpcClientProxy with ClientManager."""
53 is_success = client_manager.register(client_proxy)
54 if is_success:
55
56 def rpc_termination_callback() -> None:
57 client_proxy.bridge.close()
58 client_manager.unregister(client_proxy)
59
60 context.add_callback(rpc_termination_callback)
61 return is_success
62
63
64 class FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):
65 """FlowerServiceServicer for bi-directional gRPC message stream."""
66
67 def __init__(
68 self,
69 client_manager: ClientManager,
70 grpc_bridge_factory: Callable[[], GrpcBridge] = default_bridge_factory,
71 grpc_client_proxy_factory: Callable[
72 [str, GrpcBridge], GrpcClientProxy
73 ] = default_grpc_client_proxy_factory,
74 ) -> None:
75 self.client_manager: ClientManager = client_manager
76 self.grpc_bridge_factory = grpc_bridge_factory
77 self.client_proxy_factory = grpc_client_proxy_factory
78
79 def Join( # pylint: disable=invalid-name
80 self,
81 request_iterator: Iterator[ClientMessage],
82 context: grpc.ServicerContext,
83 ) -> Iterator[ServerMessage]:
84 """Facilitate bi-directional streaming of messages between server and client.
85
86 Invoked by each gRPC client which participates in the network.
87
88 Protocol:
89 - The first message is sent from the server to the client
90 - Both `ServerMessage` and `ClientMessage` are message "wrappers"
91 wrapping the actual message
92 - The `Join` method is (pretty much) unaware of the protocol
93 """
94 peer: str = context.peer()
95 bridge = self.grpc_bridge_factory()
96 client_proxy = self.client_proxy_factory(peer, bridge)
97 is_success = register_client_proxy(self.client_manager, client_proxy, context)
98
99 if is_success:
100 # Get iterators
101 client_message_iterator = TimeoutIterator(
102 iterator=request_iterator, reset_on_next=True
103 )
104 ins_wrapper_iterator = bridge.ins_wrapper_iterator()
105
106 # All messages will be pushed to client bridge directly
107 while True:
108 try:
109 # Get ins_wrapper from bridge and yield server_message
110 ins_wrapper: InsWrapper = next(ins_wrapper_iterator)
111 yield ins_wrapper.server_message
112
113 # Set current timeout, might be None
114 if ins_wrapper.timeout is not None:
115 client_message_iterator.set_timeout(ins_wrapper.timeout)
116
117 # Wait for client message
118 client_message = next(client_message_iterator)
119
120 if client_message is client_message_iterator.get_sentinel():
121 # Important: calling `context.abort` in gRPC always
122 # raises an exception so that all code after the call to
123 # `context.abort` will not run. If subsequent code should
124 # be executed, the `rpc_termination_callback` can be used
125 # (as shown in the `register_client` function).
126 details = f"Timeout of {ins_wrapper.timeout}sec was exceeded."
127 context.abort(
128 code=grpc.StatusCode.DEADLINE_EXCEEDED,
129 details=details,
130 )
131 # This return statement is only for the linter so it understands
132 # that client_message in subsequent lines is not None
133 # It does not understand that `context.abort` will terminate
134 # this execution context by raising an exception.
135 return
136
137 bridge.set_res_wrapper(
138 res_wrapper=ResWrapper(client_message=client_message)
139 )
140 except StopIteration:
141 break
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
--- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
+++ b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py
@@ -18,7 +18,7 @@
- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
"""
-
+import uuid
from typing import Callable, Iterator
import grpc
@@ -91,9 +91,12 @@
wrapping the actual message
- The `Join` method is (pretty much) unaware of the protocol
"""
- peer: str = context.peer()
+ # When running Flower behind a proxy, the peer can be the same for
+ # different clients, so instead of `cid: str = context.peer()` we
+ # use a `UUID4` that is unique.
+ cid: str = uuid.uuid4().hex
bridge = self.grpc_bridge_factory()
- client_proxy = self.client_proxy_factory(peer, bridge)
+ client_proxy = self.client_proxy_factory(cid, bridge)
is_success = register_client_proxy(self.client_manager, client_proxy, context)
if is_success:
|
{"golden_diff": "diff --git a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n--- a/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n+++ b/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py\n@@ -18,7 +18,7 @@\n - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n \"\"\"\n \n-\n+import uuid\n from typing import Callable, Iterator\n \n import grpc\n@@ -91,9 +91,12 @@\n wrapping the actual message\n - The `Join` method is (pretty much) unaware of the protocol\n \"\"\"\n- peer: str = context.peer()\n+ # When running Flower behind a proxy, the peer can be the same for\n+ # different clients, so instead of `cid: str = context.peer()` we\n+ # use a `UUID4` that is unique.\n+ cid: str = uuid.uuid4().hex\n bridge = self.grpc_bridge_factory()\n- client_proxy = self.client_proxy_factory(peer, bridge)\n+ client_proxy = self.client_proxy_factory(cid, bridge)\n is_success = register_client_proxy(self.client_manager, client_proxy, context)\n \n if is_success:\n", "issue": "If gRPC server is running behing a load balancer, clients may not be able to connect\n### Describe the bug\r\n\r\nFlower is using the `peer()` method from `grpc.ServicerContext` in order to identify unique flower clients. However, in some situations (like when running the gRPC server behind a load balancer or proxy) different clients can have the same peer identifier (i.e. the same `IP:PORT`), as HTTP/2 connections are multiplexed.\r\n\r\n### Steps/Code to Reproduce\r\n\r\nThis can be tested with a simple [Traefik](https://doc.traefik.io/traefik/) setup as follows (https://doc.traefik.io/traefik/user-guides/grpc/):\r\n\r\n`traefik.yaml`:\r\n```\r\nentryPoints:\r\n web:\r\n address: :80\r\n\r\nproviders:\r\n file:\r\n filename: grpc.yaml\r\n\r\napi: {}\r\n```\r\n\r\n`grpc.yaml`:\r\n```\r\nhttp:\r\n routers:\r\n to-grpc:\r\n entryPoints: \r\n - \"web\"\r\n service: \"srv-grpc\"\r\n rule: \"HostRegexp(`.*`)\"\r\n\r\n services:\r\n srv-grpc:\r\n loadBalancer:\r\n servers:\r\n - url: h2c://127.0.0.1:5000\r\n```\r\n\r\nThe Traefik server can be executed as follows:\r\n```\r\n ./traefik --configFile=traefik.yaml\r\n```\r\n\r\nThen we can launch then a Flower sever with the following code:\r\n```\r\nimport flwr as fl\r\n\r\ndef acc_wavg(metrics):\r\n n = sum([i for i, _ in metrics])\r\n acc = sum([i * metric[\"accuracy\"] / n for i, metric in metrics])\r\n return {\"accuracy\": acc}\r\n \r\nstrategy = fl.server.strategy.FedAvg(\r\n min_available_clients=2,\r\n evaluate_metrics_aggregation_fn=acc_wavg\r\n)\r\n \r\nfl.server.start_server(\r\n server_address=\"0.0.0.0:5000\",\r\n config=fl.server.ServerConfig(num_rounds=3),\r\n strategy=strategy,\r\n)\r\n```\r\n\r\n### Expected Results\r\n\r\nWhen we connect two clients to the loadbalancer IP, on port 80, the first one connects and keeps waiting:\r\n\r\n```\r\nINFO flwr 2023-06-28 10:03:14,848 | grpc.py:50 | Opened insecure gRPC connection (no certificates were passed)\r\nDEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.IDLE\r\nDEBUG flwr 2023-06-28 10:03:14,849 | connection.py:39 | ChannelConnectivity.CONNECTING\r\nDEBUG flwr 2023-06-28 10:03:14,875 | connection.py:39 | ChannelConnectivity.READY\r\n```\r\n\r\nHowever, the second one is not connected:\r\n```\r\nDEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.IDLE\r\nDEBUG flwr 2023-06-28 09:37:32,554 | connection.py:39 | ChannelConnectivity.CONNECTING\r\nDEBUG flwr 2023-06-28 
09:37:32,578 | connection.py:39 | ChannelConnectivity.READY\r\nDEBUG flwr 2023-06-28 09:37:32,784 | connection.py:113 | gRPC channel closed\r\nTraceback (most recent call last):\r\n File \"/home/alvaro/w/prj/fl/fl.py\", line 68, in <module>\r\n fl.client.start_numpy_client(\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py\", line 252, in start_numpy_client\r\n start_client(\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/app.py\", line 174, in start_client\r\n server_message = receive()\r\n ^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/flwr/client/grpc_client/connection.py\", line 105, in <lambda>\r\n receive: Callable[[], ServerMessage] = lambda: next(server_message_iterator)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py\", line 475, in __next__\r\n return self._next()\r\n ^^^^^^^^^^^^\r\n File \"/home/alvaro/w/prj/fl/VENV/lib/python3.11/site-packages/grpc/_channel.py\", line 879, in _next\r\n raise StopIteration()\r\nStopIteration\r\n```\r\n\r\nThis is because the second client connection is discarded [here]https://github.com/adap/flower/blob/main/src/py/flwr/server/client_manager.py#L149) as it is coming from the same gRPC peer (i.e. the load balancer) due to this code [here](https://github.com/adap/flower/blob/main/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py#L91), as due to the above I assume that the tuple `ip:port` is not unique for gRPC clients, therefore it cannot be used to identify a unique flower client (as a matter of fact, the `ip:port` are those of the load balancer, not those of the actual client).\r\n\r\nAs a test and quick hack we have moved to use uuids to identify the clients [here](https://github.com/AI4EOSC/flower/commit/b215d9f3cce1ad8806e296db4fe105a8b7f5c6c9). To my understanding, it is safe to do that way, as the peer identifier is not used elsewhere, but I do not know if it is planned to do something else with them.\r\n\r\n### Actual Results\r\n\r\nTwo and more clients should be able to connect when running behind a load balancer.\n", "before_files": [{"content": "# Copyright 2020 Flower Labs GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Servicer for FlowerService.\n\nRelevant knowledge for reading this modules code:\n- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n\"\"\"\n\n\nfrom typing import Callable, Iterator\n\nimport grpc\nfrom iterators import TimeoutIterator\n\nfrom flwr.proto import transport_pb2_grpc # pylint: disable=E0611\nfrom flwr.proto.transport_pb2 import ( # pylint: disable=E0611\n ClientMessage,\n ServerMessage,\n)\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper\nfrom flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy\n\n\ndef default_bridge_factory() -> GrpcBridge:\n \"\"\"Return GrpcBridge instance.\"\"\"\n return GrpcBridge()\n\n\ndef default_grpc_client_proxy_factory(cid: str, bridge: GrpcBridge) -> GrpcClientProxy:\n \"\"\"Return GrpcClientProxy instance.\"\"\"\n return GrpcClientProxy(cid=cid, bridge=bridge)\n\n\ndef register_client_proxy(\n client_manager: ClientManager,\n client_proxy: GrpcClientProxy,\n context: grpc.ServicerContext,\n) -> bool:\n \"\"\"Try registering GrpcClientProxy with ClientManager.\"\"\"\n is_success = client_manager.register(client_proxy)\n if is_success:\n\n def rpc_termination_callback() -> None:\n client_proxy.bridge.close()\n client_manager.unregister(client_proxy)\n\n context.add_callback(rpc_termination_callback)\n return is_success\n\n\nclass FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):\n \"\"\"FlowerServiceServicer for bi-directional gRPC message stream.\"\"\"\n\n def __init__(\n self,\n client_manager: ClientManager,\n grpc_bridge_factory: Callable[[], GrpcBridge] = default_bridge_factory,\n grpc_client_proxy_factory: Callable[\n [str, GrpcBridge], GrpcClientProxy\n ] = default_grpc_client_proxy_factory,\n ) -> None:\n self.client_manager: ClientManager = client_manager\n self.grpc_bridge_factory = grpc_bridge_factory\n self.client_proxy_factory = grpc_client_proxy_factory\n\n def Join( # pylint: disable=invalid-name\n self,\n request_iterator: Iterator[ClientMessage],\n context: grpc.ServicerContext,\n ) -> Iterator[ServerMessage]:\n \"\"\"Facilitate bi-directional streaming of messages between server and client.\n\n Invoked by each gRPC client which participates in the network.\n\n Protocol:\n - The first message is sent from the server to the client\n - Both `ServerMessage` and `ClientMessage` are message \"wrappers\"\n wrapping the actual message\n - The `Join` method is (pretty much) unaware of the protocol\n \"\"\"\n peer: str = context.peer()\n bridge = self.grpc_bridge_factory()\n client_proxy = self.client_proxy_factory(peer, bridge)\n is_success = register_client_proxy(self.client_manager, client_proxy, context)\n\n if is_success:\n # Get iterators\n client_message_iterator = TimeoutIterator(\n iterator=request_iterator, reset_on_next=True\n )\n 
ins_wrapper_iterator = bridge.ins_wrapper_iterator()\n\n # All messages will be pushed to client bridge directly\n while True:\n try:\n # Get ins_wrapper from bridge and yield server_message\n ins_wrapper: InsWrapper = next(ins_wrapper_iterator)\n yield ins_wrapper.server_message\n\n # Set current timeout, might be None\n if ins_wrapper.timeout is not None:\n client_message_iterator.set_timeout(ins_wrapper.timeout)\n\n # Wait for client message\n client_message = next(client_message_iterator)\n\n if client_message is client_message_iterator.get_sentinel():\n # Important: calling `context.abort` in gRPC always\n # raises an exception so that all code after the call to\n # `context.abort` will not run. If subsequent code should\n # be executed, the `rpc_termination_callback` can be used\n # (as shown in the `register_client` function).\n details = f\"Timeout of {ins_wrapper.timeout}sec was exceeded.\"\n context.abort(\n code=grpc.StatusCode.DEADLINE_EXCEEDED,\n details=details,\n )\n # This return statement is only for the linter so it understands\n # that client_message in subsequent lines is not None\n # It does not understand that `context.abort` will terminate\n # this execution context by raising an exception.\n return\n\n bridge.set_res_wrapper(\n res_wrapper=ResWrapper(client_message=client_message)\n )\n except StopIteration:\n break\n", "path": "src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py"}], "after_files": [{"content": "# Copyright 2020 Flower Labs GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Servicer for FlowerService.\n\nRelevant knowledge for reading this modules code:\n- https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n\"\"\"\n\nimport uuid\nfrom typing import Callable, Iterator\n\nimport grpc\nfrom iterators import TimeoutIterator\n\nfrom flwr.proto import transport_pb2_grpc # pylint: disable=E0611\nfrom flwr.proto.transport_pb2 import ( # pylint: disable=E0611\n ClientMessage,\n ServerMessage,\n)\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.fleet.grpc_bidi.grpc_bridge import GrpcBridge, InsWrapper, ResWrapper\nfrom flwr.server.fleet.grpc_bidi.grpc_client_proxy import GrpcClientProxy\n\n\ndef default_bridge_factory() -> GrpcBridge:\n \"\"\"Return GrpcBridge instance.\"\"\"\n return GrpcBridge()\n\n\ndef default_grpc_client_proxy_factory(cid: str, bridge: GrpcBridge) -> GrpcClientProxy:\n \"\"\"Return GrpcClientProxy instance.\"\"\"\n return GrpcClientProxy(cid=cid, bridge=bridge)\n\n\ndef register_client_proxy(\n client_manager: ClientManager,\n client_proxy: GrpcClientProxy,\n context: grpc.ServicerContext,\n) -> bool:\n \"\"\"Try registering GrpcClientProxy with ClientManager.\"\"\"\n is_success = client_manager.register(client_proxy)\n if is_success:\n\n def rpc_termination_callback() -> None:\n client_proxy.bridge.close()\n client_manager.unregister(client_proxy)\n\n 
context.add_callback(rpc_termination_callback)\n return is_success\n\n\nclass FlowerServiceServicer(transport_pb2_grpc.FlowerServiceServicer):\n \"\"\"FlowerServiceServicer for bi-directional gRPC message stream.\"\"\"\n\n def __init__(\n self,\n client_manager: ClientManager,\n grpc_bridge_factory: Callable[[], GrpcBridge] = default_bridge_factory,\n grpc_client_proxy_factory: Callable[\n [str, GrpcBridge], GrpcClientProxy\n ] = default_grpc_client_proxy_factory,\n ) -> None:\n self.client_manager: ClientManager = client_manager\n self.grpc_bridge_factory = grpc_bridge_factory\n self.client_proxy_factory = grpc_client_proxy_factory\n\n def Join( # pylint: disable=invalid-name\n self,\n request_iterator: Iterator[ClientMessage],\n context: grpc.ServicerContext,\n ) -> Iterator[ServerMessage]:\n \"\"\"Facilitate bi-directional streaming of messages between server and client.\n\n Invoked by each gRPC client which participates in the network.\n\n Protocol:\n - The first message is sent from the server to the client\n - Both `ServerMessage` and `ClientMessage` are message \"wrappers\"\n wrapping the actual message\n - The `Join` method is (pretty much) unaware of the protocol\n \"\"\"\n # When running Flower behind a proxy, the peer can be the same for\n # different clients, so instead of `cid: str = context.peer()` we\n # use a `UUID4` that is unique.\n cid: str = uuid.uuid4().hex\n bridge = self.grpc_bridge_factory()\n client_proxy = self.client_proxy_factory(cid, bridge)\n is_success = register_client_proxy(self.client_manager, client_proxy, context)\n\n if is_success:\n # Get iterators\n client_message_iterator = TimeoutIterator(\n iterator=request_iterator, reset_on_next=True\n )\n ins_wrapper_iterator = bridge.ins_wrapper_iterator()\n\n # All messages will be pushed to client bridge directly\n while True:\n try:\n # Get ins_wrapper from bridge and yield server_message\n ins_wrapper: InsWrapper = next(ins_wrapper_iterator)\n yield ins_wrapper.server_message\n\n # Set current timeout, might be None\n if ins_wrapper.timeout is not None:\n client_message_iterator.set_timeout(ins_wrapper.timeout)\n\n # Wait for client message\n client_message = next(client_message_iterator)\n\n if client_message is client_message_iterator.get_sentinel():\n # Important: calling `context.abort` in gRPC always\n # raises an exception so that all code after the call to\n # `context.abort` will not run. If subsequent code should\n # be executed, the `rpc_termination_callback` can be used\n # (as shown in the `register_client` function).\n details = f\"Timeout of {ins_wrapper.timeout}sec was exceeded.\"\n context.abort(\n code=grpc.StatusCode.DEADLINE_EXCEEDED,\n details=details,\n )\n # This return statement is only for the linter so it understands\n # that client_message in subsequent lines is not None\n # It does not understand that `context.abort` will terminate\n # this execution context by raising an exception.\n return\n\n bridge.set_res_wrapper(\n res_wrapper=ResWrapper(client_message=client_message)\n )\n except StopIteration:\n break\n", "path": "src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py"}]}
| 3,107 | 284 |
gh_patches_debug_17340
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1089
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EC2 resource detector hangs for a long time outside of an EC2 instance
**Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
The environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS.
**Steps to reproduce**
Describe exactly how to reproduce the error. Include a code sample if applicable.
The following code reproduced the issue on my laptop:
```python
from opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector
from opentelemetry.sdk.resources import get_aggregated_resources
resource = get_aggregated_resources(
detectors=[AwsEc2ResourceDetector()]
)
```
**What is the expected behavior?**
It should complete quickly (this is the behavior I see running on an EC2 instance).
**What is the actual behavior?**
What did you see instead?
On my laptop, it will hand ~indefinitely.
Note: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors.
**Additional context**
I think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37
It looks like the request is using a 1000 _second_ timeout, which I suspect is intended to be a 1000 _millisecond_ timeout. At least with the server program I've been working on, that will block the startup of the program until the request completes.
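For context, `urllib.request.urlopen` interprets its `timeout` argument in seconds (it is passed to the underlying socket), so a bounded version of the token request would look roughly like the sketch below; the 5-second value is an arbitrary assumption, not the project's chosen fix:

```python
from urllib.request import Request, urlopen

request = Request(
    "http://169.254.169.254/latest/api/token",
    headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"},
    method="PUT",
)
# timeout is in seconds: timeout=1000 lets this call block for ~16 minutes
# outside EC2, while a small value makes it fail fast instead.
token = urlopen(request, timeout=5).read().decode("utf-8")
```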
You can verify by running:
```
curl http://169.254.169.254/latest/api/token
```
Which is one of the requests that the resource detector makes -- it should hang indefinitely as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import logging
17 from urllib.request import Request, urlopen
18
19 from opentelemetry.sdk.resources import Resource, ResourceDetector
20 from opentelemetry.semconv.resource import (
21 CloudPlatformValues,
22 CloudProviderValues,
23 ResourceAttributes,
24 )
25
26 logger = logging.getLogger(__name__)
27
28 _AWS_METADATA_TOKEN_HEADER = "X-aws-ec2-metadata-token"
29 _GET_METHOD = "GET"
30
31
32 def _aws_http_request(method, path, headers):
33 with urlopen(
34 Request(
35 "http://169.254.169.254" + path, headers=headers, method=method
36 ),
37 timeout=1000,
38 ) as response:
39 return response.read().decode("utf-8")
40
41
42 def _get_token():
43 return _aws_http_request(
44 "PUT",
45 "/latest/api/token",
46 {"X-aws-ec2-metadata-token-ttl-seconds": "60"},
47 )
48
49
50 def _get_identity(token):
51 return _aws_http_request(
52 _GET_METHOD,
53 "/latest/dynamic/instance-identity/document",
54 {_AWS_METADATA_TOKEN_HEADER: token},
55 )
56
57
58 def _get_host(token):
59 return _aws_http_request(
60 _GET_METHOD,
61 "/latest/meta-data/hostname",
62 {_AWS_METADATA_TOKEN_HEADER: token},
63 )
64
65
66 class AwsEc2ResourceDetector(ResourceDetector):
67 """Detects attribute values only available when the app is running on AWS
68 Elastic Compute Cloud (EC2) and returns them in a Resource.
69
70 Uses a special URI to get instance meta-data. See more: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
71 """
72
73 def detect(self) -> "Resource":
74 try:
75 token = _get_token()
76 identity_dict = json.loads(_get_identity(token))
77 hostname = _get_host(token)
78
79 return Resource(
80 {
81 ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
82 ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EC2.value,
83 ResourceAttributes.CLOUD_ACCOUNT_ID: identity_dict[
84 "accountId"
85 ],
86 ResourceAttributes.CLOUD_REGION: identity_dict["region"],
87 ResourceAttributes.CLOUD_AVAILABILITY_ZONE: identity_dict[
88 "availabilityZone"
89 ],
90 ResourceAttributes.HOST_ID: identity_dict["instanceId"],
91 ResourceAttributes.HOST_TYPE: identity_dict[
92 "instanceType"
93 ],
94 ResourceAttributes.HOST_NAME: hostname,
95 }
96 )
97 # pylint: disable=broad-except
98 except Exception as exception:
99 if self.raise_on_error:
100 raise exception
101
102 logger.warning("%s failed: %s", self.__class__.__name__, exception)
103 return Resource.get_empty()
104
```
Path: `sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import logging
17 import ssl
18 from urllib.request import Request, urlopen
19
20 from opentelemetry.sdk.resources import Resource, ResourceDetector
21 from opentelemetry.semconv.resource import (
22 CloudPlatformValues,
23 CloudProviderValues,
24 ResourceAttributes,
25 )
26
27 logger = logging.getLogger(__name__)
28
29 _CONTAINER_ID_LENGTH = 64
30 _GET_METHOD = "GET"
31
32
33 def _aws_http_request(method, path, cred_value):
34 with urlopen(
35 Request(
36 "https://kubernetes.default.svc" + path,
37 headers={"Authorization": cred_value},
38 method=method,
39 ),
40 timeout=2000,
41 context=ssl.create_default_context(
42 cafile="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
43 ),
44 ) as response:
45 return response.read().decode("utf-8")
46
47
48 def _get_k8s_cred_value():
49 try:
50 with open(
51 "/var/run/secrets/kubernetes.io/serviceaccount/token",
52 encoding="utf8",
53 ) as token_file:
54 return "Bearer " + token_file.read()
55 # pylint: disable=broad-except
56 except Exception as exception:
57 logger.error("Failed to get k8s token: %s", exception)
58 raise exception
59
60
61 def _is_eks(cred_value):
62 return _aws_http_request(
63 _GET_METHOD,
64 "/api/v1/namespaces/kube-system/configmaps/aws-auth",
65 cred_value,
66 )
67
68
69 def _get_cluster_info(cred_value):
70 return _aws_http_request(
71 _GET_METHOD,
72 "/api/v1/namespaces/amazon-cloudwatch/configmaps/cluster-info",
73 cred_value,
74 )
75
76
77 def _get_cluster_name(cred_value) -> str:
78 cluster_info = json.loads(_get_cluster_info(cred_value))
79 cluster_name = ""
80 try:
81 cluster_name = cluster_info["data"]["cluster.name"]
82 except KeyError as exception:
83 logger.warning("Cannot get cluster name on EKS: %s", exception)
84
85 return cluster_name
86
87
88 def _get_container_id():
89 container_id = ""
90 with open("/proc/self/cgroup", encoding="utf8") as container_info_file:
91 for raw_line in container_info_file.readlines():
92 line = raw_line.strip()
93 # Subsequent IDs should be the same, exit if found one
94 if len(line) > _CONTAINER_ID_LENGTH:
95 container_id = line[-_CONTAINER_ID_LENGTH:]
96 break
97 return container_id
98
99
100 class AwsEksResourceDetector(ResourceDetector):
101 """Detects attribute values only available when the app is running on AWS
102 Elastic Kubernetes Service (EKS) and returns them in a Resource.
103
104 NOTE: Uses a `cluster-info` configmap in the `amazon-cloudwatch` namespace. See more here: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-quickstart.html#Container-Insights-setup-EKS-quickstart-Fluentd
105 """
106
107 def detect(self) -> "Resource":
108 try:
109 cred_value = _get_k8s_cred_value()
110
111 if not _is_eks(cred_value):
112 raise RuntimeError(
113 "Could not confirm process is running on EKS."
114 )
115
116 cluster_name = _get_cluster_name(cred_value)
117 container_id = _get_container_id()
118
119 if not container_id and not cluster_name:
120 raise RuntimeError(
121 "Neither cluster name nor container ID found on EKS process."
122 )
123
124 return Resource(
125 {
126 ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
127 ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EKS.value,
128 ResourceAttributes.K8S_CLUSTER_NAME: cluster_name,
129 ResourceAttributes.CONTAINER_ID: container_id,
130 }
131 )
132 # pylint: disable=broad-except
133 except Exception as exception:
134 if self.raise_on_error:
135 raise exception
136
137 logger.warning("%s failed: %s", self.__class__.__name__, exception)
138 return Resource.get_empty()
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py
--- a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py
+++ b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py
@@ -34,7 +34,7 @@
Request(
"http://169.254.169.254" + path, headers=headers, method=method
),
- timeout=1000,
+ timeout=5,
) as response:
return response.read().decode("utf-8")
diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py
--- a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py
+++ b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py
@@ -37,7 +37,7 @@
headers={"Authorization": cred_value},
method=method,
),
- timeout=2000,
+ timeout=5,
context=ssl.create_default_context(
cafile="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
),
|
{"golden_diff": "diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py\n--- a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py\n+++ b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py\n@@ -34,7 +34,7 @@\n Request(\n \"http://169.254.169.254\" + path, headers=headers, method=method\n ),\n- timeout=1000,\n+ timeout=5,\n ) as response:\n return response.read().decode(\"utf-8\")\n \ndiff --git a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py\n--- a/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py\n+++ b/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py\n@@ -37,7 +37,7 @@\n headers={\"Authorization\": cred_value},\n method=method,\n ),\n- timeout=2000,\n+ timeout=5,\n context=ssl.create_default_context(\n cafile=\"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\"\n ),\n", "issue": "EC2 resource detector hangs for a long time outside of an EC2 instance\n**Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.\r\n\r\nThe environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS.\r\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\nThe following code reproduced the issue on my laptop:\r\n\r\n```python\r\nfrom opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector\r\nfrom opentelemetry.sdk.resources import get_aggregated_resources\r\n\r\nresource = get_aggregated_resources(\r\n detectors=[AwsEc2ResourceDetector()]\r\n)\r\n```\r\n\r\n**What is the expected behavior?**\r\n\r\nIt should complete quickly (this is the behavior I see running on an EC2 instance).\r\n\r\n**What is the actual behavior?**\r\n\r\nWhat did you see instead?\r\n\r\nOn my laptop, it will hand ~indefinitely.\r\n\r\nNote: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors.\r\n\r\n**Additional context**\r\n\r\nI think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37\r\n\r\nIt looks like the request is using a 1000 _second_ timeout which I suspect is intended to be a 1000 _millisecond_ timeout. 
At least with the server program I've been working on that will block the startup of the program until the request completes.\r\n\r\nYou can verify by running:\r\n\r\n```\r\ncurl http://169.254.169.254/latest/api/token\r\n```\r\n\r\nWhich is one of the requests that the resource detector makes -- it should hang indefinitely as well.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nfrom urllib.request import Request, urlopen\n\nfrom opentelemetry.sdk.resources import Resource, ResourceDetector\nfrom opentelemetry.semconv.resource import (\n CloudPlatformValues,\n CloudProviderValues,\n ResourceAttributes,\n)\n\nlogger = logging.getLogger(__name__)\n\n_AWS_METADATA_TOKEN_HEADER = \"X-aws-ec2-metadata-token\"\n_GET_METHOD = \"GET\"\n\n\ndef _aws_http_request(method, path, headers):\n with urlopen(\n Request(\n \"http://169.254.169.254\" + path, headers=headers, method=method\n ),\n timeout=1000,\n ) as response:\n return response.read().decode(\"utf-8\")\n\n\ndef _get_token():\n return _aws_http_request(\n \"PUT\",\n \"/latest/api/token\",\n {\"X-aws-ec2-metadata-token-ttl-seconds\": \"60\"},\n )\n\n\ndef _get_identity(token):\n return _aws_http_request(\n _GET_METHOD,\n \"/latest/dynamic/instance-identity/document\",\n {_AWS_METADATA_TOKEN_HEADER: token},\n )\n\n\ndef _get_host(token):\n return _aws_http_request(\n _GET_METHOD,\n \"/latest/meta-data/hostname\",\n {_AWS_METADATA_TOKEN_HEADER: token},\n )\n\n\nclass AwsEc2ResourceDetector(ResourceDetector):\n \"\"\"Detects attribute values only available when the app is running on AWS\n Elastic Compute Cloud (EC2) and returns them in a Resource.\n\n Uses a special URI to get instance meta-data. 
See more: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html\n \"\"\"\n\n def detect(self) -> \"Resource\":\n try:\n token = _get_token()\n identity_dict = json.loads(_get_identity(token))\n hostname = _get_host(token)\n\n return Resource(\n {\n ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,\n ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EC2.value,\n ResourceAttributes.CLOUD_ACCOUNT_ID: identity_dict[\n \"accountId\"\n ],\n ResourceAttributes.CLOUD_REGION: identity_dict[\"region\"],\n ResourceAttributes.CLOUD_AVAILABILITY_ZONE: identity_dict[\n \"availabilityZone\"\n ],\n ResourceAttributes.HOST_ID: identity_dict[\"instanceId\"],\n ResourceAttributes.HOST_TYPE: identity_dict[\n \"instanceType\"\n ],\n ResourceAttributes.HOST_NAME: hostname,\n }\n )\n # pylint: disable=broad-except\n except Exception as exception:\n if self.raise_on_error:\n raise exception\n\n logger.warning(\"%s failed: %s\", self.__class__.__name__, exception)\n return Resource.get_empty()\n", "path": "sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport ssl\nfrom urllib.request import Request, urlopen\n\nfrom opentelemetry.sdk.resources import Resource, ResourceDetector\nfrom opentelemetry.semconv.resource import (\n CloudPlatformValues,\n CloudProviderValues,\n ResourceAttributes,\n)\n\nlogger = logging.getLogger(__name__)\n\n_CONTAINER_ID_LENGTH = 64\n_GET_METHOD = \"GET\"\n\n\ndef _aws_http_request(method, path, cred_value):\n with urlopen(\n Request(\n \"https://kubernetes.default.svc\" + path,\n headers={\"Authorization\": cred_value},\n method=method,\n ),\n timeout=2000,\n context=ssl.create_default_context(\n cafile=\"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\"\n ),\n ) as response:\n return response.read().decode(\"utf-8\")\n\n\ndef _get_k8s_cred_value():\n try:\n with open(\n \"/var/run/secrets/kubernetes.io/serviceaccount/token\",\n encoding=\"utf8\",\n ) as token_file:\n return \"Bearer \" + token_file.read()\n # pylint: disable=broad-except\n except Exception as exception:\n logger.error(\"Failed to get k8s token: %s\", exception)\n raise exception\n\n\ndef _is_eks(cred_value):\n return _aws_http_request(\n _GET_METHOD,\n \"/api/v1/namespaces/kube-system/configmaps/aws-auth\",\n cred_value,\n )\n\n\ndef _get_cluster_info(cred_value):\n return _aws_http_request(\n _GET_METHOD,\n \"/api/v1/namespaces/amazon-cloudwatch/configmaps/cluster-info\",\n cred_value,\n )\n\n\ndef _get_cluster_name(cred_value) -> str:\n cluster_info = json.loads(_get_cluster_info(cred_value))\n cluster_name = \"\"\n try:\n cluster_name = cluster_info[\"data\"][\"cluster.name\"]\n except KeyError as exception:\n logger.warning(\"Cannot get cluster name on EKS: %s\", exception)\n\n return cluster_name\n\n\ndef _get_container_id():\n container_id = \"\"\n with 
open(\"/proc/self/cgroup\", encoding=\"utf8\") as container_info_file:\n for raw_line in container_info_file.readlines():\n line = raw_line.strip()\n # Subsequent IDs should be the same, exit if found one\n if len(line) > _CONTAINER_ID_LENGTH:\n container_id = line[-_CONTAINER_ID_LENGTH:]\n break\n return container_id\n\n\nclass AwsEksResourceDetector(ResourceDetector):\n \"\"\"Detects attribute values only available when the app is running on AWS\n Elastic Kubernetes Service (EKS) and returns them in a Resource.\n\n NOTE: Uses a `cluster-info` configmap in the `amazon-cloudwatch` namespace. See more here: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-quickstart.html#Container-Insights-setup-EKS-quickstart-Fluentd\n \"\"\"\n\n def detect(self) -> \"Resource\":\n try:\n cred_value = _get_k8s_cred_value()\n\n if not _is_eks(cred_value):\n raise RuntimeError(\n \"Could not confirm process is running on EKS.\"\n )\n\n cluster_name = _get_cluster_name(cred_value)\n container_id = _get_container_id()\n\n if not container_id and not cluster_name:\n raise RuntimeError(\n \"Neither cluster name nor container ID found on EKS process.\"\n )\n\n return Resource(\n {\n ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,\n ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EKS.value,\n ResourceAttributes.K8S_CLUSTER_NAME: cluster_name,\n ResourceAttributes.CONTAINER_ID: container_id,\n }\n )\n # pylint: disable=broad-except\n except Exception as exception:\n if self.raise_on_error:\n raise exception\n\n logger.warning(\"%s failed: %s\", self.__class__.__name__, exception)\n return Resource.get_empty()\n", "path": "sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nfrom urllib.request import Request, urlopen\n\nfrom opentelemetry.sdk.resources import Resource, ResourceDetector\nfrom opentelemetry.semconv.resource import (\n CloudPlatformValues,\n CloudProviderValues,\n ResourceAttributes,\n)\n\nlogger = logging.getLogger(__name__)\n\n_AWS_METADATA_TOKEN_HEADER = \"X-aws-ec2-metadata-token\"\n_GET_METHOD = \"GET\"\n\n\ndef _aws_http_request(method, path, headers):\n with urlopen(\n Request(\n \"http://169.254.169.254\" + path, headers=headers, method=method\n ),\n timeout=5,\n ) as response:\n return response.read().decode(\"utf-8\")\n\n\ndef _get_token():\n return _aws_http_request(\n \"PUT\",\n \"/latest/api/token\",\n {\"X-aws-ec2-metadata-token-ttl-seconds\": \"60\"},\n )\n\n\ndef _get_identity(token):\n return _aws_http_request(\n _GET_METHOD,\n \"/latest/dynamic/instance-identity/document\",\n {_AWS_METADATA_TOKEN_HEADER: token},\n )\n\n\ndef _get_host(token):\n return _aws_http_request(\n _GET_METHOD,\n \"/latest/meta-data/hostname\",\n {_AWS_METADATA_TOKEN_HEADER: token},\n )\n\n\nclass AwsEc2ResourceDetector(ResourceDetector):\n 
\"\"\"Detects attribute values only available when the app is running on AWS\n Elastic Compute Cloud (EC2) and returns them in a Resource.\n\n Uses a special URI to get instance meta-data. See more: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html\n \"\"\"\n\n def detect(self) -> \"Resource\":\n try:\n token = _get_token()\n identity_dict = json.loads(_get_identity(token))\n hostname = _get_host(token)\n\n return Resource(\n {\n ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,\n ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EC2.value,\n ResourceAttributes.CLOUD_ACCOUNT_ID: identity_dict[\n \"accountId\"\n ],\n ResourceAttributes.CLOUD_REGION: identity_dict[\"region\"],\n ResourceAttributes.CLOUD_AVAILABILITY_ZONE: identity_dict[\n \"availabilityZone\"\n ],\n ResourceAttributes.HOST_ID: identity_dict[\"instanceId\"],\n ResourceAttributes.HOST_TYPE: identity_dict[\n \"instanceType\"\n ],\n ResourceAttributes.HOST_NAME: hostname,\n }\n )\n # pylint: disable=broad-except\n except Exception as exception:\n if self.raise_on_error:\n raise exception\n\n logger.warning(\"%s failed: %s\", self.__class__.__name__, exception)\n return Resource.get_empty()\n", "path": "sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport ssl\nfrom urllib.request import Request, urlopen\n\nfrom opentelemetry.sdk.resources import Resource, ResourceDetector\nfrom opentelemetry.semconv.resource import (\n CloudPlatformValues,\n CloudProviderValues,\n ResourceAttributes,\n)\n\nlogger = logging.getLogger(__name__)\n\n_CONTAINER_ID_LENGTH = 64\n_GET_METHOD = \"GET\"\n\n\ndef _aws_http_request(method, path, cred_value):\n with urlopen(\n Request(\n \"https://kubernetes.default.svc\" + path,\n headers={\"Authorization\": cred_value},\n method=method,\n ),\n timeout=5,\n context=ssl.create_default_context(\n cafile=\"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\"\n ),\n ) as response:\n return response.read().decode(\"utf-8\")\n\n\ndef _get_k8s_cred_value():\n try:\n with open(\n \"/var/run/secrets/kubernetes.io/serviceaccount/token\",\n encoding=\"utf8\",\n ) as token_file:\n return \"Bearer \" + token_file.read()\n # pylint: disable=broad-except\n except Exception as exception:\n logger.error(\"Failed to get k8s token: %s\", exception)\n raise exception\n\n\ndef _is_eks(cred_value):\n return _aws_http_request(\n _GET_METHOD,\n \"/api/v1/namespaces/kube-system/configmaps/aws-auth\",\n cred_value,\n )\n\n\ndef _get_cluster_info(cred_value):\n return _aws_http_request(\n _GET_METHOD,\n \"/api/v1/namespaces/amazon-cloudwatch/configmaps/cluster-info\",\n cred_value,\n )\n\n\ndef _get_cluster_name(cred_value) -> str:\n cluster_info = json.loads(_get_cluster_info(cred_value))\n cluster_name = \"\"\n try:\n cluster_name = cluster_info[\"data\"][\"cluster.name\"]\n except 
KeyError as exception:\n logger.warning(\"Cannot get cluster name on EKS: %s\", exception)\n\n return cluster_name\n\n\ndef _get_container_id():\n container_id = \"\"\n with open(\"/proc/self/cgroup\", encoding=\"utf8\") as container_info_file:\n for raw_line in container_info_file.readlines():\n line = raw_line.strip()\n # Subsequent IDs should be the same, exit if found one\n if len(line) > _CONTAINER_ID_LENGTH:\n container_id = line[-_CONTAINER_ID_LENGTH:]\n break\n return container_id\n\n\nclass AwsEksResourceDetector(ResourceDetector):\n \"\"\"Detects attribute values only available when the app is running on AWS\n Elastic Kubernetes Service (EKS) and returns them in a Resource.\n\n NOTE: Uses a `cluster-info` configmap in the `amazon-cloudwatch` namespace. See more here: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-quickstart.html#Container-Insights-setup-EKS-quickstart-Fluentd\n \"\"\"\n\n def detect(self) -> \"Resource\":\n try:\n cred_value = _get_k8s_cred_value()\n\n if not _is_eks(cred_value):\n raise RuntimeError(\n \"Could not confirm process is running on EKS.\"\n )\n\n cluster_name = _get_cluster_name(cred_value)\n container_id = _get_container_id()\n\n if not container_id and not cluster_name:\n raise RuntimeError(\n \"Neither cluster name nor container ID found on EKS process.\"\n )\n\n return Resource(\n {\n ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,\n ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EKS.value,\n ResourceAttributes.K8S_CLUSTER_NAME: cluster_name,\n ResourceAttributes.CONTAINER_ID: container_id,\n }\n )\n # pylint: disable=broad-except\n except Exception as exception:\n if self.raise_on_error:\n raise exception\n\n logger.warning(\"%s failed: %s\", self.__class__.__name__, exception)\n return Resource.get_empty()\n", "path": "sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/eks.py"}]}
| 3,051 | 338 |
gh_patches_debug_36804
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-124
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement Default Absolute Value Functions in Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing the element-wise absolute value of a Tensor of arbitrary type. abs() should return a new tensor and abs_() should perform the operation inline. For a great reference on how these should behave, check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation of abs() and abs_() on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
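For illustration only, a rough sketch of what such a pair could look like on the numpy-backed `TensorBase` shown in the files below (names and structure assumed from `syft/tensor.py`; this is not an official implementation):

```python
import numpy as np

class TensorBase(object):
    def __init__(self, arr_like, encrypted=False):
        self.data = np.array(arr_like)
        self.encrypted = encrypted

    def abs(self):
        """Return a new TensorBase holding the element-wise absolute value."""
        if self.encrypted:
            return NotImplemented
        return TensorBase(np.absolute(self.data))

    def abs_(self):
        """Compute the element-wise absolute value in place."""
        if self.encrypted:
            return NotImplemented
        self.data = np.absolute(self.data)
        return self.data
```

For example, `TensorBase([-1.5, 2, -3]).abs()` would wrap `[1.5, 2.0, 3.0]`, and the same calls work for int and float arrays.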
Implement Default addmm Functionality in Base Tensor Type
**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing each operation on a Tensor of arbitrary type. addmm() should return a new tensor and addmm_() should perform the operation inline. For a reference on the operation this performs, check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.
**Acceptance Criteria:**
- If the Base Tensor type's attribute "encrypted" is set to True, it should return a NotImplemented error.
- a unit test demonstrating the correct operation of addmm() and addmm_() on the Base Tensor type implemented over int and float Tensors.
- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.
--- END ISSUE ---
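For orientation, here is a minimal sketch of the behaviour the issue asks for, written directly against NumPy rather than the repository's `TensorBase` class. The `beta * mat + alpha * (tensor1 @ tensor2)` formula is an assumption taken from PyTorch's documented `addmm` semantics, and all variable names below are illustrative:
```python
import numpy as np

# abs(): hand back the elementwise absolute value as new data;
# abs_(): overwrite the stored data in place with the same result.
data = np.array([-1.5, 2.0, -3.0])
abs_copy = np.absolute(data)          # what abs() should return
data = np.absolute(data)              # what abs_() should leave behind

# addmm(): beta * mat + alpha * (tensor1 @ tensor2), following PyTorch.
tensor1 = np.array([[1.0, 2.0], [3.0, 4.0]])
tensor2 = np.array([[5.0, 6.0], [7.0, 8.0]])
mat = np.ones((2, 2))
beta, alpha = 1, 1
addmm_result = beta * mat + alpha * np.matmul(tensor1, tensor2)
print(abs_copy, addmm_result, sep="\n")
```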
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/tensor.py`
Content:
```
1 import numpy as np
2
3 def _ensure_ndarray(arr):
4 if not isinstance(arr, np.ndarray):
5 arr = np.array(arr)
6
7 return arr
8
9 class TensorBase(object):
10 """
11 A base tensor class that perform basic element-wise operation such as
12 addition, subtraction, multiplication and division
13 """
14
15 def __init__(self, arr_like, encrypted=False):
16 self.data = _ensure_ndarray(arr_like)
17 self.encrypted = encrypted
18
19 def __add__(self, arr_like):
20 """Performs element-wise addition between two array like objects"""
21 if self.encrypted:
22 return NotImplemented
23
24 arr_like = _ensure_ndarray(arr_like)
25 return self.data + arr_like
26
27 def __iadd__(self, arr_like):
28 """Performs in place element-wise addition between two array like objects"""
29 if self.encrypted:
30 return NotImplemented
31
32 arr_like = _ensure_ndarray(arr_like)
33 self.data = self.data + arr_like
34 return self.data
35
36 def __sub__(self, arr_like):
37 """Performs element-wise subtraction between two array like objects"""
38 if self.encrypted:
39 return NotImplemented
40
41 arr_like = _ensure_ndarray(arr_like)
42 return self.data - arr_like
43
44 def __isub__(self, arr_like):
45 """Performs in place element-wise subtraction between two array like objects"""
46 if self.encrypted:
47 return NotImplemented
48
49 arr_like = _ensure_ndarray(arr_like)
50 self.data = self.data - arr_like
51 return self.data
52
53 def __mul__(self, arr_like):
54 """Performs element-wise multiplication between two array like objects"""
55 if self.encrypted:
56 return NotImplemented
57
58 arr_like = _ensure_ndarray(arr_like)
59 return self.data * arr_like
60
61 def __imul__(self, arr_like):
62 """Performs in place element-wise multiplication between two array like objects"""
63 if self.encrypted:
64 return NotImplemented
65
66 arr_like = _ensure_ndarray(arr_like)
67 self.data = self.data * arr_like
68 return self.data
69
70 def __truediv__(self, arr_like):
71 """Performs element-wise division between two array like objects"""
72 if self.encrypted:
73 return NotImplemented
74
75 arr_like = _ensure_ndarray(arr_like)
76 return self.data / arr_like
77
78 def __itruediv__(self, arr_like):
79 """Performs in place element-wise subtraction between two array like objects"""
80 if self.encrypted:
81 return NotImplemented
82
83 arr_like = _ensure_ndarray(arr_like)
84 self.data = self.data / arr_like
85 return self.data
86
87 def shape(self):
88 """Returns a tuple of input array dimensions."""
89 if self.encrypted:
90 return NotImplemented
91
92 return self.data.shape
93
94 def sum(self, dim=None):
95 """Returns the sum of all elements in the input array."""
96 if self.encrypted:
97 return NotImplemented
98
99 if dim is None:
100 return self.data.sum()
101 else:
102 return self.data.sum(axis=dim)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/syft/tensor.py b/syft/tensor.py
--- a/syft/tensor.py
+++ b/syft/tensor.py
@@ -84,6 +84,19 @@
self.data = self.data / arr_like
return self.data
+ def abs(self):
+ """Returns absolute value of tensor as a new tensor"""
+ if self.encrypted:
+ return NotImplemented
+ return np.absolute(self.data)
+
+ def abs_(self):
+ """Replaces tensor values with its absolute value"""
+ if self.encrypted:
+ return NotImplemented
+ self.data=np.absolute(self.data)
+ return self.data
+
def shape(self):
"""Returns a tuple of input array dimensions."""
if self.encrypted:
@@ -100,3 +113,33 @@
return self.data.sum()
else:
return self.data.sum(axis=dim)
+
+ def addmm(self,tensor2,mat,beta=1,alpha=1):
+ """Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and returns the result as a Tensor
+ Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.
+ *If both tensors are 1-dimensional, their dot product is returned.
+ *If both arguments are 2-D they are multiplied like conventional matrices.
+ *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.
+ *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
+ *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.
+ """
+ if self.encrypted or tensor2.encrypted or mat.encrypted:
+ return NotImplemented
+ else:
+ return TensorBase(np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha)))
+
+ def addmm_(self,tensor2,mat,beta=1,alpha=1):
+ """Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and updates Tensor1 with result and reurns it
+ Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.
+ *If both tensors are 1-dimensional, their dot product is returned.
+ *If both arguments are 2-D they are multiplied like conventional matrices.
+ *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.
+ *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
+ *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.
+ """
+ if self.encrypted is True or tensor2.encrypted is True or mat.encrypted is True:
+ return NotImplemented
+ else:
+ self.data=np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha))
+ return self
+
|
{"golden_diff": "diff --git a/syft/tensor.py b/syft/tensor.py\n--- a/syft/tensor.py\n+++ b/syft/tensor.py\n@@ -84,6 +84,19 @@\n self.data = self.data / arr_like\n return self.data\n \n+ def abs(self):\n+ \"\"\"Returns absolute value of tensor as a new tensor\"\"\"\n+ if self.encrypted:\n+ return NotImplemented\n+ return np.absolute(self.data)\n+ \n+ def abs_(self):\n+ \"\"\"Replaces tensor values with its absolute value\"\"\"\n+ if self.encrypted:\n+ return NotImplemented\n+ self.data=np.absolute(self.data)\n+ return self.data\n+\n def shape(self):\n \"\"\"Returns a tuple of input array dimensions.\"\"\"\n if self.encrypted:\n@@ -100,3 +113,33 @@\n return self.data.sum()\n else:\n return self.data.sum(axis=dim)\n+ \n+ def addmm(self,tensor2,mat,beta=1,alpha=1):\n+ \"\"\"Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and returns the result as a Tensor\n+ Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.\n+ *If both tensors are 1-dimensional, their dot product is returned.\n+ *If both arguments are 2-D they are multiplied like conventional matrices.\n+ *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.\n+ *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n+ *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.\n+ \"\"\"\n+ if self.encrypted or tensor2.encrypted or mat.encrypted:\n+ return NotImplemented\n+ else:\n+ return TensorBase(np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha)))\n+\n+ def addmm_(self,tensor2,mat,beta=1,alpha=1):\n+ \"\"\"Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and updates Tensor1 with result and reurns it\n+ Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.\n+ *If both tensors are 1-dimensional, their dot product is returned.\n+ *If both arguments are 2-D they are multiplied like conventional matrices.\n+ *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.\n+ *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n+ *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.\n+ \"\"\"\n+ if self.encrypted is True or tensor2.encrypted is True or mat.encrypted is True:\n+ return NotImplemented\n+ else:\n+ self.data=np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha))\n+ return self\n+\n", "issue": "Implement Default Absolute Value Functions in Base Tensor Type\n**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing the elementwise absolute value of a Tensor of arbitrary type. abs() should return a new tensor and abs_ should perform the operation inline. 
For a great reference on how \r\n\r\n**Acceptance Criteria:**\r\n- If the Base Tensor type's attribute \"encrypted\" is set to True, it should return a NotImplemented error.\r\n- a unit test demonstrating the correct operation of abs() and abs_() on the Base Tensor type implemented over int and float Tensors.\r\n- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.\nImplement Default addmm Functionality in Base Tensor Type\n**User Story A:** As a Data Scientist using Syft's Base Tensor type, we want to implement a default method for computing each operation on a Tensor of arbitrary type. addmm_() should return a new tensor and addmm_() should perform the operation inline. For a reference on the operation this performs check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation.\r\n\r\n**Acceptance Criteria:**\r\n- If the Base Tensor type's attribute \"encrypted\" is set to True, it should return a NotImplemented error.\r\n- a unit test demonstrating the correct operation of addmm() and addmm_() on the Base Tensor type implemented over int and float Tensors.\r\n- inline documentation in the python code. For inspiration on inline documentation, please check out [PyTorch](http://pytorch.org/docs/master/tensors.html)'s documentation for this operator.\n", "before_files": [{"content": "import numpy as np\n\ndef _ensure_ndarray(arr):\n if not isinstance(arr, np.ndarray):\n arr = np.array(arr)\n\n return arr\n\nclass TensorBase(object):\n \"\"\"\n A base tensor class that perform basic element-wise operation such as\n addition, subtraction, multiplication and division\n \"\"\"\n\n def __init__(self, arr_like, encrypted=False):\n self.data = _ensure_ndarray(arr_like)\n self.encrypted = encrypted\n\n def __add__(self, arr_like):\n \"\"\"Performs element-wise addition between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data + arr_like\n\n def __iadd__(self, arr_like):\n \"\"\"Performs in place element-wise addition between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data + arr_like\n return self.data\n\n def __sub__(self, arr_like):\n \"\"\"Performs element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data - arr_like\n\n def __isub__(self, arr_like):\n \"\"\"Performs in place element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data - arr_like\n return self.data\n\n def __mul__(self, arr_like):\n \"\"\"Performs element-wise multiplication between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data * arr_like\n\n def __imul__(self, arr_like):\n \"\"\"Performs in place element-wise multiplication between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data * arr_like\n return self.data\n\n def __truediv__(self, arr_like):\n \"\"\"Performs element-wise division between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return 
self.data / arr_like\n\n def __itruediv__(self, arr_like):\n \"\"\"Performs in place element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data / arr_like\n return self.data\n\n def shape(self):\n \"\"\"Returns a tuple of input array dimensions.\"\"\"\n if self.encrypted:\n return NotImplemented\n\n return self.data.shape\n\n def sum(self, dim=None):\n \"\"\"Returns the sum of all elements in the input array.\"\"\"\n if self.encrypted:\n return NotImplemented\n\n if dim is None:\n return self.data.sum()\n else:\n return self.data.sum(axis=dim)\n", "path": "syft/tensor.py"}], "after_files": [{"content": "import numpy as np\n\ndef _ensure_ndarray(arr):\n if not isinstance(arr, np.ndarray):\n arr = np.array(arr)\n\n return arr\n\nclass TensorBase(object):\n \"\"\"\n A base tensor class that perform basic element-wise operation such as\n addition, subtraction, multiplication and division\n \"\"\"\n\n def __init__(self, arr_like, encrypted=False):\n self.data = _ensure_ndarray(arr_like)\n self.encrypted = encrypted\n\n def __add__(self, arr_like):\n \"\"\"Performs element-wise addition between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data + arr_like\n\n def __iadd__(self, arr_like):\n \"\"\"Performs in place element-wise addition between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data + arr_like\n return self.data\n\n def __sub__(self, arr_like):\n \"\"\"Performs element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data - arr_like\n\n def __isub__(self, arr_like):\n \"\"\"Performs in place element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data - arr_like\n return self.data\n\n def __mul__(self, arr_like):\n \"\"\"Performs element-wise multiplication between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data * arr_like\n\n def __imul__(self, arr_like):\n \"\"\"Performs in place element-wise multiplication between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data * arr_like\n return self.data\n\n def __truediv__(self, arr_like):\n \"\"\"Performs element-wise division between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n return self.data / arr_like\n\n def __itruediv__(self, arr_like):\n \"\"\"Performs in place element-wise subtraction between two array like objects\"\"\"\n if self.encrypted:\n return NotImplemented\n\n arr_like = _ensure_ndarray(arr_like)\n self.data = self.data / arr_like\n return self.data\n\n def abs(self):\n \"\"\"Returns absolute value of tensor as a new tensor\"\"\"\n if self.encrypted:\n return NotImplemented\n return np.absolute(self.data)\n \n def abs_(self):\n \"\"\"Replaces tensor values with its absolute value\"\"\"\n if self.encrypted:\n return NotImplemented\n self.data=np.absolute(self.data)\n return self.data\n\n def shape(self):\n \"\"\"Returns a tuple of input array dimensions.\"\"\"\n if self.encrypted:\n return 
NotImplemented\n\n return self.data.shape\n\n def sum(self, dim=None):\n \"\"\"Returns the sum of all elements in the input array.\"\"\"\n if self.encrypted:\n return NotImplemented\n\n if dim is None:\n return self.data.sum()\n else:\n return self.data.sum(axis=dim)\n \n def addmm(self,tensor2,mat,beta=1,alpha=1):\n \"\"\"Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and returns the result as a Tensor\n Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.\n *If both tensors are 1-dimensional, their dot product is returned.\n *If both arguments are 2-D they are multiplied like conventional matrices.\n *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.\n *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.\n \"\"\"\n if self.encrypted or tensor2.encrypted or mat.encrypted:\n return NotImplemented\n else:\n return TensorBase(np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha)))\n\n def addmm_(self,tensor2,mat,beta=1,alpha=1):\n \"\"\"Performs ((Mat*Beta)+((Tensor1.Tensor2)*Alpha)) and updates Tensor1 with result and reurns it\n Tensor1.Tensor2 is performed as Matrix product of two array The behavior depends on the arguments in the following way.\n *If both tensors are 1-dimensional, their dot product is returned.\n *If both arguments are 2-D they are multiplied like conventional matrices.\n *If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly.\n *If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n *If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.\n \"\"\"\n if self.encrypted is True or tensor2.encrypted is True or mat.encrypted is True:\n return NotImplemented\n else:\n self.data=np.array((mat*beta)+((np.matmul(self.data,tensor2.data))*alpha))\n return self\n\n", "path": "syft/tensor.py"}]}
| 1,485 | 756 |
gh_patches_debug_4215
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1956
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Memory permission backend implementation of remove_principal() is wrong
According to the `PermissionBase` docstring, `remove_principal` is supposed to `Remove a principal from every user`. In other words, `remove_principal(principal)` is equivalent to `remove_user_principal(user_id, principal) for user_id in all_possible_user_ids`. However, the current implementation stores all permissions of all kinds in one hash table, and removes the principal from permissions of non-user things as well.
--- END ISSUE ---
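To make "remove a principal from every user" concrete, here is a small sketch of the backend's flat store and the intended filtering. The keys and principals are made up for illustration; only the `user:` / `permission:` key prefixes reflect how the memory backend labels its entries:
```python
store = {
    "user:alice": {"system.Authenticated", "group:editors"},
    "permission:/buckets/b1:write": {"group:editors"},
}

# Removing a principal "from every user" should only visit user entries,
# leaving object ACEs (the permission:* keys) untouched.
principal = "group:editors"
for key, principals in store.items():
    if key.startswith("user:"):
        principals.discard(principal)

assert store["user:alice"] == {"system.Authenticated"}
assert store["permission:/buckets/b1:write"] == {"group:editors"}
```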
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/permission/memory.py`
Content:
```
1 import re
2
3 from kinto.core.decorators import synchronized
4 from kinto.core.permission import PermissionBase
5
6
7 class Permission(PermissionBase):
8 """Permission backend implementation in local process memory.
9
10 Enable in configuration::
11
12 kinto.permission_backend = kinto.core.permission.memory
13
14 :noindex:
15 """
16
17 def __init__(self, *args, **kwargs):
18 super().__init__(*args, **kwargs)
19 self.flush()
20
21 def initialize_schema(self, dry_run=False):
22 # Nothing to do.
23 pass
24
25 def flush(self):
26 self._store = {}
27
28 @synchronized
29 def add_user_principal(self, user_id, principal):
30 user_key = f"user:{user_id}"
31 user_principals = self._store.get(user_key, set())
32 user_principals.add(principal)
33 self._store[user_key] = user_principals
34
35 @synchronized
36 def remove_user_principal(self, user_id, principal):
37 user_key = f"user:{user_id}"
38 user_principals = self._store.get(user_key, set())
39 try:
40 user_principals.remove(principal)
41 except KeyError:
42 pass
43 if len(user_principals) == 0:
44 if user_key in self._store:
45 del self._store[user_key]
46 else:
47 self._store[user_key] = user_principals
48
49 @synchronized
50 def remove_principal(self, principal):
51 for user_principals in self._store.values():
52 try:
53 user_principals.remove(principal)
54 except KeyError:
55 pass
56
57 @synchronized
58 def get_user_principals(self, user_id):
59 # Fetch the groups the user is in.
60 user_key = f"user:{user_id}"
61 members = self._store.get(user_key, set())
62 # Fetch the groups system.Authenticated is in.
63 group_authenticated = self._store.get("user:system.Authenticated", set())
64 return members | group_authenticated
65
66 @synchronized
67 def add_principal_to_ace(self, object_id, permission, principal):
68 permission_key = f"permission:{object_id}:{permission}"
69 object_permission_principals = self._store.get(permission_key, set())
70 object_permission_principals.add(principal)
71 self._store[permission_key] = object_permission_principals
72
73 @synchronized
74 def remove_principal_from_ace(self, object_id, permission, principal):
75 permission_key = f"permission:{object_id}:{permission}"
76 object_permission_principals = self._store.get(permission_key, set())
77 try:
78 object_permission_principals.remove(principal)
79 except KeyError:
80 pass
81 if len(object_permission_principals) == 0:
82 if permission_key in self._store:
83 del self._store[permission_key]
84 else:
85 self._store[permission_key] = object_permission_principals
86
87 @synchronized
88 def get_object_permission_principals(self, object_id, permission):
89 permission_key = f"permission:{object_id}:{permission}"
90 members = self._store.get(permission_key, set())
91 return members
92
93 @synchronized
94 def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):
95 principals = set(principals)
96 candidates = []
97 if bound_permissions is None:
98 for key, value in self._store.items():
99 _, object_id, permission = key.split(":", 2)
100 candidates.append((object_id, permission, value))
101 else:
102 for pattern, perm in bound_permissions:
103 id_match = ".*" if with_children else "[^/]+"
104 regexp = re.compile(f"^{pattern.replace('*', id_match)}$")
105 for key, value in self._store.items():
106 if key.endswith(perm):
107 object_id = key.split(":")[1]
108 if regexp.match(object_id):
109 candidates.append((object_id, perm, value))
110
111 perms_by_object_id = {}
112 for (object_id, perm, value) in candidates:
113 if len(principals & value) > 0:
114 perms_by_object_id.setdefault(object_id, set()).add(perm)
115 return perms_by_object_id
116
117 @synchronized
118 def get_authorized_principals(self, bound_permissions):
119 principals = set()
120 for obj_id, perm in bound_permissions:
121 principals |= self.get_object_permission_principals(obj_id, perm)
122 return principals
123
124 @synchronized
125 def get_objects_permissions(self, objects_ids, permissions=None):
126 result = []
127 for object_id in objects_ids:
128 if permissions is None:
129 aces = [k for k in self._store.keys() if k.startswith(f"permission:{object_id}:")]
130 else:
131 aces = [f"permission:{object_id}:{permission}" for permission in permissions]
132 perms = {}
133 for ace in aces:
134 # Should work with 'permission:/url/id:object:create'.
135 permission = ace.split(":", 2)[2]
136 perms[permission] = set(self._store[ace])
137 result.append(perms)
138 return result
139
140 @synchronized
141 def replace_object_permissions(self, object_id, permissions):
142 for permission, principals in permissions.items():
143 permission_key = f"permission:{object_id}:{permission}"
144 if permission_key in self._store and len(principals) == 0:
145 del self._store[permission_key]
146 elif principals:
147 self._store[permission_key] = set(principals)
148 return permissions
149
150 @synchronized
151 def delete_object_permissions(self, *object_id_list):
152 to_delete = []
153 for key in self._store.keys():
154 object_id = key.split(":")[1]
155 for pattern in object_id_list:
156 regexp = re.compile(f"^{pattern.replace('*', '.*')}$")
157 if regexp.match(object_id):
158 to_delete.append(key)
159 for k in to_delete:
160 del self._store[k]
161
162
163 def load_from_config(config):
164 return Permission()
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/core/permission/memory.py b/kinto/core/permission/memory.py
--- a/kinto/core/permission/memory.py
+++ b/kinto/core/permission/memory.py
@@ -48,7 +48,9 @@
@synchronized
def remove_principal(self, principal):
- for user_principals in self._store.values():
+ for key, user_principals in self._store.items():
+ if not key.startswith("user:"):
+ continue
try:
user_principals.remove(principal)
except KeyError:
|
{"golden_diff": "diff --git a/kinto/core/permission/memory.py b/kinto/core/permission/memory.py\n--- a/kinto/core/permission/memory.py\n+++ b/kinto/core/permission/memory.py\n@@ -48,7 +48,9 @@\n \n @synchronized\n def remove_principal(self, principal):\n- for user_principals in self._store.values():\n+ for key, user_principals in self._store.items():\n+ if not key.startswith(\"user:\"):\n+ continue\n try:\n user_principals.remove(principal)\n except KeyError:\n", "issue": "Memory permission backend implementation of remove_principal() is wrong\nAccording to the `PermissionBase` docstring, `remove_principal` is supposed to `Remove a principal from every user`. In other words, `remove_principal(principal)` is equivalent to `remove_user_principal(user_id, principal) for user_id in all_possible_user_ids`. However, the current implementation stores all permissions of all kinds in one hash table, and removes the principal from permissions of non-user things as well.\n", "before_files": [{"content": "import re\n\nfrom kinto.core.decorators import synchronized\nfrom kinto.core.permission import PermissionBase\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._store = {}\n\n @synchronized\n def add_user_principal(self, user_id, principal):\n user_key = f\"user:{user_id}\"\n user_principals = self._store.get(user_key, set())\n user_principals.add(principal)\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_user_principal(self, user_id, principal):\n user_key = f\"user:{user_id}\"\n user_principals = self._store.get(user_key, set())\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n if len(user_principals) == 0:\n if user_key in self._store:\n del self._store[user_key]\n else:\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_principal(self, principal):\n for user_principals in self._store.values():\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n\n @synchronized\n def get_user_principals(self, user_id):\n # Fetch the groups the user is in.\n user_key = f\"user:{user_id}\"\n members = self._store.get(user_key, set())\n # Fetch the groups system.Authenticated is in.\n group_authenticated = self._store.get(\"user:system.Authenticated\", set())\n return members | group_authenticated\n\n @synchronized\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = f\"permission:{object_id}:{permission}\"\n object_permission_principals = self._store.get(permission_key, set())\n object_permission_principals.add(principal)\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = f\"permission:{object_id}:{permission}\"\n object_permission_principals = self._store.get(permission_key, set())\n try:\n object_permission_principals.remove(principal)\n except KeyError:\n pass\n if len(object_permission_principals) == 0:\n if permission_key in self._store:\n del self._store[permission_key]\n else:\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def get_object_permission_principals(self, object_id, 
permission):\n permission_key = f\"permission:{object_id}:{permission}\"\n members = self._store.get(permission_key, set())\n return members\n\n @synchronized\n def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):\n principals = set(principals)\n candidates = []\n if bound_permissions is None:\n for key, value in self._store.items():\n _, object_id, permission = key.split(\":\", 2)\n candidates.append((object_id, permission, value))\n else:\n for pattern, perm in bound_permissions:\n id_match = \".*\" if with_children else \"[^/]+\"\n regexp = re.compile(f\"^{pattern.replace('*', id_match)}$\")\n for key, value in self._store.items():\n if key.endswith(perm):\n object_id = key.split(\":\")[1]\n if regexp.match(object_id):\n candidates.append((object_id, perm, value))\n\n perms_by_object_id = {}\n for (object_id, perm, value) in candidates:\n if len(principals & value) > 0:\n perms_by_object_id.setdefault(object_id, set()).add(perm)\n return perms_by_object_id\n\n @synchronized\n def get_authorized_principals(self, bound_permissions):\n principals = set()\n for obj_id, perm in bound_permissions:\n principals |= self.get_object_permission_principals(obj_id, perm)\n return principals\n\n @synchronized\n def get_objects_permissions(self, objects_ids, permissions=None):\n result = []\n for object_id in objects_ids:\n if permissions is None:\n aces = [k for k in self._store.keys() if k.startswith(f\"permission:{object_id}:\")]\n else:\n aces = [f\"permission:{object_id}:{permission}\" for permission in permissions]\n perms = {}\n for ace in aces:\n # Should work with 'permission:/url/id:object:create'.\n permission = ace.split(\":\", 2)[2]\n perms[permission] = set(self._store[ace])\n result.append(perms)\n return result\n\n @synchronized\n def replace_object_permissions(self, object_id, permissions):\n for permission, principals in permissions.items():\n permission_key = f\"permission:{object_id}:{permission}\"\n if permission_key in self._store and len(principals) == 0:\n del self._store[permission_key]\n elif principals:\n self._store[permission_key] = set(principals)\n return permissions\n\n @synchronized\n def delete_object_permissions(self, *object_id_list):\n to_delete = []\n for key in self._store.keys():\n object_id = key.split(\":\")[1]\n for pattern in object_id_list:\n regexp = re.compile(f\"^{pattern.replace('*', '.*')}$\")\n if regexp.match(object_id):\n to_delete.append(key)\n for k in to_delete:\n del self._store[k]\n\n\ndef load_from_config(config):\n return Permission()\n", "path": "kinto/core/permission/memory.py"}], "after_files": [{"content": "import re\n\nfrom kinto.core.decorators import synchronized\nfrom kinto.core.permission import PermissionBase\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._store = {}\n\n @synchronized\n def add_user_principal(self, user_id, principal):\n user_key = f\"user:{user_id}\"\n user_principals = self._store.get(user_key, set())\n user_principals.add(principal)\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_user_principal(self, user_id, principal):\n user_key = f\"user:{user_id}\"\n user_principals = 
self._store.get(user_key, set())\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n if len(user_principals) == 0:\n if user_key in self._store:\n del self._store[user_key]\n else:\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_principal(self, principal):\n for key, user_principals in self._store.items():\n if not key.startswith(\"user:\"):\n continue\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n\n @synchronized\n def get_user_principals(self, user_id):\n # Fetch the groups the user is in.\n user_key = f\"user:{user_id}\"\n members = self._store.get(user_key, set())\n # Fetch the groups system.Authenticated is in.\n group_authenticated = self._store.get(\"user:system.Authenticated\", set())\n return members | group_authenticated\n\n @synchronized\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = f\"permission:{object_id}:{permission}\"\n object_permission_principals = self._store.get(permission_key, set())\n object_permission_principals.add(principal)\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = f\"permission:{object_id}:{permission}\"\n object_permission_principals = self._store.get(permission_key, set())\n try:\n object_permission_principals.remove(principal)\n except KeyError:\n pass\n if len(object_permission_principals) == 0:\n if permission_key in self._store:\n del self._store[permission_key]\n else:\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def get_object_permission_principals(self, object_id, permission):\n permission_key = f\"permission:{object_id}:{permission}\"\n members = self._store.get(permission_key, set())\n return members\n\n @synchronized\n def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):\n principals = set(principals)\n candidates = []\n if bound_permissions is None:\n for key, value in self._store.items():\n _, object_id, permission = key.split(\":\", 2)\n candidates.append((object_id, permission, value))\n else:\n for pattern, perm in bound_permissions:\n id_match = \".*\" if with_children else \"[^/]+\"\n regexp = re.compile(f\"^{pattern.replace('*', id_match)}$\")\n for key, value in self._store.items():\n if key.endswith(perm):\n object_id = key.split(\":\")[1]\n if regexp.match(object_id):\n candidates.append((object_id, perm, value))\n\n perms_by_object_id = {}\n for (object_id, perm, value) in candidates:\n if len(principals & value) > 0:\n perms_by_object_id.setdefault(object_id, set()).add(perm)\n return perms_by_object_id\n\n @synchronized\n def get_authorized_principals(self, bound_permissions):\n principals = set()\n for obj_id, perm in bound_permissions:\n principals |= self.get_object_permission_principals(obj_id, perm)\n return principals\n\n @synchronized\n def get_objects_permissions(self, objects_ids, permissions=None):\n result = []\n for object_id in objects_ids:\n if permissions is None:\n aces = [k for k in self._store.keys() if k.startswith(f\"permission:{object_id}:\")]\n else:\n aces = [f\"permission:{object_id}:{permission}\" for permission in permissions]\n perms = {}\n for ace in aces:\n # Should work with 'permission:/url/id:object:create'.\n permission = ace.split(\":\", 2)[2]\n perms[permission] = set(self._store[ace])\n result.append(perms)\n return result\n\n @synchronized\n def replace_object_permissions(self, object_id, 
permissions):\n for permission, principals in permissions.items():\n permission_key = f\"permission:{object_id}:{permission}\"\n if permission_key in self._store and len(principals) == 0:\n del self._store[permission_key]\n elif principals:\n self._store[permission_key] = set(principals)\n return permissions\n\n @synchronized\n def delete_object_permissions(self, *object_id_list):\n to_delete = []\n for key in self._store.keys():\n object_id = key.split(\":\")[1]\n for pattern in object_id_list:\n regexp = re.compile(f\"^{pattern.replace('*', '.*')}$\")\n if regexp.match(object_id):\n to_delete.append(key)\n for k in to_delete:\n del self._store[k]\n\n\ndef load_from_config(config):\n return Permission()\n", "path": "kinto/core/permission/memory.py"}]}
| 2,032 | 124 |
gh_patches_debug_32298
|
rasdani/github-patches
|
git_diff
|
facebookresearch__xformers-309
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't deepcopy an xformer model with triton 2 update
# 🐛 Bug
Given the recent Triton 2 update, an xFormers model cannot be deep-copied. Deep-copying is an important requirement for numerous tasks, including EMA (without knowledge of the generating class / hyper-params).
## xformers ViT-B Config
```yaml
reversible: False
block_type: "encoder"
num_layers: 12
dim_model: 768
layer_norm_style: "pre"
multi_head_config:
num_heads: 12
residual_dropout: 0.1 # (1) tried without this, (2) swapping this for DropPath, (3) with regular dropout
use_rotary_embeddings: False
attention:
name: "scaled_dot_product"
dropout: 0.0
causal: False
feedforward_config:
name: "MLP"
dropout: 0.0
activation: "gelu"
hidden_layer_multiplier: 4
```
## To reproduce
```python
from copy import deepcopy
with open(transfomer_config_file, "rb") as fileptr:
self.model_config = yaml.load(fileptr, Loader=yaml.FullLoader)
model = xFormer.from_config(xFormerConfig([self.model_config]))
deepcopy(model)
```
Error is:
```bash
TypeError: cannot pickle 'PyCapsule' object
```
--- END ISSUE ---
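The failure comes from `copy.deepcopy` falling back to pickling attributes it does not know how to copy, and the Triton kernel objects stored on the module carry a `PyCapsule` handle, which cannot be pickled. Below is a standalone sketch of the failure mode and of a lazy-initialization workaround. The class names are invented, and `datetime.datetime_CAPI` is used only because it happens to be a readily available `PyCapsule`; it stands in for a Triton kernel handle:
```python
import copy
import datetime

CAPSULE = datetime.datetime_CAPI      # any PyCapsule reproduces the error


class EagerKernel:
    """Resolves the kernel handle at construction time."""
    def __init__(self):
        self.kernel = CAPSULE


class LazyKernel:
    """Defers the lookup until first use, so fresh copies hold nothing unpicklable."""
    def __init__(self):
        self.kernel = None

    def __call__(self, x):
        if self.kernel is None:
            self.kernel = CAPSULE     # the real module would fetch its kernel here
        return x


copy.deepcopy(LazyKernel())           # works: no capsule is stored yet
try:
    copy.deepcopy(EagerKernel())
except TypeError as exc:
    print(exc)                        # cannot pickle 'PyCapsule' object
```
The golden patch further down follows the same idea: the Triton activation kernels are looked up lazily in `forward` instead of being stored on the module at construction time.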
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xformers/triton/dropout.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 # CREDITS: This comes almost as-is from the Triton dropout tutorial
8 # https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
9
10 from typing import Optional
11
12 import torch
13 import triton
14 from torch.cuda.amp import custom_bwd, custom_fwd
15
16 from xformers.components.activations import Activation, build_activation
17 from xformers.triton.k_activations import (
18 get_triton_activation_bwd_kernel,
19 get_triton_activation_kernel,
20 )
21 from xformers.triton.k_dropout import k_dropout_bw, k_dropout_fw
22
23 GROUP_M = 32
24 BLOCK_M = GROUP_M // 4
25 BLOCK_N = 128
26
27
28 # Helper to handle the SPMD launch grid and error cases
29 class _dropout(torch.autograd.Function):
30 @staticmethod
31 @custom_fwd(cast_inputs=torch.float16)
32 def forward(ctx, x, p, bias, activation, activation_grad, trainable_bias):
33 # Soft-flatten an hypothetical 3rd dimension
34 x_ = x.reshape(-1, x.shape[-1]).contiguous()
35 y = torch.empty_like(x_)
36 M, N = x_.shape
37
38 assert bias is None or (bias.dtype == x.dtype and bias.shape[0] == N)
39 assert p > 0.0
40
41 def grid(meta):
42 # NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for
43 # a given seed and offsets. "BLOCK_M" here describes the size of one of these blocks
44 # but we need to take this factor of 4 into account when scheduling all the kernels
45 return (
46 triton.cdiv(M, meta["BLOCK_M"] * 4),
47 triton.cdiv(N, meta["BLOCK_N"]),
48 )
49
50 N_BLOCK_N = triton.cdiv(N, BLOCK_N)
51
52 # Generate one seed per sample
53 # seed max is int32 max for positive numbers: 2**16
54 seeds = torch.randint(65536, (N_BLOCK_N,), device=x.device).to(torch.int32)
55
56 # fmt: off
57 bias_ptr = bias if bias is not None else x_ # Possibly not being used
58
59 k_dropout_fw[grid](
60 y, x_,
61 bias_ptr,
62 seeds,
63 y.stride(0),
64 M, N,
65 p,
66 x.dtype == torch.float16,
67 USE_BIAS=bias is not None,
68 ACTIVATION=activation,
69 BLOCK_M=BLOCK_M,
70 BLOCK_N=BLOCK_N,
71 )
72 # fmt: on
73
74 if activation is not None:
75 ctx.save_for_backward(seeds, bias, x)
76 else:
77 ctx.save_for_backward(seeds, bias, None)
78
79 ctx.trainable_bias = bias is not None and trainable_bias
80 ctx.activation_grad = activation_grad
81 ctx.p = p
82
83 return y.reshape_as(x)
84
85 @staticmethod
86 @custom_bwd
87 def backward(ctx, grad_out):
88 (seeds, bias, inputs) = ctx.saved_tensors
89
90 # Soft-flatten an hypothetical 3rd dimension
91 grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]).contiguous()
92 grad_in = torch.empty_like(grad_out_)
93
94 M, N = grad_out_.shape
95
96 # Optional inputs to compute the activation contribution to the gradient
97 assert inputs is not None or ctx.activation_grad is None
98
99 if inputs is None:
100 inputs = grad_out_
101 elif inputs.ndim > 2:
102 inputs = inputs.reshape(-1, N)
103
104 # We split the problem in tiles:
105 # - over M there will be a follow up reduction
106 # - over M, we go by 4 tiles at at time (consequence of the random number generation)
107 # - over N we compromise in between trying to use as much memory paralellism as possible,
108 # (fill in the warps, there are 32 threads per warps, and 4 warps default), and not being too
109 # big because of register spilling
110 N_BLOCKS_M = triton.cdiv(M, GROUP_M)
111
112 if ctx.trainable_bias:
113 grad_bias = torch.empty(
114 (
115 N_BLOCKS_M,
116 N,
117 ),
118 device=grad_in.device,
119 dtype=grad_in.dtype,
120 )
121
122 else:
123 grad_bias = grad_in # will not be used
124
125 def grid(meta):
126 # NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for
127 # a given seed and offsets. "BLOCK_M" here describes the size of one of these blocks
128 # but we need to take this factor of 4 into account when scheduling all the kernels
129 return (
130 triton.cdiv(M, meta["BLOCK_M"] * 4),
131 triton.cdiv(N, meta["BLOCK_N"]),
132 )
133
134 # fmt: off
135 k_dropout_bw[grid](
136 grad_in, grad_bias, grad_out_,
137 inputs, bias if bias is not None else inputs,
138 seeds,
139 grad_out_.stride(0), inputs.stride(0),
140 M, N,
141 ctx.p,
142 grad_in.dtype == torch.float16,
143 USE_BIAS=bias is not None,
144 ACTIVATION_GRAD=ctx.activation_grad,
145 TRAINABLE_BIAS=ctx.trainable_bias,
146 BLOCK_M=BLOCK_M,
147 BLOCK_N=BLOCK_N,
148 )
149 # fmt: on
150
151 return (
152 grad_in.reshape_as(grad_out),
153 None,
154 torch.sum(grad_bias, dim=0) if ctx.trainable_bias else None,
155 None,
156 None,
157 None,
158 )
159
160
161 def dropout(
162 x: torch.Tensor,
163 p: float,
164 bias: Optional[torch.Tensor] = None,
165 activation: Optional[Activation] = None,
166 ):
167 """
168 Apply dropout on the input tensor.
169 Optionally add a bias, the computation will be fused.
170 """
171
172 assert p <= 1.0 and p >= 0.0
173
174 if p == 1.0:
175 return torch.zeros_like(x)
176
177 # Micro optim, skip dropout
178 if p == 0.0:
179 x = x + bias if bias is not None else x
180 if activation is not None:
181 activation_fn = build_activation(activation)
182 return activation_fn(x)
183 return x
184
185 # The normal triton enabled codepath
186 act_kernel = get_triton_activation_kernel(activation)
187 act_grad_kernel = get_triton_activation_bwd_kernel(activation)
188 return _dropout.apply(
189 x,
190 float(p),
191 bias,
192 act_kernel,
193 act_grad_kernel,
194 bias is not None and bias.requires_grad,
195 )
196
197
198 class FusedDropoutBias(torch.nn.Module):
199 def __init__(
200 self,
201 p: float,
202 bias_shape: Optional[int],
203 activation: Optional[Activation] = None,
204 ) -> None:
205 super().__init__()
206
207 self.p = float(p)
208
209 assert (
210 self.p < 1.0
211 ), f"We don't want to drop all the values, most probably p={p} is not properly set"
212
213 self.activation_type = activation
214 self.bias = (
215 torch.zeros(bias_shape, requires_grad=True)
216 if bias_shape is not None
217 else None
218 )
219 self.activation = get_triton_activation_kernel(activation)
220 self.pytorch_activation = build_activation(self.activation_type)
221 self.activation_grad = get_triton_activation_bwd_kernel(activation)
222
223 def forward(self, x: torch.Tensor) -> torch.Tensor:
224 # Convenience, catch a possible type or device mismatch
225 if self.bias is not None:
226 self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore
227
228 # Train/inference
229 p = self.p if self.training else 0.0
230
231 # This kernel is slower than pytorch for small buffers, bypassing it in that case
232 perf_check = x.shape[-1] > 512
233
234 # Catch a non-cuda setup, fallback to pytorch
235 if not x.is_cuda or not perf_check or p == 0.0:
236 x = x + self.bias if self.bias is not None else x
237 x = self.pytorch_activation(x)
238 return torch.nn.functional.dropout(x, p) if p > 0.0 else x
239
240 # The normal, Triton-backed path
241 return _dropout.apply(
242 x, p, self.bias, self.activation, self.activation_grad, True
243 )
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xformers/triton/dropout.py b/xformers/triton/dropout.py
--- a/xformers/triton/dropout.py
+++ b/xformers/triton/dropout.py
@@ -7,7 +7,7 @@
# CREDITS: This comes almost as-is from the Triton dropout tutorial
# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py
-from typing import Optional
+from typing import Any, Optional
import torch
import triton
@@ -196,6 +196,11 @@
class FusedDropoutBias(torch.nn.Module):
+ """
+ A layer which fuses the computation of Dropout(Activation(x))
+ in a single GPU kernel
+ """
+
def __init__(
self,
p: float,
@@ -216,15 +221,24 @@
if bias_shape is not None
else None
)
- self.activation = get_triton_activation_kernel(activation)
- self.pytorch_activation = build_activation(self.activation_type)
- self.activation_grad = get_triton_activation_bwd_kernel(activation)
+
+ self.activation: Optional[Any] = None
+ self.activation_grad: Optional[Any] = None
+ self.activation_pytorch: Optional[Any] = None
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Convenience, catch a possible type or device mismatch
if self.bias is not None:
self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore
+ # Lazy init (helps with pickling)
+ if self.activation is None:
+ self.activation = get_triton_activation_kernel(self.activation_type)
+ self.pytorch_activation = build_activation(self.activation_type)
+ self.activation_grad = get_triton_activation_bwd_kernel(
+ self.activation_type
+ )
+
# Train/inference
p = self.p if self.training else 0.0
|
{"golden_diff": "diff --git a/xformers/triton/dropout.py b/xformers/triton/dropout.py\n--- a/xformers/triton/dropout.py\n+++ b/xformers/triton/dropout.py\n@@ -7,7 +7,7 @@\n # CREDITS: This comes almost as-is from the Triton dropout tutorial\n # https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py\n \n-from typing import Optional\n+from typing import Any, Optional\n \n import torch\n import triton\n@@ -196,6 +196,11 @@\n \n \n class FusedDropoutBias(torch.nn.Module):\n+ \"\"\"\n+ A layer which fuses the computation of Dropout(Activation(x))\n+ in a single GPU kernel\n+ \"\"\"\n+\n def __init__(\n self,\n p: float,\n@@ -216,15 +221,24 @@\n if bias_shape is not None\n else None\n )\n- self.activation = get_triton_activation_kernel(activation)\n- self.pytorch_activation = build_activation(self.activation_type)\n- self.activation_grad = get_triton_activation_bwd_kernel(activation)\n+\n+ self.activation: Optional[Any] = None\n+ self.activation_grad: Optional[Any] = None\n+ self.activation_pytorch: Optional[Any] = None\n \n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # Convenience, catch a possible type or device mismatch\n if self.bias is not None:\n self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore\n \n+ # Lazy init (helps with pickling)\n+ if self.activation is None:\n+ self.activation = get_triton_activation_kernel(self.activation_type)\n+ self.pytorch_activation = build_activation(self.activation_type)\n+ self.activation_grad = get_triton_activation_bwd_kernel(\n+ self.activation_type\n+ )\n+\n # Train/inference\n p = self.p if self.training else 0.0\n", "issue": "Can't deepcopy an xformer model with triton 2 update\n# \ud83d\udc1b Bug\r\n\r\nGiven the recent triton2 update an xformer model cannot be deep-copied. This is an important requirement for numerous tasks including EMA (without knowledge of the generating class / hyper-params).\r\n\r\n## xformers ViT-B Config\r\n\r\n```yaml\r\nreversible: False\r\nblock_type: \"encoder\"\r\nnum_layers: 12\r\ndim_model: 768\r\nlayer_norm_style: \"pre\"\r\n\r\nmulti_head_config:\r\n num_heads: 12\r\n residual_dropout: 0.1 # (1) tried without this, (2) swapping this for DropPath, (3) with regular dropout\r\n use_rotary_embeddings: False\r\n\r\n attention:\r\n name: \"scaled_dot_product\"\r\n dropout: 0.0\r\n causal: False\r\n\r\nfeedforward_config:\r\n name: \"MLP\"\r\n dropout: 0.0\r\n activation: \"gelu\"\r\n hidden_layer_multiplier: 4\r\n```\r\n\r\n## To reproduce\r\n\r\n```python\r\nfrom copy import deepcopy\r\n\r\nwith open(transfomer_config_file, \"rb\") as fileptr:\r\n self.model_config = yaml.load(fileptr, Loader=yaml.FullLoader)\r\n\r\nmodel = xFormer.from_config(xFormerConfig([self.model_config]))\r\ndeepcopy(model)\r\n```\r\n\r\nError is:\r\n```bash\r\nTypeError: cannot pickle 'PyCapsule' object\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This comes almost as-is from the Triton dropout tutorial\n# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py\n\nfrom typing import Optional\n\nimport torch\nimport triton\nfrom torch.cuda.amp import custom_bwd, custom_fwd\n\nfrom xformers.components.activations import Activation, build_activation\nfrom xformers.triton.k_activations import (\n get_triton_activation_bwd_kernel,\n get_triton_activation_kernel,\n)\nfrom xformers.triton.k_dropout import k_dropout_bw, k_dropout_fw\n\nGROUP_M = 32\nBLOCK_M = GROUP_M // 4\nBLOCK_N = 128\n\n\n# Helper to handle the SPMD launch grid and error cases\nclass _dropout(torch.autograd.Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float16)\n def forward(ctx, x, p, bias, activation, activation_grad, trainable_bias):\n # Soft-flatten an hypothetical 3rd dimension\n x_ = x.reshape(-1, x.shape[-1]).contiguous()\n y = torch.empty_like(x_)\n M, N = x_.shape\n\n assert bias is None or (bias.dtype == x.dtype and bias.shape[0] == N)\n assert p > 0.0\n\n def grid(meta):\n # NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for\n # a given seed and offsets. \"BLOCK_M\" here describes the size of one of these blocks\n # but we need to take this factor of 4 into account when scheduling all the kernels\n return (\n triton.cdiv(M, meta[\"BLOCK_M\"] * 4),\n triton.cdiv(N, meta[\"BLOCK_N\"]),\n )\n\n N_BLOCK_N = triton.cdiv(N, BLOCK_N)\n\n # Generate one seed per sample\n # seed max is int32 max for positive numbers: 2**16\n seeds = torch.randint(65536, (N_BLOCK_N,), device=x.device).to(torch.int32)\n\n # fmt: off\n bias_ptr = bias if bias is not None else x_ # Possibly not being used\n\n k_dropout_fw[grid](\n y, x_,\n bias_ptr,\n seeds,\n y.stride(0),\n M, N,\n p,\n x.dtype == torch.float16,\n USE_BIAS=bias is not None,\n ACTIVATION=activation,\n BLOCK_M=BLOCK_M,\n BLOCK_N=BLOCK_N,\n )\n # fmt: on\n\n if activation is not None:\n ctx.save_for_backward(seeds, bias, x)\n else:\n ctx.save_for_backward(seeds, bias, None)\n\n ctx.trainable_bias = bias is not None and trainable_bias\n ctx.activation_grad = activation_grad\n ctx.p = p\n\n return y.reshape_as(x)\n\n @staticmethod\n @custom_bwd\n def backward(ctx, grad_out):\n (seeds, bias, inputs) = ctx.saved_tensors\n\n # Soft-flatten an hypothetical 3rd dimension\n grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]).contiguous()\n grad_in = torch.empty_like(grad_out_)\n\n M, N = grad_out_.shape\n\n # Optional inputs to compute the activation contribution to the gradient\n assert inputs is not None or ctx.activation_grad is None\n\n if inputs is None:\n inputs = grad_out_\n elif inputs.ndim > 2:\n inputs = inputs.reshape(-1, N)\n\n # We split the problem in tiles:\n # - over M there will be a follow up reduction\n # - over M, we go by 4 tiles at at time (consequence of the random number generation)\n # - over N we compromise in between trying to use as much memory paralellism as possible,\n # (fill in the warps, there are 32 threads per warps, and 4 warps default), and not being too\n # big because of register spilling\n N_BLOCKS_M = triton.cdiv(M, GROUP_M)\n\n if ctx.trainable_bias:\n grad_bias = torch.empty(\n (\n N_BLOCKS_M,\n N,\n ),\n device=grad_in.device,\n dtype=grad_in.dtype,\n )\n\n else:\n grad_bias = grad_in # will not be used\n\n def grid(meta):\n # NOTE: 
We use Triton Philox random number generator, which optimally generates 4 blocks for\n # a given seed and offsets. \"BLOCK_M\" here describes the size of one of these blocks\n # but we need to take this factor of 4 into account when scheduling all the kernels\n return (\n triton.cdiv(M, meta[\"BLOCK_M\"] * 4),\n triton.cdiv(N, meta[\"BLOCK_N\"]),\n )\n\n # fmt: off\n k_dropout_bw[grid](\n grad_in, grad_bias, grad_out_,\n inputs, bias if bias is not None else inputs,\n seeds,\n grad_out_.stride(0), inputs.stride(0),\n M, N,\n ctx.p,\n grad_in.dtype == torch.float16,\n USE_BIAS=bias is not None,\n ACTIVATION_GRAD=ctx.activation_grad,\n TRAINABLE_BIAS=ctx.trainable_bias,\n BLOCK_M=BLOCK_M,\n BLOCK_N=BLOCK_N,\n )\n # fmt: on\n\n return (\n grad_in.reshape_as(grad_out),\n None,\n torch.sum(grad_bias, dim=0) if ctx.trainable_bias else None,\n None,\n None,\n None,\n )\n\n\ndef dropout(\n x: torch.Tensor,\n p: float,\n bias: Optional[torch.Tensor] = None,\n activation: Optional[Activation] = None,\n):\n \"\"\"\n Apply dropout on the input tensor.\n Optionally add a bias, the computation will be fused.\n \"\"\"\n\n assert p <= 1.0 and p >= 0.0\n\n if p == 1.0:\n return torch.zeros_like(x)\n\n # Micro optim, skip dropout\n if p == 0.0:\n x = x + bias if bias is not None else x\n if activation is not None:\n activation_fn = build_activation(activation)\n return activation_fn(x)\n return x\n\n # The normal triton enabled codepath\n act_kernel = get_triton_activation_kernel(activation)\n act_grad_kernel = get_triton_activation_bwd_kernel(activation)\n return _dropout.apply(\n x,\n float(p),\n bias,\n act_kernel,\n act_grad_kernel,\n bias is not None and bias.requires_grad,\n )\n\n\nclass FusedDropoutBias(torch.nn.Module):\n def __init__(\n self,\n p: float,\n bias_shape: Optional[int],\n activation: Optional[Activation] = None,\n ) -> None:\n super().__init__()\n\n self.p = float(p)\n\n assert (\n self.p < 1.0\n ), f\"We don't want to drop all the values, most probably p={p} is not properly set\"\n\n self.activation_type = activation\n self.bias = (\n torch.zeros(bias_shape, requires_grad=True)\n if bias_shape is not None\n else None\n )\n self.activation = get_triton_activation_kernel(activation)\n self.pytorch_activation = build_activation(self.activation_type)\n self.activation_grad = get_triton_activation_bwd_kernel(activation)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # Convenience, catch a possible type or device mismatch\n if self.bias is not None:\n self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore\n\n # Train/inference\n p = self.p if self.training else 0.0\n\n # This kernel is slower than pytorch for small buffers, bypassing it in that case\n perf_check = x.shape[-1] > 512\n\n # Catch a non-cuda setup, fallback to pytorch\n if not x.is_cuda or not perf_check or p == 0.0:\n x = x + self.bias if self.bias is not None else x\n x = self.pytorch_activation(x)\n return torch.nn.functional.dropout(x, p) if p > 0.0 else x\n\n # The normal, Triton-backed path\n return _dropout.apply(\n x, p, self.bias, self.activation, self.activation_grad, True\n )\n", "path": "xformers/triton/dropout.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# CREDITS: This comes almost as-is from the Triton dropout tutorial\n# https://raw.githubusercontent.com/openai/triton/master/python/tutorials/04-low-memory-dropout.py\n\nfrom typing import Any, Optional\n\nimport torch\nimport triton\nfrom torch.cuda.amp import custom_bwd, custom_fwd\n\nfrom xformers.components.activations import Activation, build_activation\nfrom xformers.triton.k_activations import (\n get_triton_activation_bwd_kernel,\n get_triton_activation_kernel,\n)\nfrom xformers.triton.k_dropout import k_dropout_bw, k_dropout_fw\n\nGROUP_M = 32\nBLOCK_M = GROUP_M // 4\nBLOCK_N = 128\n\n\n# Helper to handle the SPMD launch grid and error cases\nclass _dropout(torch.autograd.Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float16)\n def forward(ctx, x, p, bias, activation, activation_grad, trainable_bias):\n # Soft-flatten an hypothetical 3rd dimension\n x_ = x.reshape(-1, x.shape[-1]).contiguous()\n y = torch.empty_like(x_)\n M, N = x_.shape\n\n assert bias is None or (bias.dtype == x.dtype and bias.shape[0] == N)\n assert p > 0.0\n\n def grid(meta):\n # NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for\n # a given seed and offsets. \"BLOCK_M\" here describes the size of one of these blocks\n # but we need to take this factor of 4 into account when scheduling all the kernels\n return (\n triton.cdiv(M, meta[\"BLOCK_M\"] * 4),\n triton.cdiv(N, meta[\"BLOCK_N\"]),\n )\n\n N_BLOCK_N = triton.cdiv(N, BLOCK_N)\n\n # Generate one seed per sample\n # seed max is int32 max for positive numbers: 2**16\n seeds = torch.randint(65536, (N_BLOCK_N,), device=x.device).to(torch.int32)\n\n # fmt: off\n bias_ptr = bias if bias is not None else x_ # Possibly not being used\n\n k_dropout_fw[grid](\n y, x_,\n bias_ptr,\n seeds,\n y.stride(0),\n M, N,\n p,\n x.dtype == torch.float16,\n USE_BIAS=bias is not None,\n ACTIVATION=activation,\n BLOCK_M=BLOCK_M,\n BLOCK_N=BLOCK_N,\n )\n # fmt: on\n\n if activation is not None:\n ctx.save_for_backward(seeds, bias, x)\n else:\n ctx.save_for_backward(seeds, bias, None)\n\n ctx.trainable_bias = bias is not None and trainable_bias\n ctx.activation_grad = activation_grad\n ctx.p = p\n\n return y.reshape_as(x)\n\n @staticmethod\n @custom_bwd\n def backward(ctx, grad_out):\n (seeds, bias, inputs) = ctx.saved_tensors\n\n # Soft-flatten an hypothetical 3rd dimension\n grad_out_ = grad_out.reshape(-1, grad_out.shape[-1]).contiguous()\n grad_in = torch.empty_like(grad_out_)\n\n M, N = grad_out_.shape\n\n # Optional inputs to compute the activation contribution to the gradient\n assert inputs is not None or ctx.activation_grad is None\n\n if inputs is None:\n inputs = grad_out_\n elif inputs.ndim > 2:\n inputs = inputs.reshape(-1, N)\n\n # We split the problem in tiles:\n # - over M there will be a follow up reduction\n # - over M, we go by 4 tiles at at time (consequence of the random number generation)\n # - over N we compromise in between trying to use as much memory paralellism as possible,\n # (fill in the warps, there are 32 threads per warps, and 4 warps default), and not being too\n # big because of register spilling\n N_BLOCKS_M = triton.cdiv(M, GROUP_M)\n\n if ctx.trainable_bias:\n grad_bias = torch.empty(\n (\n N_BLOCKS_M,\n N,\n ),\n device=grad_in.device,\n dtype=grad_in.dtype,\n )\n\n else:\n grad_bias = grad_in # will not be used\n\n def grid(meta):\n # 
NOTE: We use Triton Philox random number generator, which optimally generates 4 blocks for\n # a given seed and offsets. \"BLOCK_M\" here describes the size of one of these blocks\n # but we need to take this factor of 4 into account when scheduling all the kernels\n return (\n triton.cdiv(M, meta[\"BLOCK_M\"] * 4),\n triton.cdiv(N, meta[\"BLOCK_N\"]),\n )\n\n # fmt: off\n k_dropout_bw[grid](\n grad_in, grad_bias, grad_out_,\n inputs, bias if bias is not None else inputs,\n seeds,\n grad_out_.stride(0), inputs.stride(0),\n M, N,\n ctx.p,\n grad_in.dtype == torch.float16,\n USE_BIAS=bias is not None,\n ACTIVATION_GRAD=ctx.activation_grad,\n TRAINABLE_BIAS=ctx.trainable_bias,\n BLOCK_M=BLOCK_M,\n BLOCK_N=BLOCK_N,\n )\n # fmt: on\n\n return (\n grad_in.reshape_as(grad_out),\n None,\n torch.sum(grad_bias, dim=0) if ctx.trainable_bias else None,\n None,\n None,\n None,\n )\n\n\ndef dropout(\n x: torch.Tensor,\n p: float,\n bias: Optional[torch.Tensor] = None,\n activation: Optional[Activation] = None,\n):\n \"\"\"\n Apply dropout on the input tensor.\n Optionally add a bias, the computation will be fused.\n \"\"\"\n\n assert p <= 1.0 and p >= 0.0\n\n if p == 1.0:\n return torch.zeros_like(x)\n\n # Micro optim, skip dropout\n if p == 0.0:\n x = x + bias if bias is not None else x\n if activation is not None:\n activation_fn = build_activation(activation)\n return activation_fn(x)\n return x\n\n # The normal triton enabled codepath\n act_kernel = get_triton_activation_kernel(activation)\n act_grad_kernel = get_triton_activation_bwd_kernel(activation)\n return _dropout.apply(\n x,\n float(p),\n bias,\n act_kernel,\n act_grad_kernel,\n bias is not None and bias.requires_grad,\n )\n\n\nclass FusedDropoutBias(torch.nn.Module):\n \"\"\"\n A layer which fuses the computation of Dropout(Activation(x))\n in a single GPU kernel\n \"\"\"\n\n def __init__(\n self,\n p: float,\n bias_shape: Optional[int],\n activation: Optional[Activation] = None,\n ) -> None:\n super().__init__()\n\n self.p = float(p)\n\n assert (\n self.p < 1.0\n ), f\"We don't want to drop all the values, most probably p={p} is not properly set\"\n\n self.activation_type = activation\n self.bias = (\n torch.zeros(bias_shape, requires_grad=True)\n if bias_shape is not None\n else None\n )\n\n self.activation: Optional[Any] = None\n self.activation_grad: Optional[Any] = None\n self.activation_pytorch: Optional[Any] = None\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # Convenience, catch a possible type or device mismatch\n if self.bias is not None:\n self.bias = self.bias.to(dtype=x.dtype, device=x.device) # type: ignore\n\n # Lazy init (helps with pickling)\n if self.activation is None:\n self.activation = get_triton_activation_kernel(self.activation_type)\n self.pytorch_activation = build_activation(self.activation_type)\n self.activation_grad = get_triton_activation_bwd_kernel(\n self.activation_type\n )\n\n # Train/inference\n p = self.p if self.training else 0.0\n\n # This kernel is slower than pytorch for small buffers, bypassing it in that case\n perf_check = x.shape[-1] > 512\n\n # Catch a non-cuda setup, fallback to pytorch\n if not x.is_cuda or not perf_check or p == 0.0:\n x = x + self.bias if self.bias is not None else x\n x = self.pytorch_activation(x)\n return torch.nn.functional.dropout(x, p) if p > 0.0 else x\n\n # The normal, Triton-backed path\n return _dropout.apply(\n x, p, self.bias, self.activation, self.activation_grad, True\n )\n", "path": "xformers/triton/dropout.py"}]}
| 3,107 | 442 |
gh_patches_debug_8432
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-6423
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Use of pythonw on macos conda/mamba environments breaks app
### PsychoPy Version
2024.1.3
### What OS are your PsychoPy running on?
macOS Silicon
### Bug Description
When attempting to run the psychopy GUI app following an install in a fresh conda environment:
```
conda create -n pyschopy python=3.10
pip install pyschopy
psychopy
```
Psychopy fails to start.
### Expected Behaviour
Psychopy should not need to call pythonw for versions of python >= 3.9, and the if statement in `psychopyApp.py` should be modified to reflect that probably (or python < 3.9 support dropped entirely for newer versions of the package).
### Steps to Reproduce
After installing as above the following error is returned:
```
Traceback (most recent call last):
File "/Users/MYUSERNAME/mambaforge/envs/intermod/bin/psychopy", line 8, in <module>
sys.exit(main())
File "/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/site-packages/psychopy/app/psychopyApp.py", line 90, in main
stdout, stderr = core.shellCall(cmd,
File "/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/site-packages/psychopy/core.py", line 153, in shellCall
proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,
File "/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/subprocess.py", line 971, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/subprocess.py", line 1863, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '/Users/MYUSERNAME/mambaforge/envs/intermod/bin/python3.10w'
```
This error happens because, according to `psychopyApp.py`, we need to call `pythonw` when running GUI scripts on MacOS in an Anaconda-based environment. However this is an outdated method as of Python 3.9, and [from that version onwards you can now directly call the python binary regardless](https://docs.python.org/3/using/mac.html#running-scripts-with-a-gui).
In a fresh Python 3.10 installation via conda/mamba the pythonw binary does not exist in the binaries folder for the environment. I have for the moment fixed this by simply symlinking the base python binary to `python3.10w` which psychopy expects, which then allows the app to start.
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/psychopyApp.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 import sys
9
10 # fix macOS locale-bug on startup: sets locale to LC_ALL (must be defined!)
11 import psychopy.locale_setup # noqa
12
13
14 # NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00
15 # to allow for better upgrading possibilities from the mac app bundle. this
16 # file now used solely as a launcher for the app, not as the app itself.
17
18
19 def start_app():
20 from psychopy.app import startApp, quitApp
21 from psychopy.preferences import prefs
22
23 showSplash = prefs.app['showSplash']
24 if '--no-splash' in sys.argv:
25 showSplash = False
26 del sys.argv[sys.argv.index('--no-splash')]
27 _ = startApp(showSplash=showSplash) # main loop
28 quitApp()
29
30
31 def main():
32 if '-x' in sys.argv:
33 # run a .py script from the command line using StandAlone python
34 targetScript = sys.argv[sys.argv.index('-x') + 1]
35 from psychopy import core
36 import os
37 core.shellCall([sys.executable, os.path.abspath(targetScript)])
38 sys.exit()
39 if '-v' in sys.argv or '--version' in sys.argv:
40 from psychopy import __version__
41 msg = ('PsychoPy3, version %s (c)Jonathan Peirce 2018, GNU GPL license'
42 % __version__)
43 print(msg)
44 sys.exit()
45 if '-h' in sys.argv or '--help' in sys.argv:
46 print("""Starts the PsychoPy3 application.
47
48 Usage: python PsychoPy.py [options] [file]
49
50 Without options or files provided this starts PsychoPy using prefs to
51 decide on the view(s) to open. If optional [file] is provided action
52 depends on the type of the [file]:
53
54 Python script 'file.py' -- opens coder
55
56 Experiment design 'file.psyexp' -- opens builder
57
58 Options:
59 -c, --coder, coder opens coder view only
60 -b, --builder, builder opens builder view only
61 -x script.py execute script.py using StandAlone python
62
63 -v, --version prints version and exits
64 -h, --help prints this help and exit
65
66 --firstrun launches configuration wizard
67 --no-splash suppresses splash screen
68
69 """)
70 sys.exit()
71
72 if (sys.platform == 'darwin' and
73 ('| packaged by conda-forge |' in sys.version or
74 '|Anaconda' in sys.version)):
75
76 # On macOS with Anaconda, GUI applications need to be run using
77 # `pythonw`. Since we have no way to determine whether this is currently
78 # the case, we run this script again -- ensuring we're definitely using
79 # pythonw.
80 import os
81 env = os.environ
82 PYTHONW = env.get('PYTHONW', 'False')
83
84 if PYTHONW != 'True':
85 from psychopy import core
86 cmd = [sys.executable + 'w', __file__]
87 if '--no-splash' in sys.argv:
88 cmd.append('--no-splash')
89
90 stdout, stderr = core.shellCall(cmd,
91 env=dict(env, PYTHONW='True'),
92 stderr=True)
93 print(stdout, file=sys.stdout)
94 print(stderr, file=sys.stderr)
95 sys.exit()
96 else:
97 start_app()
98 else:
99 start_app()
100
101
102 if __name__ == '__main__':
103 main()
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/psychopy/app/psychopyApp.py b/psychopy/app/psychopyApp.py
--- a/psychopy/app/psychopyApp.py
+++ b/psychopy/app/psychopyApp.py
@@ -69,9 +69,8 @@
""")
sys.exit()
- if (sys.platform == 'darwin' and
- ('| packaged by conda-forge |' in sys.version or
- '|Anaconda' in sys.version)):
+ if (('| packaged by conda-forge |' in sys.version or '|Anaconda' in sys.version)
+ and sys.platform == 'darwin' and sys.version_info >= (3,9)):
# On macOS with Anaconda, GUI applications need to be run using
# `pythonw`. Since we have no way to determine whether this is currently
|
{"golden_diff": "diff --git a/psychopy/app/psychopyApp.py b/psychopy/app/psychopyApp.py\n--- a/psychopy/app/psychopyApp.py\n+++ b/psychopy/app/psychopyApp.py\n@@ -69,9 +69,8 @@\n \"\"\")\n sys.exit()\n \n- if (sys.platform == 'darwin' and\n- ('| packaged by conda-forge |' in sys.version or\n- '|Anaconda' in sys.version)):\n+ if (('| packaged by conda-forge |' in sys.version or '|Anaconda' in sys.version)\n+ and sys.platform == 'darwin' and sys.version_info >= (3,9)):\n \n # On macOS with Anaconda, GUI applications need to be run using\n # `pythonw`. Since we have no way to determine whether this is currently\n", "issue": "[Bug]: Use of pythonw on macos conda/mamba environments breaks app\n### PsychoPy Version\r\n\r\n2024.1.3\r\n\r\n### What OS are your PsychoPy running on?\r\n\r\nmacOS Silicon\r\n\r\n### Bug Description\r\n\r\nWhen attempting to run the psychopy GUI app following an install in a fresh conda environment:\r\n\r\n```\r\nconda create -n pyschopy python=3.10\r\npip install pyschopy\r\npsychopy\r\n```\r\nPsychopy fails to start.\r\n\r\n\r\n### Expected Behaviour\r\n\r\nPsychopy should not need to call pythonw for versions of python >= 3.9, and the if statement in `psychopyApp.py` should be modified to reflect that probably (or python < 3.9 support dropped entirely for newer versions of the package).\r\n\r\n### Steps to Reproduce\r\n\r\nAfter installing as above the following error is returned:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/MYUSERNAME/mambaforge/envs/intermod/bin/psychopy\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/site-packages/psychopy/app/psychopyApp.py\", line 90, in main\r\n stdout, stderr = core.shellCall(cmd,\r\n File \"/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/site-packages/psychopy/core.py\", line 153, in shellCall\r\n proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE,\r\n File \"/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/subprocess.py\", line 971, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/Users/MYUSERNAME/mambaforge/envs/intermod/lib/python3.10/subprocess.py\", line 1863, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/Users/MYUSERNAME/mambaforge/envs/intermod/bin/python3.10w'\r\n```\r\n\r\nThis error happens because, according to `psychopyApp.py`, we need to call `pythonw` when running GUI scripts on MacOS in an Anaconda-based environment. However this is an outdated method as of Python 3.9, and [from that version onwards you can now directly call the python binary regardless](https://docs.python.org/3/using/mac.html#running-scripts-with-a-gui). \r\n\r\nIn a fresh Python 3.10 installation via conda/mamba the pythonw binary does not exist in the binaries folder for the environment. 
I have for the moment fixed this by simply symlinking the base python binary to `python3.10w` which psychopy expects, which then allows the app to start.\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport sys\n\n# fix macOS locale-bug on startup: sets locale to LC_ALL (must be defined!)\nimport psychopy.locale_setup # noqa\n\n\n# NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00\n# to allow for better upgrading possibilities from the mac app bundle. this\n# file now used solely as a launcher for the app, not as the app itself.\n\n\ndef start_app():\n from psychopy.app import startApp, quitApp\n from psychopy.preferences import prefs\n\n showSplash = prefs.app['showSplash']\n if '--no-splash' in sys.argv:\n showSplash = False\n del sys.argv[sys.argv.index('--no-splash')]\n _ = startApp(showSplash=showSplash) # main loop\n quitApp()\n\n\ndef main():\n if '-x' in sys.argv:\n # run a .py script from the command line using StandAlone python\n targetScript = sys.argv[sys.argv.index('-x') + 1]\n from psychopy import core\n import os\n core.shellCall([sys.executable, os.path.abspath(targetScript)])\n sys.exit()\n if '-v' in sys.argv or '--version' in sys.argv:\n from psychopy import __version__\n msg = ('PsychoPy3, version %s (c)Jonathan Peirce 2018, GNU GPL license'\n % __version__)\n print(msg)\n sys.exit()\n if '-h' in sys.argv or '--help' in sys.argv:\n print(\"\"\"Starts the PsychoPy3 application.\n\nUsage: python PsychoPy.py [options] [file]\n\nWithout options or files provided this starts PsychoPy using prefs to\ndecide on the view(s) to open. If optional [file] is provided action\ndepends on the type of the [file]:\n\n Python script 'file.py' -- opens coder\n\n Experiment design 'file.psyexp' -- opens builder\n\nOptions:\n -c, --coder, coder opens coder view only\n -b, --builder, builder opens builder view only\n -x script.py execute script.py using StandAlone python\n\n -v, --version prints version and exits\n -h, --help prints this help and exit\n\n --firstrun launches configuration wizard\n --no-splash suppresses splash screen\n\n\"\"\")\n sys.exit()\n\n if (sys.platform == 'darwin' and\n ('| packaged by conda-forge |' in sys.version or\n '|Anaconda' in sys.version)):\n\n # On macOS with Anaconda, GUI applications need to be run using\n # `pythonw`. 
Since we have no way to determine whether this is currently\n # the case, we run this script again -- ensuring we're definitely using\n # pythonw.\n import os\n env = os.environ\n PYTHONW = env.get('PYTHONW', 'False')\n\n if PYTHONW != 'True':\n from psychopy import core\n cmd = [sys.executable + 'w', __file__]\n if '--no-splash' in sys.argv:\n cmd.append('--no-splash')\n\n stdout, stderr = core.shellCall(cmd,\n env=dict(env, PYTHONW='True'),\n stderr=True)\n print(stdout, file=sys.stdout)\n print(stderr, file=sys.stderr)\n sys.exit()\n else:\n start_app()\n else:\n start_app()\n\n\nif __name__ == '__main__':\n main()\n", "path": "psychopy/app/psychopyApp.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2024 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport sys\n\n# fix macOS locale-bug on startup: sets locale to LC_ALL (must be defined!)\nimport psychopy.locale_setup # noqa\n\n\n# NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00\n# to allow for better upgrading possibilities from the mac app bundle. this\n# file now used solely as a launcher for the app, not as the app itself.\n\n\ndef start_app():\n from psychopy.app import startApp, quitApp\n from psychopy.preferences import prefs\n\n showSplash = prefs.app['showSplash']\n if '--no-splash' in sys.argv:\n showSplash = False\n del sys.argv[sys.argv.index('--no-splash')]\n _ = startApp(showSplash=showSplash) # main loop\n quitApp()\n\n\ndef main():\n if '-x' in sys.argv:\n # run a .py script from the command line using StandAlone python\n targetScript = sys.argv[sys.argv.index('-x') + 1]\n from psychopy import core\n import os\n core.shellCall([sys.executable, os.path.abspath(targetScript)])\n sys.exit()\n if '-v' in sys.argv or '--version' in sys.argv:\n from psychopy import __version__\n msg = ('PsychoPy3, version %s (c)Jonathan Peirce 2018, GNU GPL license'\n % __version__)\n print(msg)\n sys.exit()\n if '-h' in sys.argv or '--help' in sys.argv:\n print(\"\"\"Starts the PsychoPy3 application.\n\nUsage: python PsychoPy.py [options] [file]\n\nWithout options or files provided this starts PsychoPy using prefs to\ndecide on the view(s) to open. If optional [file] is provided action\ndepends on the type of the [file]:\n\n Python script 'file.py' -- opens coder\n\n Experiment design 'file.psyexp' -- opens builder\n\nOptions:\n -c, --coder, coder opens coder view only\n -b, --builder, builder opens builder view only\n -x script.py execute script.py using StandAlone python\n\n -v, --version prints version and exits\n -h, --help prints this help and exit\n\n --firstrun launches configuration wizard\n --no-splash suppresses splash screen\n\n\"\"\")\n sys.exit()\n\n if (('| packaged by conda-forge |' in sys.version or '|Anaconda' in sys.version)\n and sys.platform == 'darwin' and sys.version_info >= (3,9)):\n\n # On macOS with Anaconda, GUI applications need to be run using\n # `pythonw`. 
Since we have no way to determine whether this is currently\n # the case, we run this script again -- ensuring we're definitely using\n # pythonw.\n import os\n env = os.environ\n PYTHONW = env.get('PYTHONW', 'False')\n\n if PYTHONW != 'True':\n from psychopy import core\n cmd = [sys.executable + 'w', __file__]\n if '--no-splash' in sys.argv:\n cmd.append('--no-splash')\n\n stdout, stderr = core.shellCall(cmd,\n env=dict(env, PYTHONW='True'),\n stderr=True)\n print(stdout, file=sys.stdout)\n print(stderr, file=sys.stderr)\n sys.exit()\n else:\n start_app()\n else:\n start_app()\n\n\nif __name__ == '__main__':\n main()\n", "path": "psychopy/app/psychopyApp.py"}]}
| 1,940 | 185 |
gh_patches_debug_18883
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-text-5
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maps not properly sized in some cases
Some users get


> pognal owgeomap.py file: pokaze svet raztegnjen cez celo sirino okna (visina ni cela), ce zamenjam na europe dobim majhno slikico, po resize-u okna se malo izboljsa (slika 2).
Qt\* version likely not culprit.
cc: @BlazZupan @lanzagar
Maps not properly sized in some cases
Some users get


> pognal owgeomap.py file: pokaze svet raztegnjen cez celo sirino okna (visina ni cela), ce zamenjam na europe dobim majhno slikico, po resize-u okna se malo izboljsa (slika 2).
Qt\* version likely not culprit.
cc: @BlazZupan @lanzagar
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/widgets/owgeomap.py`
Content:
```
1 # coding: utf-8
2 from collections import OrderedDict
3 from os import path
4 from math import pi as PI
5
6 import numpy as np
7
8 from PyQt4 import QtCore, QtGui
9
10 from Orange.widgets import widget, gui, settings
11 from Orange.data import Table
12 from orangecontrib.text.country_codes import \
13 CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \
14 CC_WORLD, INV_CC_WORLD, \
15 CC_USA, INV_CC_USA, SET_CC_USA
16
17
18 class Map:
19 WORLD = 'world_mill_en'
20 EUROPE = 'europe_mill_en'
21 USA = 'us_aea_en'
22 all = (('World', WORLD),
23 ('Europe', EUROPE),
24 ('USA', USA))
25
26
27 class OWGeoMap(widget.OWWidget):
28 name = "GeoMap"
29 priority = 20000
30 icon = "icons/GeoMap.svg"
31 inputs = [("Data", Table, "on_data")]
32 outputs = [('Data', Table)]
33
34 want_main_area = False
35
36 selected_attr = settings.Setting(0)
37 selected_map = settings.Setting(0)
38 regions = settings.Setting([])
39
40 def __init__(self):
41 super().__init__()
42 self._create_layout()
43
44 @QtCore.pyqtSlot(str, result=str)
45 def region_selected(self, regions):
46 """Called from JavaScript"""
47 if not regions:
48 return self.send('Data', None)
49 self.regions = regions.split(',')
50 from Orange.data.filter import FilterStringList
51 # TODO, FIXME: make this work for discrete attrs also
52 filter = FilterStringList(self.metas[self.selected_attr], self.regions)
53 self.send('Data', self.data._filter_values(filter))
54
55 def _create_layout(self):
56 box = gui.widgetBox(self.controlArea,
57 orientation='horizontal')
58 self.attr_combo = gui.comboBox(box, self, 'selected_attr',
59 orientation='horizontal',
60 label='Region attribute:',
61 callback=self.on_attr_change)
62 self.map_combo = gui.comboBox(box, self, 'selected_map',
63 orientation='horizontal',
64 label='Map type:',
65 callback=self.on_map_change,
66 items=Map.all)
67 hexpand = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
68 QtGui.QSizePolicy.Fixed)
69 self.attr_combo.setSizePolicy(hexpand)
70 self.map_combo.setSizePolicy(hexpand)
71 html = '''
72 <!DOCTYPE html>
73 <html>
74 <head>
75 <meta charset="utf-8">
76 <style>
77 html, body, #map {margin:0px;padding:0px;width:100%;height:100%;}
78 </style>
79 <link href="resources/jquery-jvectormap-2.0.2.css" rel="stylesheet">
80 </head>
81 <body>
82 <div id="map"></div>
83 </body>
84 </html>'''
85 self.webview = gui.WebviewWidget(self.controlArea, self, debug=True)
86 self.webview.setHtml(html, 'file://{}/'.format(path.dirname(__file__)))
87 for script in ('jquery-2.1.4.min.js',
88 'jquery-jvectormap-2.0.2.min.js',
89 'jquery-jvectormap-world-mill-en.js',
90 'jquery-jvectormap-europe-mill-en.js',
91 'jquery-jvectormap-us-aea-en.js',
92 'geomap-script.js'):
93 self.webview.evalJS(open(path.join(path.dirname(__file__), 'resources', script), encoding='utf-8').read())
94 self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,
95 Map.EUROPE: CC_EUROPE,
96 Map.USA: CC_USA}))
97
98 def _repopulate_attr_combo(self, data):
99 from itertools import chain
100 self.metas = [a for a in chain(data.domain.metas,
101 data.domain.attributes,
102 data.domain.class_vars)
103 # Filter string variables
104 if (a.is_discrete and a.values and isinstance(a.values[0], str) and not a.ordered or
105 a.is_string)] if data else []
106 self.attr_combo.clear()
107 self.selected_attr = 0
108 for i, var in enumerate(self.metas):
109 self.attr_combo.addItem(gui.attributeIconDict[var], var.name)
110 # Select default attribute
111 if var.name.lower() == 'country':
112 self.selected_attr = i
113 if self.metas:
114 self.attr_combo.setCurrentIndex(self.attr_combo.findText(self.metas[self.selected_attr].name))
115
116 def on_data(self, data):
117 self.data = data
118 self._repopulate_attr_combo(data)
119 if not data:
120 self.region_selected('')
121 self.webview.evalJS('DATA = {}; renderMap();')
122 else:
123 self.on_attr_change()
124
125
126 def on_map_change(self, map_code=''):
127 if map_code:
128 self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))
129 else:
130 map_code = map_code or self.map_combo.itemData(self.selected_map)
131 self.webview.evalJS('MAP_CODE = "{}";'.format(map_code))
132 self.webview.evalJS('SELECTED_REGIONS = {};'.format(self.regions))
133 self.webview.evalJS('renderMap();')
134
135 def on_attr_change(self):
136 attr = self.metas[self.selected_attr]
137 from collections import Counter, defaultdict
138 counts = Counter(self.data.get_column_view(self.data.domain.index(attr))[0])
139 # Auto-select region map
140 values = set(counts)
141 if 0 == len(values - SET_CC_USA):
142 cc_map, inv_cc_map, map_code = CC_USA, INV_CC_USA, Map.USA
143 elif 0 == len(values - SET_CC_EUROPE):
144 cc_map, inv_cc_map, map_code = CC_EUROPE, INV_CC_EUROPE, Map.EUROPE
145 else:
146 cc_map, inv_cc_map, map_code = CC_WORLD, INV_CC_WORLD, Map.WORLD
147 data = defaultdict(int)
148 for cc in counts:
149 data[inv_cc_map.get(cc, cc)] += counts[cc]
150 self.webview.evalJS('DATA = {};'.format(dict(data)))
151 self.on_map_change(map_code)
152
153
154 def main():
155 from Orange.data import Table, Domain, ContinuousVariable, StringVariable
156
157 words = np.column_stack([
158 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),
159 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),
160 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),
161 ])
162 metas = [
163 StringVariable('World'),
164 StringVariable('Europe'),
165 StringVariable('USA'),
166 ]
167 domain = Domain([], metas=metas)
168 table = Table.from_numpy(domain,
169 X=np.zeros((len(words), 0)),
170 metas=words)
171 app = QtGui.QApplication([''])
172 w = OWGeoMap()
173 w.on_data(table)
174 w.show()
175 app.exec()
176
177
178 if __name__ == "__main__":
179 main()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/widgets/owgeomap.py b/orangecontrib/text/widgets/owgeomap.py
--- a/orangecontrib/text/widgets/owgeomap.py
+++ b/orangecontrib/text/widgets/owgeomap.py
@@ -73,17 +73,18 @@
<html>
<head>
<meta charset="utf-8">
+<base href="file://{}/"/>
<style>
-html, body, #map {margin:0px;padding:0px;width:100%;height:100%;}
+html, body, #map {{margin:0px;padding:0px;width:100%;height:100%;}}
</style>
<link href="resources/jquery-jvectormap-2.0.2.css" rel="stylesheet">
</head>
<body>
<div id="map"></div>
</body>
-</html>'''
+</html>'''.format(path.abspath(path.dirname(__file__)))
self.webview = gui.WebviewWidget(self.controlArea, self, debug=True)
- self.webview.setHtml(html, 'file://{}/'.format(path.dirname(__file__)))
+ self.webview.setHtml(html)
for script in ('jquery-2.1.4.min.js',
'jquery-jvectormap-2.0.2.min.js',
'jquery-jvectormap-world-mill-en.js',
|
{"golden_diff": "diff --git a/orangecontrib/text/widgets/owgeomap.py b/orangecontrib/text/widgets/owgeomap.py\n--- a/orangecontrib/text/widgets/owgeomap.py\n+++ b/orangecontrib/text/widgets/owgeomap.py\n@@ -73,17 +73,18 @@\n <html>\n <head>\n <meta charset=\"utf-8\">\n+<base href=\"file://{}/\"/>\n <style>\n-html, body, #map {margin:0px;padding:0px;width:100%;height:100%;}\n+html, body, #map {{margin:0px;padding:0px;width:100%;height:100%;}}\n </style>\n <link href=\"resources/jquery-jvectormap-2.0.2.css\" rel=\"stylesheet\">\n </head>\n <body>\n <div id=\"map\"></div>\n </body>\n-</html>'''\n+</html>'''.format(path.abspath(path.dirname(__file__)))\n self.webview = gui.WebviewWidget(self.controlArea, self, debug=True)\n- self.webview.setHtml(html, 'file://{}/'.format(path.dirname(__file__)))\n+ self.webview.setHtml(html)\n for script in ('jquery-2.1.4.min.js',\n 'jquery-jvectormap-2.0.2.min.js',\n 'jquery-jvectormap-world-mill-en.js',\n", "issue": "Maps not properly sized in some cases\nSome users get\n\n\n\n\n> pognal owgeomap.py file: pokaze svet raztegnjen cez celo sirino okna (visina ni cela), ce zamenjam na europe dobim majhno slikico, po resize-u okna se malo izboljsa (slika 2).\n\nQt\\* version likely not culprit.\n\ncc: @BlazZupan @lanzagar\n\nMaps not properly sized in some cases\nSome users get\n\n\n\n\n> pognal owgeomap.py file: pokaze svet raztegnjen cez celo sirino okna (visina ni cela), ce zamenjam na europe dobim majhno slikico, po resize-u okna se malo izboljsa (slika 2).\n\nQt\\* version likely not culprit.\n\ncc: @BlazZupan @lanzagar\n\n", "before_files": [{"content": "# coding: utf-8\nfrom collections import OrderedDict\nfrom os import path\nfrom math import pi as PI\n\nimport numpy as np\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.data import Table\nfrom orangecontrib.text.country_codes import \\\n CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \\\n CC_WORLD, INV_CC_WORLD, \\\n CC_USA, INV_CC_USA, SET_CC_USA\n\n\nclass Map:\n WORLD = 'world_mill_en'\n EUROPE = 'europe_mill_en'\n USA = 'us_aea_en'\n all = (('World', WORLD),\n ('Europe', EUROPE),\n ('USA', USA))\n\n\nclass OWGeoMap(widget.OWWidget):\n name = \"GeoMap\"\n priority = 20000\n icon = \"icons/GeoMap.svg\"\n inputs = [(\"Data\", Table, \"on_data\")]\n outputs = [('Data', Table)]\n\n want_main_area = False\n\n selected_attr = settings.Setting(0)\n selected_map = settings.Setting(0)\n regions = settings.Setting([])\n\n def __init__(self):\n super().__init__()\n self._create_layout()\n\n @QtCore.pyqtSlot(str, result=str)\n def region_selected(self, regions):\n \"\"\"Called from JavaScript\"\"\"\n if not regions:\n return self.send('Data', None)\n self.regions = regions.split(',')\n from Orange.data.filter import FilterStringList\n # TODO, FIXME: make this work for discrete attrs also\n filter = FilterStringList(self.metas[self.selected_attr], self.regions)\n self.send('Data', self.data._filter_values(filter))\n\n def _create_layout(self):\n box = gui.widgetBox(self.controlArea,\n orientation='horizontal')\n self.attr_combo = gui.comboBox(box, self, 'selected_attr',\n orientation='horizontal',\n label='Region attribute:',\n callback=self.on_attr_change)\n self.map_combo = gui.comboBox(box, self, 'selected_map',\n orientation='horizontal',\n label='Map type:',\n callback=self.on_map_change,\n items=Map.all)\n hexpand = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Fixed)\n self.attr_combo.setSizePolicy(hexpand)\n 
self.map_combo.setSizePolicy(hexpand)\n html = '''\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n<style>\nhtml, body, #map {margin:0px;padding:0px;width:100%;height:100%;}\n</style>\n<link href=\"resources/jquery-jvectormap-2.0.2.css\" rel=\"stylesheet\">\n</head>\n<body>\n<div id=\"map\"></div>\n</body>\n</html>'''\n self.webview = gui.WebviewWidget(self.controlArea, self, debug=True)\n self.webview.setHtml(html, 'file://{}/'.format(path.dirname(__file__)))\n for script in ('jquery-2.1.4.min.js',\n 'jquery-jvectormap-2.0.2.min.js',\n 'jquery-jvectormap-world-mill-en.js',\n 'jquery-jvectormap-europe-mill-en.js',\n 'jquery-jvectormap-us-aea-en.js',\n 'geomap-script.js'):\n self.webview.evalJS(open(path.join(path.dirname(__file__), 'resources', script), encoding='utf-8').read())\n self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,\n Map.EUROPE: CC_EUROPE,\n Map.USA: CC_USA}))\n\n def _repopulate_attr_combo(self, data):\n from itertools import chain\n self.metas = [a for a in chain(data.domain.metas,\n data.domain.attributes,\n data.domain.class_vars)\n # Filter string variables\n if (a.is_discrete and a.values and isinstance(a.values[0], str) and not a.ordered or\n a.is_string)] if data else []\n self.attr_combo.clear()\n self.selected_attr = 0\n for i, var in enumerate(self.metas):\n self.attr_combo.addItem(gui.attributeIconDict[var], var.name)\n # Select default attribute\n if var.name.lower() == 'country':\n self.selected_attr = i\n if self.metas:\n self.attr_combo.setCurrentIndex(self.attr_combo.findText(self.metas[self.selected_attr].name))\n\n def on_data(self, data):\n self.data = data\n self._repopulate_attr_combo(data)\n if not data:\n self.region_selected('')\n self.webview.evalJS('DATA = {}; renderMap();')\n else:\n self.on_attr_change()\n\n\n def on_map_change(self, map_code=''):\n if map_code:\n self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))\n else:\n map_code = map_code or self.map_combo.itemData(self.selected_map)\n self.webview.evalJS('MAP_CODE = \"{}\";'.format(map_code))\n self.webview.evalJS('SELECTED_REGIONS = {};'.format(self.regions))\n self.webview.evalJS('renderMap();')\n\n def on_attr_change(self):\n attr = self.metas[self.selected_attr]\n from collections import Counter, defaultdict\n counts = Counter(self.data.get_column_view(self.data.domain.index(attr))[0])\n # Auto-select region map\n values = set(counts)\n if 0 == len(values - SET_CC_USA):\n cc_map, inv_cc_map, map_code = CC_USA, INV_CC_USA, Map.USA\n elif 0 == len(values - SET_CC_EUROPE):\n cc_map, inv_cc_map, map_code = CC_EUROPE, INV_CC_EUROPE, Map.EUROPE\n else:\n cc_map, inv_cc_map, map_code = CC_WORLD, INV_CC_WORLD, Map.WORLD\n data = defaultdict(int)\n for cc in counts:\n data[inv_cc_map.get(cc, cc)] += counts[cc]\n self.webview.evalJS('DATA = {};'.format(dict(data)))\n self.on_map_change(map_code)\n\n\ndef main():\n from Orange.data import Table, Domain, ContinuousVariable, StringVariable\n\n words = np.column_stack([\n 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),\n 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),\n 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),\n ])\n metas = [\n StringVariable('World'),\n StringVariable('Europe'),\n StringVariable('USA'),\n ]\n domain = Domain([], metas=metas)\n table = Table.from_numpy(domain,\n X=np.zeros((len(words), 0)),\n metas=words)\n app = QtGui.QApplication([''])\n w = OWGeoMap()\n w.on_data(table)\n w.show()\n app.exec()\n\n\nif __name__ == \"__main__\":\n main()\n", 
"path": "orangecontrib/text/widgets/owgeomap.py"}], "after_files": [{"content": "# coding: utf-8\nfrom collections import OrderedDict\nfrom os import path\nfrom math import pi as PI\n\nimport numpy as np\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.data import Table\nfrom orangecontrib.text.country_codes import \\\n CC_EUROPE, INV_CC_EUROPE, SET_CC_EUROPE, \\\n CC_WORLD, INV_CC_WORLD, \\\n CC_USA, INV_CC_USA, SET_CC_USA\n\n\nclass Map:\n WORLD = 'world_mill_en'\n EUROPE = 'europe_mill_en'\n USA = 'us_aea_en'\n all = (('World', WORLD),\n ('Europe', EUROPE),\n ('USA', USA))\n\n\nclass OWGeoMap(widget.OWWidget):\n name = \"GeoMap\"\n priority = 20000\n icon = \"icons/GeoMap.svg\"\n inputs = [(\"Data\", Table, \"on_data\")]\n outputs = [('Data', Table)]\n\n want_main_area = False\n\n selected_attr = settings.Setting(0)\n selected_map = settings.Setting(0)\n regions = settings.Setting([])\n\n def __init__(self):\n super().__init__()\n self._create_layout()\n\n @QtCore.pyqtSlot(str, result=str)\n def region_selected(self, regions):\n \"\"\"Called from JavaScript\"\"\"\n if not regions:\n return self.send('Data', None)\n self.regions = regions.split(',')\n from Orange.data.filter import FilterStringList\n # TODO, FIXME: make this work for discrete attrs also\n filter = FilterStringList(self.metas[self.selected_attr], self.regions)\n self.send('Data', self.data._filter_values(filter))\n\n def _create_layout(self):\n box = gui.widgetBox(self.controlArea,\n orientation='horizontal')\n self.attr_combo = gui.comboBox(box, self, 'selected_attr',\n orientation='horizontal',\n label='Region attribute:',\n callback=self.on_attr_change)\n self.map_combo = gui.comboBox(box, self, 'selected_map',\n orientation='horizontal',\n label='Map type:',\n callback=self.on_map_change,\n items=Map.all)\n hexpand = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Fixed)\n self.attr_combo.setSizePolicy(hexpand)\n self.map_combo.setSizePolicy(hexpand)\n html = '''\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"utf-8\">\n<base href=\"file://{}/\"/>\n<style>\nhtml, body, #map {{margin:0px;padding:0px;width:100%;height:100%;}}\n</style>\n<link href=\"resources/jquery-jvectormap-2.0.2.css\" rel=\"stylesheet\">\n</head>\n<body>\n<div id=\"map\"></div>\n</body>\n</html>'''.format(path.abspath(path.dirname(__file__)))\n self.webview = gui.WebviewWidget(self.controlArea, self, debug=True)\n self.webview.setHtml(html)\n for script in ('jquery-2.1.4.min.js',\n 'jquery-jvectormap-2.0.2.min.js',\n 'jquery-jvectormap-world-mill-en.js',\n 'jquery-jvectormap-europe-mill-en.js',\n 'jquery-jvectormap-us-aea-en.js',\n 'geomap-script.js'):\n self.webview.evalJS(open(path.join(path.dirname(__file__), 'resources', script), encoding='utf-8').read())\n self.webview.evalJS('REGIONS = {};'.format({Map.WORLD: CC_WORLD,\n Map.EUROPE: CC_EUROPE,\n Map.USA: CC_USA}))\n\n def _repopulate_attr_combo(self, data):\n from itertools import chain\n self.metas = [a for a in chain(data.domain.metas,\n data.domain.attributes,\n data.domain.class_vars)\n # Filter string variables\n if (a.is_discrete and a.values and isinstance(a.values[0], str) and not a.ordered or\n a.is_string)] if data else []\n self.attr_combo.clear()\n self.selected_attr = 0\n for i, var in enumerate(self.metas):\n self.attr_combo.addItem(gui.attributeIconDict[var], var.name)\n # Select default attribute\n if var.name.lower() == 'country':\n self.selected_attr = i\n if self.metas:\n 
self.attr_combo.setCurrentIndex(self.attr_combo.findText(self.metas[self.selected_attr].name))\n\n def on_data(self, data):\n self.data = data\n self._repopulate_attr_combo(data)\n if not data:\n self.region_selected('')\n self.webview.evalJS('DATA = {}; renderMap();')\n else:\n self.on_attr_change()\n\n\n def on_map_change(self, map_code=''):\n if map_code:\n self.map_combo.setCurrentIndex(self.map_combo.findData(map_code))\n else:\n map_code = map_code or self.map_combo.itemData(self.selected_map)\n self.webview.evalJS('MAP_CODE = \"{}\";'.format(map_code))\n self.webview.evalJS('SELECTED_REGIONS = {};'.format(self.regions))\n self.webview.evalJS('renderMap();')\n\n def on_attr_change(self):\n attr = self.metas[self.selected_attr]\n from collections import Counter, defaultdict\n counts = Counter(self.data.get_column_view(self.data.domain.index(attr))[0])\n # Auto-select region map\n values = set(counts)\n if 0 == len(values - SET_CC_USA):\n cc_map, inv_cc_map, map_code = CC_USA, INV_CC_USA, Map.USA\n elif 0 == len(values - SET_CC_EUROPE):\n cc_map, inv_cc_map, map_code = CC_EUROPE, INV_CC_EUROPE, Map.EUROPE\n else:\n cc_map, inv_cc_map, map_code = CC_WORLD, INV_CC_WORLD, Map.WORLD\n data = defaultdict(int)\n for cc in counts:\n data[inv_cc_map.get(cc, cc)] += counts[cc]\n self.webview.evalJS('DATA = {};'.format(dict(data)))\n self.on_map_change(map_code)\n\n\ndef main():\n from Orange.data import Table, Domain, ContinuousVariable, StringVariable\n\n words = np.column_stack([\n 'Slovenia Slovenia SVN USA Iraq Iraq Iraq Iraq France FR'.split(),\n 'Slovenia Slovenia SVN France FR Austria NL GB GB GB'.split(),\n 'Alabama AL Texas TX TX TX MS Montana US-MT MT'.split(),\n ])\n metas = [\n StringVariable('World'),\n StringVariable('Europe'),\n StringVariable('USA'),\n ]\n domain = Domain([], metas=metas)\n table = Table.from_numpy(domain,\n X=np.zeros((len(words), 0)),\n metas=words)\n app = QtGui.QApplication([''])\n w = OWGeoMap()\n w.on_data(table)\n w.show()\n app.exec()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "orangecontrib/text/widgets/owgeomap.py"}]}
| 2,655 | 299 |
gh_patches_debug_3344
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-2962
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AL: Alabama bill count very low
State: AL - Alabama
The [bill results for AL](https://openstates.org/graphql?operationName=null&query=%7B%0A%20%20bills(jurisdiction%3A%22Alabama%22%2Csession%3A%222019rs%22%2Cfirst%3A10)%20%7B%0A%0A%20%20%20%20totalCount%0A%20%20%7D%0A%7D) indicate that 133 bills are available for session `2019rs`.
```
{
"data": {
"bills": {
"totalCount": 133
}
}
}
```
However the AL website lists "849 Instruments" in the results for searching the 2019 regular session with bills from both houses that have had one reading in house of origin (this page is unlinkable, because AL site is a horror). So a lot more bills should be showing up.
Running the scraper locally, it seems to work fine:
`docker-compose run --rm scrape al bills --scrape session=2019rs`
```
al (scrape)
bills: {'session': '2019rs'}
bills scrape:
duration: 0:25:43.643260
objects:
bill: 1067
vote_event: 458
jurisdiction scrape:
duration: 0:00:00.019707
objects:
jurisdiction: 1
organization: 3
```
Anything I can do to help?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/al/__init__.py`
Content:
```
1 from pupa.scrape import Jurisdiction, Organization
2
3 from .bills import ALBillScraper
4 from .events import ALEventScraper
5 from .people import ALPersonScraper
6
7
8 class Alabama(Jurisdiction):
9 division_id = "ocd-division/country:us/state:al"
10 classification = "government"
11 name = "Alabama"
12 url = "http://www.legislature.state.al.us/"
13 scrapers = {
14 'bills': ALBillScraper,
15 'events': ALEventScraper,
16 'people': ALPersonScraper,
17 }
18 legislative_sessions = [
19 {
20 "_scraped_name": "Regular Session 2011",
21 "classification": "primary",
22 "identifier": "2011rs",
23 "name": "2011 Regular Session"
24 },
25 {
26 "_scraped_name": "First Special Session 2012",
27 "classification": "special",
28 "identifier": "2012fs",
29 "name": "First Special Session 2012"
30 },
31 {
32 "_scraped_name": "Regular Session 2012",
33 "classification": "primary",
34 "identifier": "2012rs",
35 "name": "2012 Regular Session"
36 },
37 {
38 "_scraped_name": "Regular Session 2013",
39 "classification": "primary",
40 "identifier": "2013rs",
41 "name": "2013 Regular Session"
42 },
43 {
44 "_scraped_name": "Regular Session 2014",
45 "classification": "primary",
46 "identifier": "2014rs",
47 "name": "2014 Regular Session"
48 },
49 {
50 "_scraped_name": "First Special Session 2015",
51 "classification": "special",
52 "identifier": "2015fs",
53 "name": "First Special Session 2015"
54 },
55 {
56 "_scraped_name": "Organizational Session 2015",
57 "classification": "primary",
58 "identifier": "2015os",
59 "name": "2015 Organizational Session"
60 },
61 {
62 "_scraped_name": "Regular Session 2015",
63 "classification": "primary",
64 "identifier": "2015rs",
65 "name": "2015 Regular Session"
66 },
67 {
68 "_scraped_name": "Second Special Session 2015",
69 "classification": "special",
70 "identifier": "2015ss",
71 "name": "Second Special Session 2015"
72 },
73 {
74 "_scraped_name": "First Special Session 2016",
75 "classification": "special",
76 "identifier": "2016fs",
77 "name": "First Special Session 2016"
78 },
79 {
80 "_scraped_name": "Regular Session 2016",
81 "classification": "primary",
82 "identifier": "2016rs",
83 "name": "2016 Regular Session"
84 },
85 {
86 "_scraped_name": "Regular Session 2017",
87 "classification": "primary",
88 "end_date": "2017-05-31",
89 "identifier": "2017rs",
90 "name": "2017 Regular Session",
91 "start_date": "2017-02-07"
92 },
93 {
94 "_scraped_name": "Regular Session 2018",
95 "classification": "primary",
96 "end_date": "2018-03-29",
97 "identifier": "2018rs",
98 "name": "2018 Regular Session",
99 "start_date": "2018-01-09",
100 },
101 {
102 "_scraped_name": "First Special Session 2019",
103 "classification": "special",
104 "identifier": "2019fs",
105 "name": "First Special Session 2019",
106 "start_date": "2019-03-08"
107 },
108 {
109 "_scraped_name": "Regular Session 2019",
110 "classification": "primary",
111 "end_date": "2019-06-17",
112 "identifier": "2019rs",
113 "name": "2019 Regular Session",
114 "start_date": "2019-03-05"
115 },
116 ]
117 ignored_scraped_sessions = [
118 "Regular Session 1998",
119 "Organizational Session 1999",
120 "Regular Session 1999",
121 "First Special Session 1999",
122 "Organizational Session 2011",
123 "Second Special Session 1999",
124 "Regular Session 2000",
125 "Regular Session 2001",
126 "First Special Session 2001",
127 "Second Special Session 2001",
128 "Third Special Session 2001",
129 "Fourth Special Session 2001",
130 "Regular Session 2002",
131 "Organizational Session 2003",
132 "Regular Session 2003",
133 "First Special Session 2003",
134 "Second Special Session 2003",
135 "Regular Session 2004",
136 "First Special Session 2004",
137 "Regular Session 2005",
138 "First Special Session 2005",
139 "Regular Session 2006",
140 "Organizational Session 2007",
141 "Regular Session 2007",
142 "First Special Session 2007",
143 "Regular Session 2008",
144 "First Special Session 2008",
145 "Regular Session 2009",
146 "Regular Session 2010",
147 "First Special Session 2009",
148 "First Special Session 2010",
149 "Regular Session 2016",
150 "Organizational Session 2019",
151 ]
152
153 def get_organizations(self):
154 legislature_name = "Alabama Legislature"
155
156 legislature = Organization(name=legislature_name,
157 classification="legislature")
158 upper = Organization('Senate', classification='upper',
159 parent_id=legislature._id)
160 lower = Organization('House', classification='lower',
161 parent_id=legislature._id)
162
163 yield legislature
164 yield upper
165 yield lower
166
167 def get_session_list(self):
168 import lxml.html
169 import requests
170
171 s = requests.Session()
172 r = s.get('http://alisondb.legislature.state.al.us/alison/SelectSession.aspx')
173 doc = lxml.html.fromstring(r.text)
174 return doc.xpath('//*[@id="ContentPlaceHolder1_gvSessions"]/tr/td/font/a/font/text()')
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/al/__init__.py b/openstates/al/__init__.py
--- a/openstates/al/__init__.py
+++ b/openstates/al/__init__.py
@@ -106,7 +106,7 @@
"start_date": "2019-03-08"
},
{
- "_scraped_name": "Regular Session 2019",
+ "_scraped_name": " Regular Session 2019",
"classification": "primary",
"end_date": "2019-06-17",
"identifier": "2019rs",
|
{"golden_diff": "diff --git a/openstates/al/__init__.py b/openstates/al/__init__.py\n--- a/openstates/al/__init__.py\n+++ b/openstates/al/__init__.py\n@@ -106,7 +106,7 @@\n \"start_date\": \"2019-03-08\"\n },\n {\n- \"_scraped_name\": \"Regular Session 2019\",\n+ \"_scraped_name\": \" Regular Session 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-06-17\",\n \"identifier\": \"2019rs\",\n", "issue": "AL: Alabama bill count very low\nState: AL - Alabama\r\n\r\nThe [bill results for AL](https://openstates.org/graphql?operationName=null&query=%7B%0A%20%20bills(jurisdiction%3A%22Alabama%22%2Csession%3A%222019rs%22%2Cfirst%3A10)%20%7B%0A%0A%20%20%20%20totalCount%0A%20%20%7D%0A%7D) indicate that 133 bills are available for session `2019rs`.\r\n\r\n```\r\n{\r\n \"data\": {\r\n \"bills\": {\r\n \"totalCount\": 133\r\n }\r\n }\r\n}\r\n```\r\n\r\nHowever the AL website lists \"849 Instruments\" in the results for searching the 2019 regular session with bills from both houses that have had one reading in house of origin (this page is unlinkable, because AL site is a horror). So a lot more bills should be showing up.\r\n\r\nRunning the scraper locally, it seems to work fine:\r\n\r\n`docker-compose run --rm scrape al bills --scrape session=2019rs`\r\n\r\n```\r\nal (scrape)\r\n bills: {'session': '2019rs'}\r\nbills scrape:\r\n duration: 0:25:43.643260\r\n objects:\r\n bill: 1067\r\n vote_event: 458\r\njurisdiction scrape:\r\n duration: 0:00:00.019707\r\n objects:\r\n jurisdiction: 1\r\n organization: 3\r\n```\r\n\r\nAnything I can do to help?\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom .bills import ALBillScraper\nfrom .events import ALEventScraper\nfrom .people import ALPersonScraper\n\n\nclass Alabama(Jurisdiction):\n division_id = \"ocd-division/country:us/state:al\"\n classification = \"government\"\n name = \"Alabama\"\n url = \"http://www.legislature.state.al.us/\"\n scrapers = {\n 'bills': ALBillScraper,\n 'events': ALEventScraper,\n 'people': ALPersonScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"Regular Session 2011\",\n \"classification\": \"primary\",\n \"identifier\": \"2011rs\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"First Special Session 2012\",\n \"classification\": \"special\",\n \"identifier\": \"2012fs\",\n \"name\": \"First Special Session 2012\"\n },\n {\n \"_scraped_name\": \"Regular Session 2012\",\n \"classification\": \"primary\",\n \"identifier\": \"2012rs\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2013\",\n \"classification\": \"primary\",\n \"identifier\": \"2013rs\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2014\",\n \"classification\": \"primary\",\n \"identifier\": \"2014rs\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"First Special Session 2015\",\n \"classification\": \"special\",\n \"identifier\": \"2015fs\",\n \"name\": \"First Special Session 2015\"\n },\n {\n \"_scraped_name\": \"Organizational Session 2015\",\n \"classification\": \"primary\",\n \"identifier\": \"2015os\",\n \"name\": \"2015 Organizational Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2015\",\n \"classification\": \"primary\",\n \"identifier\": \"2015rs\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"Second Special Session 2015\",\n \"classification\": \"special\",\n \"identifier\": \"2015ss\",\n \"name\": \"Second Special Session 
2015\"\n },\n {\n \"_scraped_name\": \"First Special Session 2016\",\n \"classification\": \"special\",\n \"identifier\": \"2016fs\",\n \"name\": \"First Special Session 2016\"\n },\n {\n \"_scraped_name\": \"Regular Session 2016\",\n \"classification\": \"primary\",\n \"identifier\": \"2016rs\",\n \"name\": \"2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2017\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-05-31\",\n \"identifier\": \"2017rs\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-02-07\"\n },\n {\n \"_scraped_name\": \"Regular Session 2018\",\n \"classification\": \"primary\",\n \"end_date\": \"2018-03-29\",\n \"identifier\": \"2018rs\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-01-09\",\n },\n {\n \"_scraped_name\": \"First Special Session 2019\",\n \"classification\": \"special\",\n \"identifier\": \"2019fs\",\n \"name\": \"First Special Session 2019\",\n \"start_date\": \"2019-03-08\"\n },\n {\n \"_scraped_name\": \"Regular Session 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-06-17\",\n \"identifier\": \"2019rs\",\n \"name\": \"2019 Regular Session\",\n \"start_date\": \"2019-03-05\"\n },\n ]\n ignored_scraped_sessions = [\n \"Regular Session 1998\",\n \"Organizational Session 1999\",\n \"Regular Session 1999\",\n \"First Special Session 1999\",\n \"Organizational Session 2011\",\n \"Second Special Session 1999\",\n \"Regular Session 2000\",\n \"Regular Session 2001\",\n \"First Special Session 2001\",\n \"Second Special Session 2001\",\n \"Third Special Session 2001\",\n \"Fourth Special Session 2001\",\n \"Regular Session 2002\",\n \"Organizational Session 2003\",\n \"Regular Session 2003\",\n \"First Special Session 2003\",\n \"Second Special Session 2003\",\n \"Regular Session 2004\",\n \"First Special Session 2004\",\n \"Regular Session 2005\",\n \"First Special Session 2005\",\n \"Regular Session 2006\",\n \"Organizational Session 2007\",\n \"Regular Session 2007\",\n \"First Special Session 2007\",\n \"Regular Session 2008\",\n \"First Special Session 2008\",\n \"Regular Session 2009\",\n \"Regular Session 2010\",\n \"First Special Session 2009\",\n \"First Special Session 2010\",\n \"Regular Session 2016\",\n \"Organizational Session 2019\",\n ]\n\n def get_organizations(self):\n legislature_name = \"Alabama Legislature\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import lxml.html\n import requests\n\n s = requests.Session()\n r = s.get('http://alisondb.legislature.state.al.us/alison/SelectSession.aspx')\n doc = lxml.html.fromstring(r.text)\n return doc.xpath('//*[@id=\"ContentPlaceHolder1_gvSessions\"]/tr/td/font/a/font/text()')\n", "path": "openstates/al/__init__.py"}], "after_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom .bills import ALBillScraper\nfrom .events import ALEventScraper\nfrom .people import ALPersonScraper\n\n\nclass Alabama(Jurisdiction):\n division_id = \"ocd-division/country:us/state:al\"\n classification = \"government\"\n name = \"Alabama\"\n url = \"http://www.legislature.state.al.us/\"\n scrapers = {\n 'bills': ALBillScraper,\n 'events': ALEventScraper,\n 'people': ALPersonScraper,\n }\n legislative_sessions = [\n {\n 
\"_scraped_name\": \"Regular Session 2011\",\n \"classification\": \"primary\",\n \"identifier\": \"2011rs\",\n \"name\": \"2011 Regular Session\"\n },\n {\n \"_scraped_name\": \"First Special Session 2012\",\n \"classification\": \"special\",\n \"identifier\": \"2012fs\",\n \"name\": \"First Special Session 2012\"\n },\n {\n \"_scraped_name\": \"Regular Session 2012\",\n \"classification\": \"primary\",\n \"identifier\": \"2012rs\",\n \"name\": \"2012 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2013\",\n \"classification\": \"primary\",\n \"identifier\": \"2013rs\",\n \"name\": \"2013 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2014\",\n \"classification\": \"primary\",\n \"identifier\": \"2014rs\",\n \"name\": \"2014 Regular Session\"\n },\n {\n \"_scraped_name\": \"First Special Session 2015\",\n \"classification\": \"special\",\n \"identifier\": \"2015fs\",\n \"name\": \"First Special Session 2015\"\n },\n {\n \"_scraped_name\": \"Organizational Session 2015\",\n \"classification\": \"primary\",\n \"identifier\": \"2015os\",\n \"name\": \"2015 Organizational Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2015\",\n \"classification\": \"primary\",\n \"identifier\": \"2015rs\",\n \"name\": \"2015 Regular Session\"\n },\n {\n \"_scraped_name\": \"Second Special Session 2015\",\n \"classification\": \"special\",\n \"identifier\": \"2015ss\",\n \"name\": \"Second Special Session 2015\"\n },\n {\n \"_scraped_name\": \"First Special Session 2016\",\n \"classification\": \"special\",\n \"identifier\": \"2016fs\",\n \"name\": \"First Special Session 2016\"\n },\n {\n \"_scraped_name\": \"Regular Session 2016\",\n \"classification\": \"primary\",\n \"identifier\": \"2016rs\",\n \"name\": \"2016 Regular Session\"\n },\n {\n \"_scraped_name\": \"Regular Session 2017\",\n \"classification\": \"primary\",\n \"end_date\": \"2017-05-31\",\n \"identifier\": \"2017rs\",\n \"name\": \"2017 Regular Session\",\n \"start_date\": \"2017-02-07\"\n },\n {\n \"_scraped_name\": \"Regular Session 2018\",\n \"classification\": \"primary\",\n \"end_date\": \"2018-03-29\",\n \"identifier\": \"2018rs\",\n \"name\": \"2018 Regular Session\",\n \"start_date\": \"2018-01-09\",\n },\n {\n \"_scraped_name\": \"First Special Session 2019\",\n \"classification\": \"special\",\n \"identifier\": \"2019fs\",\n \"name\": \"First Special Session 2019\",\n \"start_date\": \"2019-03-08\"\n },\n {\n \"_scraped_name\": \" Regular Session 2019\",\n \"classification\": \"primary\",\n \"end_date\": \"2019-06-17\",\n \"identifier\": \"2019rs\",\n \"name\": \"2019 Regular Session\",\n \"start_date\": \"2019-03-05\"\n },\n ]\n ignored_scraped_sessions = [\n \"Regular Session 1998\",\n \"Organizational Session 1999\",\n \"Regular Session 1999\",\n \"First Special Session 1999\",\n \"Organizational Session 2011\",\n \"Second Special Session 1999\",\n \"Regular Session 2000\",\n \"Regular Session 2001\",\n \"First Special Session 2001\",\n \"Second Special Session 2001\",\n \"Third Special Session 2001\",\n \"Fourth Special Session 2001\",\n \"Regular Session 2002\",\n \"Organizational Session 2003\",\n \"Regular Session 2003\",\n \"First Special Session 2003\",\n \"Second Special Session 2003\",\n \"Regular Session 2004\",\n \"First Special Session 2004\",\n \"Regular Session 2005\",\n \"First Special Session 2005\",\n \"Regular Session 2006\",\n \"Organizational Session 2007\",\n \"Regular Session 2007\",\n \"First Special Session 2007\",\n \"Regular Session 2008\",\n \"First Special 
Session 2008\",\n \"Regular Session 2009\",\n \"Regular Session 2010\",\n \"First Special Session 2009\",\n \"First Special Session 2010\",\n \"Regular Session 2016\",\n \"Organizational Session 2019\",\n ]\n\n def get_organizations(self):\n legislature_name = \"Alabama Legislature\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization('Senate', classification='upper',\n parent_id=legislature._id)\n lower = Organization('House', classification='lower',\n parent_id=legislature._id)\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import lxml.html\n import requests\n\n s = requests.Session()\n r = s.get('http://alisondb.legislature.state.al.us/alison/SelectSession.aspx')\n doc = lxml.html.fromstring(r.text)\n return doc.xpath('//*[@id=\"ContentPlaceHolder1_gvSessions\"]/tr/td/font/a/font/text()')\n", "path": "openstates/al/__init__.py"}]}
| 2,620 | 144 |
gh_patches_debug_12911
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1090
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Newsletters show unpublished events
### Describe the bug
Newsletters show unpublished events
### How to reproduce
Steps to reproduce the behaviour:
1. Check one of the newsletters from the last few weeks
### Expected behaviour
Only published events should show.
### Additional context
This is probably because of the low number of events during this period.
--- END ISSUE ---
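To make the suspected cause concrete before looking at the code: the agenda builder falls back to pulling in additional events whenever fewer than ten land inside the newsletter window, and that fallback has to apply the same published filter as the primary query. The sketch below mirrors that selection logic with plain dicts standing in for Django model instances; the field names (`start`, `end`, `published`) follow the queryset calls shown further down, and everything else is an illustrative assumption.

```python
from datetime import timedelta


def build_agenda(events, start_date, limit=10):
    """Mirror of the agenda selection: unpublished events must never appear,
    neither in the two-week window nor in the fallback that tops up the list."""
    end_date = start_date + timedelta(weeks=2)
    published = [e for e in events if e["published"]]
    in_window = sorted(
        (e for e in published if start_date <= e["start"] and e["end"] < end_date),
        key=lambda e: e["start"],
    )
    if len(in_window) < limit:
        later = sorted(
            (e for e in published if e["end"] >= end_date),
            key=lambda e: e["start"],
        )
        return (in_window + later)[:limit]
    return in_window
```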
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/newsletters/services.py`
Content:
```
1 import os
2
3 from django.conf import settings
4 from django.template.loader import get_template
5 from django.utils import translation, timezone
6
7 from events.models import Event
8 from members.models import Member
9 from newsletters import emails
10 from partners.models import Partner
11 from pushnotifications.models import Message, Category
12
13
14 def write_to_file(pk, lang, html_message):
15 """
16 Write newsletter to a file
17 """
18 cache_dir = os.path.join(settings.MEDIA_ROOT, "newsletters")
19 if not os.path.isdir(cache_dir):
20 os.makedirs(cache_dir)
21
22 with open(os.path.join(cache_dir, f"{pk}_{lang}.html"), "w+") as cache_file:
23 cache_file.write(html_message)
24
25
26 def save_to_disk(newsletter, request):
27 """
28 Writes the newsletter as HTML to file (in all languages)
29 """
30 main_partner = Partner.objects.filter(is_main_partner=True).first()
31 local_partner = Partner.objects.filter(is_local_partner=True).first()
32
33 html_template = get_template("newsletters/email.html")
34
35 for language in settings.LANGUAGES:
36 translation.activate(language[0])
37
38 context = {
39 "newsletter": newsletter,
40 "agenda_events": (
41 newsletter.newslettercontent_set.filter(newsletteritem=None).order_by(
42 "newsletterevent__start_datetime"
43 )
44 ),
45 "main_partner": main_partner,
46 "local_partner": local_partner,
47 "lang_code": language[0],
48 "request": request,
49 }
50
51 html_message = html_template.render(context)
52
53 write_to_file(newsletter.pk, language[0], html_message)
54
55
56 def get_agenda(start_date):
57 end_date = start_date + timezone.timedelta(weeks=2)
58 base_events = Event.objects.filter(
59 start__gte=start_date, end__lt=end_date, published=True
60 ).order_by("start")
61 if base_events.count() < 10:
62 more_events = Event.objects.filter(end__gte=end_date).order_by("start")
63 return [*base_events, *more_events][:10]
64 return base_events
65
66
67 def send_newsletter(newsletter):
68 emails.send_newsletter(newsletter)
69 newsletter.sent = True
70 newsletter.save()
71 message = Message.objects.create(
72 title_nl=newsletter.title_nl,
73 title_en=newsletter.title_en,
74 body_nl="Tik om te bekijken",
75 body_en="Tap to view",
76 url=settings.BASE_URL + newsletter.get_absolute_url(),
77 category=Category.objects.get(key=Category.NEWSLETTER),
78 )
79 message.users.set(Member.current_members.all())
80 message.send()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/newsletters/services.py b/website/newsletters/services.py
--- a/website/newsletters/services.py
+++ b/website/newsletters/services.py
@@ -55,11 +55,12 @@
def get_agenda(start_date):
end_date = start_date + timezone.timedelta(weeks=2)
- base_events = Event.objects.filter(
- start__gte=start_date, end__lt=end_date, published=True
+ published_events = Event.objects.filter(published=True)
+ base_events = published_events.filter(
+ start__gte=start_date, end__lt=end_date
).order_by("start")
if base_events.count() < 10:
- more_events = Event.objects.filter(end__gte=end_date).order_by("start")
+ more_events = published_events.filter(end__gte=end_date).order_by("start")
return [*base_events, *more_events][:10]
return base_events
|
{"golden_diff": "diff --git a/website/newsletters/services.py b/website/newsletters/services.py\n--- a/website/newsletters/services.py\n+++ b/website/newsletters/services.py\n@@ -55,11 +55,12 @@\n \n def get_agenda(start_date):\n end_date = start_date + timezone.timedelta(weeks=2)\n- base_events = Event.objects.filter(\n- start__gte=start_date, end__lt=end_date, published=True\n+ published_events = Event.objects.filter(published=True)\n+ base_events = published_events.filter(\n+ start__gte=start_date, end__lt=end_date\n ).order_by(\"start\")\n if base_events.count() < 10:\n- more_events = Event.objects.filter(end__gte=end_date).order_by(\"start\")\n+ more_events = published_events.filter(end__gte=end_date).order_by(\"start\")\n return [*base_events, *more_events][:10]\n return base_events\n", "issue": "Newsletters show unpublished events\n### Describe the bug\r\nNewsletters show unpublished events\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Check one of the newsletters of the last weeks\r\n\r\n### Expected behaviour\r\nOnly published events should show.\r\n\r\n### Additional context\r\nThis is probably because of the low number of events during these days.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.template.loader import get_template\nfrom django.utils import translation, timezone\n\nfrom events.models import Event\nfrom members.models import Member\nfrom newsletters import emails\nfrom partners.models import Partner\nfrom pushnotifications.models import Message, Category\n\n\ndef write_to_file(pk, lang, html_message):\n \"\"\"\n Write newsletter to a file\n \"\"\"\n cache_dir = os.path.join(settings.MEDIA_ROOT, \"newsletters\")\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n with open(os.path.join(cache_dir, f\"{pk}_{lang}.html\"), \"w+\") as cache_file:\n cache_file.write(html_message)\n\n\ndef save_to_disk(newsletter, request):\n \"\"\"\n Writes the newsletter as HTML to file (in all languages)\n \"\"\"\n main_partner = Partner.objects.filter(is_main_partner=True).first()\n local_partner = Partner.objects.filter(is_local_partner=True).first()\n\n html_template = get_template(\"newsletters/email.html\")\n\n for language in settings.LANGUAGES:\n translation.activate(language[0])\n\n context = {\n \"newsletter\": newsletter,\n \"agenda_events\": (\n newsletter.newslettercontent_set.filter(newsletteritem=None).order_by(\n \"newsletterevent__start_datetime\"\n )\n ),\n \"main_partner\": main_partner,\n \"local_partner\": local_partner,\n \"lang_code\": language[0],\n \"request\": request,\n }\n\n html_message = html_template.render(context)\n\n write_to_file(newsletter.pk, language[0], html_message)\n\n\ndef get_agenda(start_date):\n end_date = start_date + timezone.timedelta(weeks=2)\n base_events = Event.objects.filter(\n start__gte=start_date, end__lt=end_date, published=True\n ).order_by(\"start\")\n if base_events.count() < 10:\n more_events = Event.objects.filter(end__gte=end_date).order_by(\"start\")\n return [*base_events, *more_events][:10]\n return base_events\n\n\ndef send_newsletter(newsletter):\n emails.send_newsletter(newsletter)\n newsletter.sent = True\n newsletter.save()\n message = Message.objects.create(\n title_nl=newsletter.title_nl,\n title_en=newsletter.title_en,\n body_nl=\"Tik om te bekijken\",\n body_en=\"Tap to view\",\n url=settings.BASE_URL + newsletter.get_absolute_url(),\n category=Category.objects.get(key=Category.NEWSLETTER),\n )\n 
message.users.set(Member.current_members.all())\n message.send()\n", "path": "website/newsletters/services.py"}], "after_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.template.loader import get_template\nfrom django.utils import translation, timezone\n\nfrom events.models import Event\nfrom members.models import Member\nfrom newsletters import emails\nfrom partners.models import Partner\nfrom pushnotifications.models import Message, Category\n\n\ndef write_to_file(pk, lang, html_message):\n \"\"\"\n Write newsletter to a file\n \"\"\"\n cache_dir = os.path.join(settings.MEDIA_ROOT, \"newsletters\")\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n with open(os.path.join(cache_dir, f\"{pk}_{lang}.html\"), \"w+\") as cache_file:\n cache_file.write(html_message)\n\n\ndef save_to_disk(newsletter, request):\n \"\"\"\n Writes the newsletter as HTML to file (in all languages)\n \"\"\"\n main_partner = Partner.objects.filter(is_main_partner=True).first()\n local_partner = Partner.objects.filter(is_local_partner=True).first()\n\n html_template = get_template(\"newsletters/email.html\")\n\n for language in settings.LANGUAGES:\n translation.activate(language[0])\n\n context = {\n \"newsletter\": newsletter,\n \"agenda_events\": (\n newsletter.newslettercontent_set.filter(newsletteritem=None).order_by(\n \"newsletterevent__start_datetime\"\n )\n ),\n \"main_partner\": main_partner,\n \"local_partner\": local_partner,\n \"lang_code\": language[0],\n \"request\": request,\n }\n\n html_message = html_template.render(context)\n\n write_to_file(newsletter.pk, language[0], html_message)\n\n\ndef get_agenda(start_date):\n end_date = start_date + timezone.timedelta(weeks=2)\n published_events = Event.objects.filter(published=True)\n base_events = published_events.filter(\n start__gte=start_date, end__lt=end_date\n ).order_by(\"start\")\n if base_events.count() < 10:\n more_events = published_events.filter(end__gte=end_date).order_by(\"start\")\n return [*base_events, *more_events][:10]\n return base_events\n\n\ndef send_newsletter(newsletter):\n emails.send_newsletter(newsletter)\n newsletter.sent = True\n newsletter.save()\n message = Message.objects.create(\n title_nl=newsletter.title_nl,\n title_en=newsletter.title_en,\n body_nl=\"Tik om te bekijken\",\n body_en=\"Tap to view\",\n url=settings.BASE_URL + newsletter.get_absolute_url(),\n category=Category.objects.get(key=Category.NEWSLETTER),\n )\n message.users.set(Member.current_members.all())\n message.send()\n", "path": "website/newsletters/services.py"}]}
| 1,030 | 207 |
gh_patches_debug_2222
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-2337
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a sample middleware to startproject's template
It would be nice to have a middleware template inside the project template to serve as an example for people who want to use it.
--- END ISSUE ---
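A sketch of what the requested sample could look like is below; the `$ProjectName` placeholder follows the `string.Template` substitution that `startproject` already applies to the other `.tmpl` files, and the file is rendered into `<project>/middlewares.py` rather than imported as-is. The method names are the standard Scrapy spider-middleware hooks, but the exact contents that ended up shipping with Scrapy are an assumption here, not taken from this entry.

```python
# middlewares.py.tmpl -- spider middleware sample rendered by startproject
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class ${ProjectName}SpiderMiddleware(object):
    # Not all methods need to be defined; if one is missing, Scrapy acts
    # as if the middleware does not modify the passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # Used by Scrapy to create the middleware instance.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_output(self, response, result, spider):
        # Called with the results returned by the spider.
        for item_or_request in result:
            yield item_or_request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
```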
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/commands/startproject.py`
Content:
```
1 from __future__ import print_function
2 import re
3 import os
4 import string
5 from importlib import import_module
6 from os.path import join, exists, abspath
7 from shutil import ignore_patterns, move, copy2, copystat
8
9 import scrapy
10 from scrapy.commands import ScrapyCommand
11 from scrapy.utils.template import render_templatefile, string_camelcase
12 from scrapy.exceptions import UsageError
13
14
15 TEMPLATES_TO_RENDER = (
16 ('scrapy.cfg',),
17 ('${project_name}', 'settings.py.tmpl'),
18 ('${project_name}', 'items.py.tmpl'),
19 ('${project_name}', 'pipelines.py.tmpl'),
20 )
21
22 IGNORE = ignore_patterns('*.pyc', '.svn')
23
24
25 class Command(ScrapyCommand):
26
27 requires_project = False
28 default_settings = {'LOG_ENABLED': False}
29
30 def syntax(self):
31 return "<project_name> [project_dir]"
32
33 def short_desc(self):
34 return "Create new project"
35
36 def _is_valid_name(self, project_name):
37 def _module_exists(module_name):
38 try:
39 import_module(module_name)
40 return True
41 except ImportError:
42 return False
43
44 if not re.search(r'^[_a-zA-Z]\w*$', project_name):
45 print('Error: Project names must begin with a letter and contain'\
46 ' only\nletters, numbers and underscores')
47 elif _module_exists(project_name):
48 print('Error: Module %r already exists' % project_name)
49 else:
50 return True
51 return False
52
53 def _copytree(self, src, dst):
54 """
55 Since the original function always creates the directory, to resolve
56 the issue a new function had to be created. It's a simple copy and
57 was reduced for this case.
58
59 More info at:
60 https://github.com/scrapy/scrapy/pull/2005
61 """
62 ignore = IGNORE
63 names = os.listdir(src)
64 ignored_names = ignore(src, names)
65
66 if not os.path.exists(dst):
67 os.makedirs(dst)
68
69 for name in names:
70 if name in ignored_names:
71 continue
72
73 srcname = os.path.join(src, name)
74 dstname = os.path.join(dst, name)
75 if os.path.isdir(srcname):
76 self._copytree(srcname, dstname)
77 else:
78 copy2(srcname, dstname)
79 copystat(src, dst)
80
81 def run(self, args, opts):
82 if len(args) not in (1, 2):
83 raise UsageError()
84
85 project_name = args[0]
86 project_dir = args[0]
87
88 if len(args) == 2:
89 project_dir = args[1]
90
91 if exists(join(project_dir, 'scrapy.cfg')):
92 self.exitcode = 1
93 print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))
94 return
95
96 if not self._is_valid_name(project_name):
97 self.exitcode = 1
98 return
99
100 self._copytree(self.templates_dir, abspath(project_dir))
101 move(join(project_dir, 'module'), join(project_dir, project_name))
102 for paths in TEMPLATES_TO_RENDER:
103 path = join(*paths)
104 tplfile = join(project_dir,
105 string.Template(path).substitute(project_name=project_name))
106 render_templatefile(tplfile, project_name=project_name,
107 ProjectName=string_camelcase(project_name))
108 print("New Scrapy project %r, using template directory %r, created in:" % \
109 (project_name, self.templates_dir))
110 print(" %s\n" % abspath(project_dir))
111 print("You can start your first spider with:")
112 print(" cd %s" % project_dir)
113 print(" scrapy genspider example example.com")
114
115 @property
116 def templates_dir(self):
117 _templates_base_dir = self.settings['TEMPLATES_DIR'] or \
118 join(scrapy.__path__[0], 'templates')
119 return join(_templates_base_dir, 'project')
120
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py
--- a/scrapy/commands/startproject.py
+++ b/scrapy/commands/startproject.py
@@ -17,6 +17,7 @@
('${project_name}', 'settings.py.tmpl'),
('${project_name}', 'items.py.tmpl'),
('${project_name}', 'pipelines.py.tmpl'),
+ ('${project_name}', 'middlewares.py.tmpl'),
)
IGNORE = ignore_patterns('*.pyc', '.svn')
|
{"golden_diff": "diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py\n--- a/scrapy/commands/startproject.py\n+++ b/scrapy/commands/startproject.py\n@@ -17,6 +17,7 @@\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n+ ('${project_name}', 'middlewares.py.tmpl'),\n )\n \n IGNORE = ignore_patterns('*.pyc', '.svn')\n", "issue": "Add a sample middleware to startproject's template\nIt will be nice to have a middleware template inside the template project to serve as an example for people that want to use it.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '.svn')\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\\\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print('Error: Module %r already exists' % project_name)\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. 
It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n copystat(src, dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir,\n string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name,\n ProjectName=string_camelcase(project_name))\n print(\"New Scrapy project %r, using template directory %r, created in:\" % \\\n (project_name, self.templates_dir))\n print(\" %s\\n\" % abspath(project_dir))\n print(\"You can start your first spider with:\")\n print(\" cd %s\" % project_dir)\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n _templates_base_dir = self.settings['TEMPLATES_DIR'] or \\\n join(scrapy.__path__[0], 'templates')\n return join(_templates_base_dir, 'project')\n \n", "path": "scrapy/commands/startproject.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '.svn')\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\\\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print('Error: Module %r already exists' % project_name)\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. 
It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n copystat(src, dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir,\n string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name,\n ProjectName=string_camelcase(project_name))\n print(\"New Scrapy project %r, using template directory %r, created in:\" % \\\n (project_name, self.templates_dir))\n print(\" %s\\n\" % abspath(project_dir))\n print(\"You can start your first spider with:\")\n print(\" cd %s\" % project_dir)\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n _templates_base_dir = self.settings['TEMPLATES_DIR'] or \\\n join(scrapy.__path__[0], 'templates')\n return join(_templates_base_dir, 'project')\n \n", "path": "scrapy/commands/startproject.py"}]}
| 1,421 | 116 |
gh_patches_debug_50238
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1339
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raw logging defaults to true...necessary?
sopel [by default](https://github.com/sopel-irc/sopel/blob/master/sopel/config/core_section.py#L134) does raw logging.
You would need to manually add `log_raw = False` to the config file to disable it.
Just wondering if having it on by default is really that necessary?
--- END ISSUE ---
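For context, the setting lives in the `[core]` section of Sopel's ini-style configuration file, so whichever way the default goes, operators are one line away from the behaviour they want. A minimal illustrative snippet (the surrounding values are just the defaults from the file below, not a recommendation):

```ini
[core]
nick = Sopel
host = irc.dftba.net
logdir = logs
# explicit opt-in/opt-out for raw protocol logging:
log_raw = True
```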
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/config/core_section.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals, absolute_import, print_function, division
4
5 import os.path
6
7 from sopel.config.types import (
8 StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,
9 FilenameAttribute, NO_DEFAULT
10 )
11 from sopel.tools import Identifier
12
13
14 def _find_certs():
15 """
16 Find the TLS root CA store.
17
18 :returns: str (path to file)
19 """
20 # check if the root CA store is at a known location
21 locations = [
22 '/etc/pki/tls/cert.pem', # best first guess
23 '/etc/ssl/certs/ca-certificates.crt', # Debian
24 '/etc/ssl/cert.pem', # FreeBSD base OpenSSL
25 '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL
26 '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora
27 '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS
28 '/etc/pki/tls/cacert.pem', # OpenELEC
29 '/etc/ssl/ca-bundle.pem', # OpenSUSE
30 ]
31 for certs in locations:
32 if os.path.isfile(certs):
33 return certs
34 return None
35
36
37 def configure(config):
38 config.core.configure_setting('nick', 'Enter the nickname for your bot.')
39 config.core.configure_setting('host', 'Enter the server to connect to.')
40 config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')
41 if config.core.use_ssl:
42 default_port = 6697
43 else:
44 default_port = 6667
45 config.core.configure_setting('port', 'Enter the port to connect on.',
46 default=default_port)
47 config.core.configure_setting(
48 'owner', "Enter your own IRC name (or that of the bot's owner)")
49 config.core.configure_setting(
50 'channels',
51 'Enter the channels to connect to at startup, separated by commas.'
52 )
53
54
55 class CoreSection(StaticSection):
56 """The config section used for configuring the bot itself."""
57 admins = ListAttribute('admins')
58 """The list of people (other than the owner) who can administer the bot"""
59
60 admin_accounts = ListAttribute('admin_accounts')
61 """The list of accounts (other than the owner's) who can administer the bot.
62
63 This should not be set for networks that do not support IRCv3 account
64 capabilities."""
65
66 auth_method = ChoiceAttribute('auth_method', choices=[
67 'nickserv', 'authserv', 'Q', 'sasl', 'server'])
68 """The method to use to authenticate with the server.
69
70 Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``."""
71
72 auth_password = ValidatedAttribute('auth_password')
73 """The password to use to authenticate with the server."""
74
75 auth_target = ValidatedAttribute('auth_target')
76 """The user to use for nickserv authentication, or the SASL mechanism.
77
78 May not apply, depending on ``auth_method``. Defaults to NickServ for
79 nickserv auth, and PLAIN for SASL auth."""
80
81 auth_username = ValidatedAttribute('auth_username')
82 """The username/account to use to authenticate with the server.
83
84 May not apply, depending on ``auth_method``."""
85
86 bind_host = ValidatedAttribute('bind_host')
87 """Bind the connection to a specific IP"""
88
89 ca_certs = FilenameAttribute('ca_certs', default=_find_certs())
90 """The path of the CA certs pem file"""
91
92 channels = ListAttribute('channels')
93 """List of channels for the bot to join when it connects"""
94
95 db_filename = ValidatedAttribute('db_filename')
96 """The filename for Sopel's database."""
97
98 default_time_format = ValidatedAttribute('default_time_format',
99 default='%Y-%m-%d - %T%Z')
100 """The default format to use for time in messages."""
101
102 default_timezone = ValidatedAttribute('default_timezone')
103 """The default timezone to use for time in messages."""
104
105 enable = ListAttribute('enable')
106 """A whitelist of the only modules you want to enable."""
107
108 exclude = ListAttribute('exclude')
109 """A list of modules which should not be loaded."""
110
111 extra = ListAttribute('extra')
112 """A list of other directories you'd like to include modules from."""
113
114 help_prefix = ValidatedAttribute('help_prefix', default='.')
115 """The prefix to use in help"""
116
117 @property
118 def homedir(self):
119 """The directory in which various files are stored at runtime.
120
121 By default, this is the same directory as the config. It can not be
122 changed at runtime.
123 """
124 return self._parent.homedir
125
126 host = ValidatedAttribute('host', default='irc.dftba.net')
127 """The server to connect to."""
128
129 host_blocks = ListAttribute('host_blocks')
130 """A list of hostmasks which Sopel should ignore.
131
132 Regular expression syntax is used"""
133
134 log_raw = ValidatedAttribute('log_raw', bool, default=True)
135 """Whether a log of raw lines as sent and received should be kept."""
136
137 logdir = FilenameAttribute('logdir', directory=True, default='logs')
138 """Directory in which to place logs."""
139
140 logging_channel = ValidatedAttribute('logging_channel', Identifier)
141 """The channel to send logging messages to."""
142
143 logging_level = ChoiceAttribute('logging_level',
144 ['CRITICAL', 'ERROR', 'WARNING', 'INFO',
145 'DEBUG'],
146 'WARNING')
147 """The lowest severity of logs to display."""
148
149 modes = ValidatedAttribute('modes', default='B')
150 """User modes to be set on connection."""
151
152 name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')
153 """The "real name" of your bot for WHOIS responses."""
154
155 nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))
156 """The nickname for the bot"""
157
158 nick_blocks = ListAttribute('nick_blocks')
159 """A list of nicks which Sopel should ignore.
160
161 Regular expression syntax is used."""
162
163 not_configured = ValidatedAttribute('not_configured', bool, default=False)
164 """For package maintainers. Not used in normal configurations.
165
166 This allows software packages to install a default config file, with this
167 set to true, so that the bot will not run until it has been properly
168 configured."""
169
170 owner = ValidatedAttribute('owner', default=NO_DEFAULT)
171 """The IRC name of the owner of the bot."""
172
173 owner_account = ValidatedAttribute('owner_account')
174 """The services account name of the owner of the bot.
175
176 This should only be set on networks which support IRCv3 account
177 capabilities.
178 """
179
180 pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')
181 """The directory in which to put the file Sopel uses to track its process ID.
182
183 You probably do not need to change this unless you're managing Sopel with
184 systemd or similar."""
185
186 port = ValidatedAttribute('port', int, default=6667)
187 """The port to connect on."""
188
189 prefix = ValidatedAttribute('prefix', default='\.')
190 """The prefix to add to the beginning of commands.
191
192 It is a regular expression (so the default, ``\.``, means commands start
193 with a period), though using capturing groups will create problems."""
194
195 reply_errors = ValidatedAttribute('reply_errors', bool, default=True)
196 """Whether to message the sender of a message that triggered an error with the exception."""
197
198 throttle_join = ValidatedAttribute('throttle_join', int)
199 """Slow down the initial join of channels to prevent getting kicked.
200
201 Sopel will only join this many channels at a time, sleeping for a second
202 between each batch. This is unnecessary on most networks."""
203
204 timeout = ValidatedAttribute('timeout', int, default=120)
205 """The amount of time acceptable between pings before timing out."""
206
207 use_ssl = ValidatedAttribute('use_ssl', bool, default=False)
208 """Whether to use a SSL secured connection."""
209
210 user = ValidatedAttribute('user', default='sopel')
211 """The "user" for your bot (the part before the @ in the hostname)."""
212
213 verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)
214 """Whether to require a trusted SSL certificate for SSL connections."""
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py
--- a/sopel/config/core_section.py
+++ b/sopel/config/core_section.py
@@ -131,7 +131,7 @@
Regular expression syntax is used"""
- log_raw = ValidatedAttribute('log_raw', bool, default=True)
+ log_raw = ValidatedAttribute('log_raw', bool, default=False)
"""Whether a log of raw lines as sent and received should be kept."""
logdir = FilenameAttribute('logdir', directory=True, default='logs')
|
{"golden_diff": "diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py\n--- a/sopel/config/core_section.py\n+++ b/sopel/config/core_section.py\n@@ -131,7 +131,7 @@\n \n Regular expression syntax is used\"\"\"\n \n- log_raw = ValidatedAttribute('log_raw', bool, default=True)\n+ log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n \n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n", "issue": "raw logging defaults to true...necessary?\nsopel [by default](https://github.com/sopel-irc/sopel/blob/master/sopel/config/core_section.py#L134) does raw logging. \r\nYou would need to manually add log_raw = False to config file to disable it.\r\nJust wondering if having it on by default is really that necessary?\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. 
Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=True)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. 
Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n 
config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. 
It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. 
This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}]}
| 2,749 | 131 |
gh_patches_debug_32043
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-695
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 'files' to regressor features
I've removed it in 66c496bb25b69ca4b42333ee365755f49faf62fd so that I could enable feature importance for the model.
In order to add it back, we need to implement something similar to TfidfVectorizer's min_df parameter and only consider the files that are most often modified.
--- END ISSUE ---
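The min_df idea referenced above can be illustrated on its own: count how often each path occurs across the training commits and keep only the paths above a document-frequency threshold, so the `files` feature stays small enough for feature importance to remain readable. The transformer below is a sketch of that approach, assuming each commit dict exposes a `files` list; it is not the implementation that actually closed this issue.

```python
from collections import Counter

from sklearn.base import BaseEstimator, TransformerMixin


class FrequentFileFilter(BaseEstimator, TransformerMixin):
    """Keep only file paths modified in at least `min_df` (fraction) of commits."""

    def __init__(self, min_df=0.0005):
        self.min_df = min_df  # fraction of commits, like TfidfVectorizer's min_df

    def fit(self, commits, y=None):
        counts = Counter(path for commit in commits for path in set(commit["files"]))
        threshold = self.min_df * len(commits)
        self.frequent_files_ = {p for p, c in counts.items() if c >= threshold}
        return self

    def transform(self, commits):
        # Emit dicts that a downstream DictVectorizer can consume.
        return [
            {path: 1 for path in commit["files"] if path in self.frequent_files_}
            for commit in commits
        ]
```

Plugged in front of a `DictVectorizer`, this keeps rarely-touched paths out of the feature matrix in the same spirit as TfidfVectorizer's min_df.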
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/regressor.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 from datetime import datetime
7
8 import dateutil.parser
9 import xgboost
10 from dateutil.relativedelta import relativedelta
11 from imblearn.under_sampling import RandomUnderSampler
12 from sklearn.compose import ColumnTransformer
13 from sklearn.feature_extraction import DictVectorizer
14 from sklearn.pipeline import Pipeline
15
16 from bugbug import commit_features, feature_cleanup, labels, repository
17 from bugbug.model import CommitModel
18
19
20 class RegressorModel(CommitModel):
21 def __init__(self, lemmatization=False):
22 CommitModel.__init__(self, lemmatization)
23
24 self.sampler = RandomUnderSampler(random_state=0)
25
26 feature_extractors = [
27 commit_features.file_size(),
28 commit_features.test_added(),
29 commit_features.added(),
30 commit_features.deleted(),
31 commit_features.test_deleted(),
32 commit_features.author_experience(),
33 commit_features.reviewer_experience(),
34 commit_features.reviewers_num(),
35 commit_features.component_touched_prev(),
36 commit_features.directory_touched_prev(),
37 commit_features.file_touched_prev(),
38 commit_features.types(),
39 commit_features.components(),
40 commit_features.components_modified_num(),
41 commit_features.directories(),
42 commit_features.directories_modified_num(),
43 commit_features.files_modified_num(),
44 ]
45
46 cleanup_functions = [
47 feature_cleanup.fileref(),
48 feature_cleanup.url(),
49 feature_cleanup.synonyms(),
50 ]
51
52 self.extraction_pipeline = Pipeline(
53 [
54 (
55 "commit_extractor",
56 commit_features.CommitExtractor(
57 feature_extractors, cleanup_functions
58 ),
59 ),
60 (
61 "union",
62 ColumnTransformer(
63 [
64 ("data", DictVectorizer(), "data"),
65 ("desc", self.text_vectorizer(min_df=0.0001), "desc"),
66 ]
67 ),
68 ),
69 ]
70 )
71
72 self.clf = xgboost.XGBClassifier(n_jobs=16)
73 self.clf.set_params(predictor="cpu_predictor")
74
75 def get_labels(self):
76 classes = {}
77
78 regressors = set(r[0] for r in labels.get_labels("regressor"))
79
80 for commit_data in repository.get_commits():
81 if commit_data["ever_backedout"]:
82 continue
83
84 node = commit_data["node"]
85 if node in regressors:
86 classes[node] = 1
87 else:
88 push_date = dateutil.parser.parse(commit_data["pushdate"])
89
90 # The labels we have are only from 2016-11-01.
91 # TODO: Automate collection of labels and somehow remove this check.
92 if push_date < datetime(2016, 11, 1):
93 continue
94
95 # We remove the last 6 months, as there could be regressions which haven't been filed yet.
96 if push_date > datetime.utcnow() - relativedelta(months=6):
97 continue
98
99 classes[node] = 0
100
101 print(
102 "{} commits caused regressions".format(
103 sum(1 for label in classes.values() if label == 1)
104 )
105 )
106
107 print(
108 "{} commits did not cause regressions".format(
109 sum(1 for label in classes.values() if label == 0)
110 )
111 )
112
113 return classes, [0, 1]
114
115 def get_feature_names(self):
116 return self.extraction_pipeline.named_steps["union"].get_feature_names()
117
```
Path: `bugbug/commit_features.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import pandas as pd
7 from sklearn.base import BaseEstimator, TransformerMixin
8
9 EXPERIENCE_TIMESPAN = 90
10 EXPERIENCE_TIMESPAN_TEXT = f"{EXPERIENCE_TIMESPAN}_days"
11 EXPERIENCE_TIMESPAN_TEXT_USER = f"{EXPERIENCE_TIMESPAN} days"
12
13
14 class files_modified_num(object):
15 name = "# of modified files"
16
17 def __call__(self, commit, **kwargs):
18 return commit["files_modified_num"]
19
20
21 class file_size(object):
22 name = "File size"
23
24 def __call__(self, commit, **kwargs):
25 return {
26 "sum": commit["total_file_size"],
27 "avg": commit["average_file_size"],
28 "max": commit["maximum_file_size"],
29 "min": commit["minimum_file_size"],
30 }
31
32
33 class added(object):
34 name = "# of lines added"
35
36 def __call__(self, commit, **kwargs):
37 return commit["added"]
38
39
40 class test_added(object):
41 name = "# of lines added in tests"
42
43 def __call__(self, commit, **kwargs):
44 return commit["test_added"]
45
46
47 class deleted(object):
48 name = "# of lines deleted"
49
50 def __call__(self, commit, **kwargs):
51 return commit["deleted"]
52
53
54 class test_deleted(object):
55 name = "# of lines deleted in tests"
56
57 def __call__(self, commit, **kwargs):
58 return commit["test_deleted"]
59
60
61 def get_exps(exp_type, commit):
62 items_key = f"{exp_type}s" if exp_type != "directory" else "directories"
63 items_num = len(commit[items_key])
64
65 return {
66 "sum": commit[f"touched_prev_total_{exp_type}_sum"],
67 "max": commit[f"touched_prev_total_{exp_type}_max"],
68 "min": commit[f"touched_prev_total_{exp_type}_min"],
69 "avg": commit[f"touched_prev_total_{exp_type}_sum"] / items_num
70 if items_num > 0
71 else 0,
72 "sum backout": commit[f"touched_prev_total_{exp_type}_backout_sum"],
73 "max backout": commit[f"touched_prev_total_{exp_type}_backout_max"],
74 "min backout": commit[f"touched_prev_total_{exp_type}_backout_min"],
75 "avg backout": commit[f"touched_prev_total_{exp_type}_backout_sum"] / items_num
76 if items_num > 0
77 else 0,
78 f"sum {EXPERIENCE_TIMESPAN_TEXT_USER}": commit[
79 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum"
80 ],
81 f"max {EXPERIENCE_TIMESPAN_TEXT_USER}": commit[
82 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_max"
83 ],
84 f"min {EXPERIENCE_TIMESPAN_TEXT_USER}": commit[
85 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_min"
86 ],
87 f"avg {EXPERIENCE_TIMESPAN_TEXT_USER}": commit[
88 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum"
89 ]
90 / items_num
91 if items_num > 0
92 else 0,
93 f"sum {EXPERIENCE_TIMESPAN_TEXT_USER} backout": commit[
94 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum"
95 ],
96 f"max {EXPERIENCE_TIMESPAN_TEXT_USER} backout": commit[
97 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_max"
98 ],
99 f"min {EXPERIENCE_TIMESPAN_TEXT_USER} backout": commit[
100 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_min"
101 ],
102 f"avg {EXPERIENCE_TIMESPAN_TEXT_USER} backout": commit[
103 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum"
104 ]
105 / items_num
106 if items_num > 0
107 else 0,
108 }
109
110
111 class author_experience(object):
112 name = "Author experience"
113
114 def __call__(self, commit, **kwargs):
115 return {
116 "total": commit["touched_prev_total_author_sum"],
117 EXPERIENCE_TIMESPAN_TEXT_USER: commit[
118 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_sum"
119 ],
120 "total backouts": commit["touched_prev_total_author_backout_sum"],
121 f"{EXPERIENCE_TIMESPAN_TEXT_USER} backouts": commit[
122 f"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_backout_sum"
123 ],
124 "seniority": commit["seniority_author"] / 86400,
125 }
126
127
128 class reviewer_experience(object):
129 name = "Reviewer experience"
130
131 def __call__(self, commit, **kwargs):
132 return get_exps("reviewer", commit)
133
134
135 class reviewers_num(object):
136 name = "# of reviewers"
137
138 def __call__(self, commit, **kwargs):
139 return len(commit["reviewers"])
140
141
142 class components(object):
143 def __call__(self, commit, **kwargs):
144 return commit["components"]
145
146
147 class components_modified_num(object):
148 name = "# of components modified"
149
150 def __call__(self, commit, **kwargs):
151 return len(commit["components"])
152
153
154 class component_touched_prev(object):
155 name = "# of times the components were touched before"
156
157 def __call__(self, commit, **kwargs):
158 return get_exps("component", commit)
159
160
161 class directories(object):
162 def __call__(self, commit, **kwargs):
163 return commit["directories"]
164
165
166 class directories_modified_num(object):
167 name = "# of directories modified"
168
169 def __call__(self, commit, **kwargs):
170 return len(commit["directories"])
171
172
173 class directory_touched_prev(object):
174 name = "# of times the directories were touched before"
175
176 def __call__(self, commit, **kwargs):
177 return get_exps("directory", commit)
178
179
180 class files(object):
181 def __call__(self, commit, **kwargs):
182 return commit["files"]
183
184
185 class file_touched_prev(object):
186 name = "# of times the files were touched before"
187
188 def __call__(self, commit, **kwargs):
189 return get_exps("file", commit)
190
191
192 class types(object):
193 name = "file types"
194
195 def __call__(self, commit, **kwargs):
196 return commit["types"]
197
198
199 class CommitExtractor(BaseEstimator, TransformerMixin):
200 def __init__(self, feature_extractors, cleanup_functions):
201 self.feature_extractors = feature_extractors
202 self.cleanup_functions = cleanup_functions
203
204 def fit(self, x, y=None):
205 return self
206
207 def transform(self, commits):
208 results = []
209
210 for commit in commits:
211 data = {}
212
213 for feature_extractor in self.feature_extractors:
214 if "bug_features" in feature_extractor.__module__:
215 if not commit["bug"]:
216 continue
217
218 res = feature_extractor(commit["bug"])
219 else:
220 res = feature_extractor(commit)
221
222 if res is None:
223 continue
224
225 if hasattr(feature_extractor, "name"):
226 feature_extractor_name = feature_extractor.name
227 else:
228 feature_extractor_name = feature_extractor.__class__.__name__
229
230 if isinstance(res, dict):
231 for key, value in res.items():
232 data[f"{feature_extractor_name} ({key})"] = value
233 continue
234
235 if isinstance(res, list):
236 for item in res:
237 data[f"{item} in {feature_extractor_name}"] = "True"
238 continue
239
240 if isinstance(res, bool):
241 res = str(res)
242
243 data[feature_extractor_name] = res
244
245 # TODO: Try simply using all possible fields instead of extracting features manually.
246
247 for cleanup_function in self.cleanup_functions:
248 commit["desc"] = cleanup_function(commit["desc"])
249
250 result = {"data": data, "desc": commit["desc"]}
251
252 results.append(result)
253
254 return pd.DataFrame(results)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bugbug/commit_features.py b/bugbug/commit_features.py
--- a/bugbug/commit_features.py
+++ b/bugbug/commit_features.py
@@ -3,6 +3,8 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
+from collections import defaultdict
+
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
@@ -178,8 +180,23 @@
class files(object):
+ def __init__(self, min_freq=0.00003):
+ self.min_freq = min_freq
+
+ def fit(self, commits):
+ self.count = defaultdict(int)
+
+ for commit in commits:
+ for f in commit["files"]:
+ self.count[f] += 1
+ self.total_files = sum(self.count.values())
+
def __call__(self, commit, **kwargs):
- return commit["files"]
+ return [
+ f
+ for f in commit["files"]
+ if (self.count[f] / self.total_files) > self.min_freq
+ ]
class file_touched_prev(object):
@@ -202,6 +219,10 @@
self.cleanup_functions = cleanup_functions
def fit(self, x, y=None):
+ for feature in self.feature_extractors:
+ if hasattr(feature, "fit"):
+ feature.fit(x)
+
return self
def transform(self, commits):
diff --git a/bugbug/models/regressor.py b/bugbug/models/regressor.py
--- a/bugbug/models/regressor.py
+++ b/bugbug/models/regressor.py
@@ -36,6 +36,7 @@
commit_features.directory_touched_prev(),
commit_features.file_touched_prev(),
commit_features.types(),
+ commit_features.files(),
commit_features.components(),
commit_features.components_modified_num(),
commit_features.directories(),
|
{"golden_diff": "diff --git a/bugbug/commit_features.py b/bugbug/commit_features.py\n--- a/bugbug/commit_features.py\n+++ b/bugbug/commit_features.py\n@@ -3,6 +3,8 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this file,\n # You can obtain one at http://mozilla.org/MPL/2.0/.\n \n+from collections import defaultdict\n+\n import pandas as pd\n from sklearn.base import BaseEstimator, TransformerMixin\n \n@@ -178,8 +180,23 @@\n \n \n class files(object):\n+ def __init__(self, min_freq=0.00003):\n+ self.min_freq = min_freq\n+\n+ def fit(self, commits):\n+ self.count = defaultdict(int)\n+\n+ for commit in commits:\n+ for f in commit[\"files\"]:\n+ self.count[f] += 1\n+ self.total_files = sum(self.count.values())\n+\n def __call__(self, commit, **kwargs):\n- return commit[\"files\"]\n+ return [\n+ f\n+ for f in commit[\"files\"]\n+ if (self.count[f] / self.total_files) > self.min_freq\n+ ]\n \n \n class file_touched_prev(object):\n@@ -202,6 +219,10 @@\n self.cleanup_functions = cleanup_functions\n \n def fit(self, x, y=None):\n+ for feature in self.feature_extractors:\n+ if hasattr(feature, \"fit\"):\n+ feature.fit(x)\n+\n return self\n \n def transform(self, commits):\ndiff --git a/bugbug/models/regressor.py b/bugbug/models/regressor.py\n--- a/bugbug/models/regressor.py\n+++ b/bugbug/models/regressor.py\n@@ -36,6 +36,7 @@\n commit_features.directory_touched_prev(),\n commit_features.file_touched_prev(),\n commit_features.types(),\n+ commit_features.files(),\n commit_features.components(),\n commit_features.components_modified_num(),\n commit_features.directories(),\n", "issue": "Add 'files' to regressor features\nI've removed it in 66c496bb25b69ca4b42333ee365755f49faf62fd so that I could enable feature importance for the model.\r\nIn order to add it back, we need to implement something similar to TfIdfVectorizer's min_df parameter and only consider files that are most often modified.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom datetime import datetime\n\nimport dateutil.parser\nimport xgboost\nfrom dateutil.relativedelta import relativedelta\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import commit_features, feature_cleanup, labels, repository\nfrom bugbug.model import CommitModel\n\n\nclass RegressorModel(CommitModel):\n def __init__(self, lemmatization=False):\n CommitModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n commit_features.file_size(),\n commit_features.test_added(),\n commit_features.added(),\n commit_features.deleted(),\n commit_features.test_deleted(),\n commit_features.author_experience(),\n commit_features.reviewer_experience(),\n commit_features.reviewers_num(),\n commit_features.component_touched_prev(),\n commit_features.directory_touched_prev(),\n commit_features.file_touched_prev(),\n commit_features.types(),\n commit_features.components(),\n commit_features.components_modified_num(),\n commit_features.directories(),\n commit_features.directories_modified_num(),\n commit_features.files_modified_num(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"commit_extractor\",\n commit_features.CommitExtractor(\n feature_extractors, cleanup_functions\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"desc\", self.text_vectorizer(min_df=0.0001), \"desc\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n regressors = set(r[0] for r in labels.get_labels(\"regressor\"))\n\n for commit_data in repository.get_commits():\n if commit_data[\"ever_backedout\"]:\n continue\n\n node = commit_data[\"node\"]\n if node in regressors:\n classes[node] = 1\n else:\n push_date = dateutil.parser.parse(commit_data[\"pushdate\"])\n\n # The labels we have are only from 2016-11-01.\n # TODO: Automate collection of labels and somehow remove this check.\n if push_date < datetime(2016, 11, 1):\n continue\n\n # We remove the last 6 months, as there could be regressions which haven't been filed yet.\n if push_date > datetime.utcnow() - relativedelta(months=6):\n continue\n\n classes[node] = 0\n\n print(\n \"{} commits caused regressions\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n print(\n \"{} commits did not cause regressions\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/regressor.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nEXPERIENCE_TIMESPAN = 90\nEXPERIENCE_TIMESPAN_TEXT = f\"{EXPERIENCE_TIMESPAN}_days\"\nEXPERIENCE_TIMESPAN_TEXT_USER = f\"{EXPERIENCE_TIMESPAN} days\"\n\n\nclass files_modified_num(object):\n name = \"# of modified files\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"files_modified_num\"]\n\n\nclass file_size(object):\n name = \"File size\"\n\n def __call__(self, commit, **kwargs):\n return {\n \"sum\": commit[\"total_file_size\"],\n \"avg\": commit[\"average_file_size\"],\n \"max\": commit[\"maximum_file_size\"],\n \"min\": commit[\"minimum_file_size\"],\n }\n\n\nclass added(object):\n name = \"# of lines added\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"added\"]\n\n\nclass test_added(object):\n name = \"# of lines added in tests\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"test_added\"]\n\n\nclass deleted(object):\n name = \"# of lines deleted\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"deleted\"]\n\n\nclass test_deleted(object):\n name = \"# of lines deleted in tests\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"test_deleted\"]\n\n\ndef get_exps(exp_type, commit):\n items_key = f\"{exp_type}s\" if exp_type != \"directory\" else \"directories\"\n items_num = len(commit[items_key])\n\n return {\n \"sum\": commit[f\"touched_prev_total_{exp_type}_sum\"],\n \"max\": commit[f\"touched_prev_total_{exp_type}_max\"],\n \"min\": commit[f\"touched_prev_total_{exp_type}_min\"],\n \"avg\": commit[f\"touched_prev_total_{exp_type}_sum\"] / items_num\n if items_num > 0\n else 0,\n \"sum backout\": commit[f\"touched_prev_total_{exp_type}_backout_sum\"],\n \"max backout\": commit[f\"touched_prev_total_{exp_type}_backout_max\"],\n \"min backout\": commit[f\"touched_prev_total_{exp_type}_backout_min\"],\n \"avg backout\": commit[f\"touched_prev_total_{exp_type}_backout_sum\"] / items_num\n if items_num > 0\n else 0,\n f\"sum {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum\"\n ],\n f\"max {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_max\"\n ],\n f\"min {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_min\"\n ],\n f\"avg {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum\"\n ]\n / items_num\n if items_num > 0\n else 0,\n f\"sum {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum\"\n ],\n f\"max {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_max\"\n ],\n f\"min {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_min\"\n ],\n f\"avg {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum\"\n ]\n / items_num\n if items_num > 0\n else 0,\n }\n\n\nclass author_experience(object):\n name = \"Author experience\"\n\n def __call__(self, commit, **kwargs):\n return {\n \"total\": commit[\"touched_prev_total_author_sum\"],\n EXPERIENCE_TIMESPAN_TEXT_USER: commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_sum\"\n ],\n \"total backouts\": 
commit[\"touched_prev_total_author_backout_sum\"],\n f\"{EXPERIENCE_TIMESPAN_TEXT_USER} backouts\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_backout_sum\"\n ],\n \"seniority\": commit[\"seniority_author\"] / 86400,\n }\n\n\nclass reviewer_experience(object):\n name = \"Reviewer experience\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"reviewer\", commit)\n\n\nclass reviewers_num(object):\n name = \"# of reviewers\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"reviewers\"])\n\n\nclass components(object):\n def __call__(self, commit, **kwargs):\n return commit[\"components\"]\n\n\nclass components_modified_num(object):\n name = \"# of components modified\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"components\"])\n\n\nclass component_touched_prev(object):\n name = \"# of times the components were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"component\", commit)\n\n\nclass directories(object):\n def __call__(self, commit, **kwargs):\n return commit[\"directories\"]\n\n\nclass directories_modified_num(object):\n name = \"# of directories modified\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"directories\"])\n\n\nclass directory_touched_prev(object):\n name = \"# of times the directories were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"directory\", commit)\n\n\nclass files(object):\n def __call__(self, commit, **kwargs):\n return commit[\"files\"]\n\n\nclass file_touched_prev(object):\n name = \"# of times the files were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"file\", commit)\n\n\nclass types(object):\n name = \"file types\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"types\"]\n\n\nclass CommitExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, commits):\n results = []\n\n for commit in commits:\n data = {}\n\n for feature_extractor in self.feature_extractors:\n if \"bug_features\" in feature_extractor.__module__:\n if not commit[\"bug\"]:\n continue\n\n res = feature_extractor(commit[\"bug\"])\n else:\n res = feature_extractor(commit)\n\n if res is None:\n continue\n\n if hasattr(feature_extractor, \"name\"):\n feature_extractor_name = feature_extractor.name\n else:\n feature_extractor_name = feature_extractor.__class__.__name__\n\n if isinstance(res, dict):\n for key, value in res.items():\n data[f\"{feature_extractor_name} ({key})\"] = value\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f\"{item} in {feature_extractor_name}\"] = \"True\"\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[feature_extractor_name] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n commit[\"desc\"] = cleanup_function(commit[\"desc\"])\n\n result = {\"data\": data, \"desc\": commit[\"desc\"]}\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/commit_features.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom datetime import datetime\n\nimport dateutil.parser\nimport xgboost\nfrom dateutil.relativedelta import relativedelta\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import commit_features, feature_cleanup, labels, repository\nfrom bugbug.model import CommitModel\n\n\nclass RegressorModel(CommitModel):\n def __init__(self, lemmatization=False):\n CommitModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n commit_features.file_size(),\n commit_features.test_added(),\n commit_features.added(),\n commit_features.deleted(),\n commit_features.test_deleted(),\n commit_features.author_experience(),\n commit_features.reviewer_experience(),\n commit_features.reviewers_num(),\n commit_features.component_touched_prev(),\n commit_features.directory_touched_prev(),\n commit_features.file_touched_prev(),\n commit_features.types(),\n commit_features.files(),\n commit_features.components(),\n commit_features.components_modified_num(),\n commit_features.directories(),\n commit_features.directories_modified_num(),\n commit_features.files_modified_num(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"commit_extractor\",\n commit_features.CommitExtractor(\n feature_extractors, cleanup_functions\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"desc\", self.text_vectorizer(min_df=0.0001), \"desc\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n regressors = set(r[0] for r in labels.get_labels(\"regressor\"))\n\n for commit_data in repository.get_commits():\n if commit_data[\"ever_backedout\"]:\n continue\n\n node = commit_data[\"node\"]\n if node in regressors:\n classes[node] = 1\n else:\n push_date = dateutil.parser.parse(commit_data[\"pushdate\"])\n\n # The labels we have are only from 2016-11-01.\n # TODO: Automate collection of labels and somehow remove this check.\n if push_date < datetime(2016, 11, 1):\n continue\n\n # We remove the last 6 months, as there could be regressions which haven't been filed yet.\n if push_date > datetime.utcnow() - relativedelta(months=6):\n continue\n\n classes[node] = 0\n\n print(\n \"{} commits caused regressions\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n print(\n \"{} commits did not cause regressions\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/regressor.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nEXPERIENCE_TIMESPAN = 90\nEXPERIENCE_TIMESPAN_TEXT = f\"{EXPERIENCE_TIMESPAN}_days\"\nEXPERIENCE_TIMESPAN_TEXT_USER = f\"{EXPERIENCE_TIMESPAN} days\"\n\n\nclass files_modified_num(object):\n name = \"# of modified files\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"files_modified_num\"]\n\n\nclass file_size(object):\n name = \"File size\"\n\n def __call__(self, commit, **kwargs):\n return {\n \"sum\": commit[\"total_file_size\"],\n \"avg\": commit[\"average_file_size\"],\n \"max\": commit[\"maximum_file_size\"],\n \"min\": commit[\"minimum_file_size\"],\n }\n\n\nclass added(object):\n name = \"# of lines added\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"added\"]\n\n\nclass test_added(object):\n name = \"# of lines added in tests\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"test_added\"]\n\n\nclass deleted(object):\n name = \"# of lines deleted\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"deleted\"]\n\n\nclass test_deleted(object):\n name = \"# of lines deleted in tests\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"test_deleted\"]\n\n\ndef get_exps(exp_type, commit):\n items_key = f\"{exp_type}s\" if exp_type != \"directory\" else \"directories\"\n items_num = len(commit[items_key])\n\n return {\n \"sum\": commit[f\"touched_prev_total_{exp_type}_sum\"],\n \"max\": commit[f\"touched_prev_total_{exp_type}_max\"],\n \"min\": commit[f\"touched_prev_total_{exp_type}_min\"],\n \"avg\": commit[f\"touched_prev_total_{exp_type}_sum\"] / items_num\n if items_num > 0\n else 0,\n \"sum backout\": commit[f\"touched_prev_total_{exp_type}_backout_sum\"],\n \"max backout\": commit[f\"touched_prev_total_{exp_type}_backout_max\"],\n \"min backout\": commit[f\"touched_prev_total_{exp_type}_backout_min\"],\n \"avg backout\": commit[f\"touched_prev_total_{exp_type}_backout_sum\"] / items_num\n if items_num > 0\n else 0,\n f\"sum {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum\"\n ],\n f\"max {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_max\"\n ],\n f\"min {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_min\"\n ],\n f\"avg {EXPERIENCE_TIMESPAN_TEXT_USER}\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_sum\"\n ]\n / items_num\n if items_num > 0\n else 0,\n f\"sum {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum\"\n ],\n f\"max {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_max\"\n ],\n f\"min {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_min\"\n ],\n f\"avg {EXPERIENCE_TIMESPAN_TEXT_USER} backout\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_{exp_type}_backout_sum\"\n ]\n / items_num\n if items_num > 0\n else 0,\n }\n\n\nclass author_experience(object):\n name = \"Author experience\"\n\n def __call__(self, commit, **kwargs):\n return {\n \"total\": commit[\"touched_prev_total_author_sum\"],\n EXPERIENCE_TIMESPAN_TEXT_USER: commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_sum\"\n ],\n \"total 
backouts\": commit[\"touched_prev_total_author_backout_sum\"],\n f\"{EXPERIENCE_TIMESPAN_TEXT_USER} backouts\": commit[\n f\"touched_prev_{EXPERIENCE_TIMESPAN_TEXT}_author_backout_sum\"\n ],\n \"seniority\": commit[\"seniority_author\"] / 86400,\n }\n\n\nclass reviewer_experience(object):\n name = \"Reviewer experience\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"reviewer\", commit)\n\n\nclass reviewers_num(object):\n name = \"# of reviewers\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"reviewers\"])\n\n\nclass components(object):\n def __call__(self, commit, **kwargs):\n return commit[\"components\"]\n\n\nclass components_modified_num(object):\n name = \"# of components modified\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"components\"])\n\n\nclass component_touched_prev(object):\n name = \"# of times the components were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"component\", commit)\n\n\nclass directories(object):\n def __call__(self, commit, **kwargs):\n return commit[\"directories\"]\n\n\nclass directories_modified_num(object):\n name = \"# of directories modified\"\n\n def __call__(self, commit, **kwargs):\n return len(commit[\"directories\"])\n\n\nclass directory_touched_prev(object):\n name = \"# of times the directories were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"directory\", commit)\n\n\nclass files(object):\n def __init__(self, min_freq=0.00003):\n self.min_freq = min_freq\n\n def fit(self, commits):\n self.count = defaultdict(int)\n\n for commit in commits:\n for f in commit[\"files\"]:\n self.count[f] += 1\n self.total_files = sum(self.count.values())\n\n def __call__(self, commit, **kwargs):\n return [\n f\n for f in commit[\"files\"]\n if (self.count[f] / self.total_files) > self.min_freq\n ]\n\n\nclass file_touched_prev(object):\n name = \"# of times the files were touched before\"\n\n def __call__(self, commit, **kwargs):\n return get_exps(\"file\", commit)\n\n\nclass types(object):\n name = \"file types\"\n\n def __call__(self, commit, **kwargs):\n return commit[\"types\"]\n\n\nclass CommitExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n\n def fit(self, x, y=None):\n for feature in self.feature_extractors:\n if hasattr(feature, \"fit\"):\n feature.fit(x)\n\n return self\n\n def transform(self, commits):\n results = []\n\n for commit in commits:\n data = {}\n\n for feature_extractor in self.feature_extractors:\n if \"bug_features\" in feature_extractor.__module__:\n if not commit[\"bug\"]:\n continue\n\n res = feature_extractor(commit[\"bug\"])\n else:\n res = feature_extractor(commit)\n\n if res is None:\n continue\n\n if hasattr(feature_extractor, \"name\"):\n feature_extractor_name = feature_extractor.name\n else:\n feature_extractor_name = feature_extractor.__class__.__name__\n\n if isinstance(res, dict):\n for key, value in res.items():\n data[f\"{feature_extractor_name} ({key})\"] = value\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f\"{item} in {feature_extractor_name}\"] = \"True\"\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[feature_extractor_name] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n commit[\"desc\"] = 
cleanup_function(commit[\"desc\"])\n\n result = {\"data\": data, \"desc\": commit[\"desc\"]}\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/commit_features.py"}]}
| 3,892 | 453 |
gh_patches_debug_40251
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-685
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows: Node Support
This involves solving this ticket: https://github.com/ekalinin/nodeenv/issues/53
I've already started some work on this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/node.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 from pre_commit.envcontext import envcontext
8 from pre_commit.envcontext import Var
9 from pre_commit.languages import helpers
10 from pre_commit.util import clean_path_on_failure
11 from pre_commit.util import cmd_output
12 from pre_commit.xargs import xargs
13
14
15 ENVIRONMENT_DIR = 'node_env'
16 get_default_version = helpers.basic_get_default_version
17 healthy = helpers.basic_healthy
18
19
20 def get_env_patch(venv): # pragma: windows no cover
21 if sys.platform == 'cygwin': # pragma: no cover
22 _, win_venv, _ = cmd_output('cygpath', '-w', venv)
23 install_prefix = r'{}\bin'.format(win_venv.strip())
24 else:
25 install_prefix = venv
26 return (
27 ('NODE_VIRTUAL_ENV', venv),
28 ('NPM_CONFIG_PREFIX', install_prefix),
29 ('npm_config_prefix', install_prefix),
30 ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
31 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
32 )
33
34
35 @contextlib.contextmanager
36 def in_env(prefix, language_version): # pragma: windows no cover
37 envdir = prefix.path(
38 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
39 )
40 with envcontext(get_env_patch(envdir)):
41 yield
42
43
44 def install_environment(
45 prefix, version, additional_dependencies,
46 ): # pragma: windows no cover
47 additional_dependencies = tuple(additional_dependencies)
48 assert prefix.exists('package.json')
49 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
50
51 env_dir = prefix.path(directory)
52 with clean_path_on_failure(env_dir):
53 cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', env_dir]
54 if version != 'default':
55 cmd.extend(['-n', version])
56 cmd_output(*cmd)
57
58 with in_env(prefix, version):
59 helpers.run_setup_cmd(
60 prefix,
61 ('npm', 'install', '-g', '.') + additional_dependencies,
62 )
63
64
65 def run_hook(prefix, hook, file_args): # pragma: windows no cover
66 with in_env(prefix, hook['language_version']):
67 return xargs(helpers.to_cmd(hook), file_args)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -7,6 +7,7 @@
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.languages import helpers
+from pre_commit.languages.python import bin_dir
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
from pre_commit.xargs import xargs
@@ -17,10 +18,17 @@
healthy = helpers.basic_healthy
-def get_env_patch(venv): # pragma: windows no cover
+def _envdir(prefix, version):
+ directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
+ return prefix.path(directory)
+
+
+def get_env_patch(venv):
if sys.platform == 'cygwin': # pragma: no cover
_, win_venv, _ = cmd_output('cygpath', '-w', venv)
install_prefix = r'{}\bin'.format(win_venv.strip())
+ elif sys.platform == 'win32': # pragma: no cover
+ install_prefix = bin_dir(venv)
else:
install_prefix = venv
return (
@@ -28,29 +36,26 @@
('NPM_CONFIG_PREFIX', install_prefix),
('npm_config_prefix', install_prefix),
('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
- ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
+ ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
)
@contextlib.contextmanager
-def in_env(prefix, language_version): # pragma: windows no cover
- envdir = prefix.path(
- helpers.environment_dir(ENVIRONMENT_DIR, language_version),
- )
- with envcontext(get_env_patch(envdir)):
+def in_env(prefix, language_version):
+ with envcontext(get_env_patch(_envdir(prefix, language_version))):
yield
-def install_environment(
- prefix, version, additional_dependencies,
-): # pragma: windows no cover
+def install_environment(prefix, version, additional_dependencies):
additional_dependencies = tuple(additional_dependencies)
assert prefix.exists('package.json')
- directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
+ envdir = _envdir(prefix, version)
- env_dir = prefix.path(directory)
- with clean_path_on_failure(env_dir):
- cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', env_dir]
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath
+ if sys.platform == 'win32': # pragma: no cover
+ envdir = '\\\\?\\' + os.path.normpath(envdir)
+ with clean_path_on_failure(envdir):
+ cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', envdir]
if version != 'default':
cmd.extend(['-n', version])
cmd_output(*cmd)
@@ -62,6 +67,6 @@
)
-def run_hook(prefix, hook, file_args): # pragma: windows no cover
+def run_hook(prefix, hook, file_args):
with in_env(prefix, hook['language_version']):
return xargs(helpers.to_cmd(hook), file_args)
|
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -7,6 +7,7 @@\n from pre_commit.envcontext import envcontext\n from pre_commit.envcontext import Var\n from pre_commit.languages import helpers\n+from pre_commit.languages.python import bin_dir\n from pre_commit.util import clean_path_on_failure\n from pre_commit.util import cmd_output\n from pre_commit.xargs import xargs\n@@ -17,10 +18,17 @@\n healthy = helpers.basic_healthy\n \n \n-def get_env_patch(venv): # pragma: windows no cover\n+def _envdir(prefix, version):\n+ directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n+ return prefix.path(directory)\n+\n+\n+def get_env_patch(venv):\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = r'{}\\bin'.format(win_venv.strip())\n+ elif sys.platform == 'win32': # pragma: no cover\n+ install_prefix = bin_dir(venv)\n else:\n install_prefix = venv\n return (\n@@ -28,29 +36,26 @@\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n- ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n+ ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n \n \n @contextlib.contextmanager\n-def in_env(prefix, language_version): # pragma: windows no cover\n- envdir = prefix.path(\n- helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n- )\n- with envcontext(get_env_patch(envdir)):\n+def in_env(prefix, language_version):\n+ with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n \n \n-def install_environment(\n- prefix, version, additional_dependencies,\n-): # pragma: windows no cover\n+def install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n- directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n+ envdir = _envdir(prefix, version)\n \n- env_dir = prefix.path(directory)\n- with clean_path_on_failure(env_dir):\n- cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', env_dir]\n+ # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n+ if sys.platform == 'win32': # pragma: no cover\n+ envdir = '\\\\\\\\?\\\\' + os.path.normpath(envdir)\n+ with clean_path_on_failure(envdir):\n+ cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', envdir]\n if version != 'default':\n cmd.extend(['-n', version])\n cmd_output(*cmd)\n@@ -62,6 +67,6 @@\n )\n \n \n-def run_hook(prefix, hook, file_args): # pragma: windows no cover\n+def run_hook(prefix, hook, file_args):\n with in_env(prefix, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "issue": "Windows: Node Support\nThis involves solving this ticket: https://github.com/ekalinin/nodeenv/issues/53\n\nI've already started some work on this\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): 
# pragma: windows no cover\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = r'{}\\bin'.format(win_venv.strip())\n else:\n install_prefix = venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix, language_version): # pragma: windows no cover\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', env_dir]\n if version != 'default':\n cmd.extend(['-n', version])\n cmd_output(*cmd)\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(prefix, hook, file_args): # pragma: windows no cover\n with in_env(prefix, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _envdir(prefix, version):\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv):\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = r'{}\\bin'.format(win_venv.strip())\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n else:\n install_prefix = venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix, language_version):\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = '\\\\\\\\?\\\\' + os.path.normpath(envdir)\n with clean_path_on_failure(envdir):\n cmd = [sys.executable, '-m', 'nodeenv', '--prebuilt', envdir]\n if version != 'default':\n cmd.extend(['-n', version])\n cmd_output(*cmd)\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n 
prefix,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(prefix, hook, file_args):\n with in_env(prefix, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}]}
| 932 | 805 |
gh_patches_debug_18004
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-1381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test with Ray 0.8.4 and update version
Ray 0.8.4 was released: https://github.com/ray-project/ray/tree/ray-0.8.4, we should test performance and update version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3 import os
4 from setuptools.dist import Distribution
5
6 try:
7 from wheel.bdist_wheel import bdist_wheel
8
9 HAS_WHEEL = True
10 except ImportError:
11 HAS_WHEEL = False
12
13 with open("README.md", "r") as fh:
14 long_description = fh.read()
15
16 if HAS_WHEEL:
17
18 class ModinWheel(bdist_wheel):
19 def finalize_options(self):
20 bdist_wheel.finalize_options(self)
21 self.root_is_pure = False
22
23 def get_tag(self):
24 _, _, plat = bdist_wheel.get_tag(self)
25 py = "py3"
26 abi = "none"
27 return py, abi, plat
28
29
30 class ModinDistribution(Distribution):
31 def __init__(self, *attrs):
32 Distribution.__init__(self, *attrs)
33 if HAS_WHEEL:
34 self.cmdclass["bdist_wheel"] = ModinWheel
35
36 def is_pure(self):
37 return False
38
39
40 dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"]
41 ray_deps = ["ray==0.8.3", "pyarrow<0.17"]
42 if "SETUP_PLAT_NAME" in os.environ:
43 if "win" in os.environ["SETUP_PLAT_NAME"]:
44 all_deps = dask_deps
45 else:
46 all_deps = dask_deps + ray_deps
47 else:
48 all_deps = dask_deps if os.name == "nt" else dask_deps + ray_deps
49
50 setup(
51 name="modin",
52 version=versioneer.get_version(),
53 cmdclass=versioneer.get_cmdclass(),
54 distclass=ModinDistribution,
55 description="Modin: Make your pandas code run faster by changing one line of code.",
56 packages=find_packages(),
57 license="Apache 2",
58 url="https://github.com/modin-project/modin",
59 long_description=long_description,
60 long_description_content_type="text/markdown",
61 install_requires=["pandas==1.0.3", "packaging"],
62 extras_require={
63 # can be installed by pip install modin[dask]
64 "dask": dask_deps,
65 "ray": ray_deps,
66 "all": all_deps,
67 },
68 python_requires=">=3.5",
69 )
70
```
Path: `modin/__init__.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import os
15 import sys
16 import warnings
17 from packaging import version
18
19 from ._version import get_versions
20
21
22 def custom_formatwarning(msg, category, *args, **kwargs):
23 # ignore everything except the message
24 return "{}: {}\n".format(category.__name__, msg)
25
26
27 warnings.formatwarning = custom_formatwarning
28 # Filter numpy version warnings because they are not relevant
29 warnings.filterwarnings("ignore", message="numpy.dtype size changed")
30 warnings.filterwarnings("ignore", message="Large object of size")
31 warnings.filterwarnings(
32 "ignore",
33 message="The pandas.datetime class is deprecated and will be removed from pandas in a future version. "
34 "Import from datetime module instead.",
35 )
36
37
38 def get_execution_engine():
39 # In the future, when there are multiple engines and different ways of
40 # backing the DataFrame, there will have to be some changed logic here to
41 # decide these things. In the meantime, we will use the currently supported
42 # execution engine + backing (Pandas + Ray).
43 if "MODIN_ENGINE" in os.environ:
44 # .title allows variants like ray, RAY, Ray
45 return os.environ["MODIN_ENGINE"].title()
46 else:
47 if "MODIN_DEBUG" in os.environ:
48 return "Python"
49 else:
50 if sys.platform != "win32":
51 try:
52 import ray
53
54 except ImportError:
55 pass
56 else:
57 if version.parse(ray.__version__) != version.parse("0.8.3"):
58 raise ImportError(
59 "Please `pip install modin[ray]` to install compatible Ray version."
60 )
61 return "Ray"
62 try:
63 import dask
64 import distributed
65
66 except ImportError:
67 raise ImportError(
68 "Please `pip install {}modin[dask]` to install an engine".format(
69 "modin[ray]` or `" if sys.platform != "win32" else ""
70 )
71 )
72 else:
73 if version.parse(dask.__version__) < version.parse(
74 "2.1.0"
75 ) or version.parse(distributed.__version__) < version.parse("2.3.2"):
76 raise ImportError(
77 "Please `pip install modin[dask]` to install compatible Dask version."
78 )
79 return "Dask"
80
81
82 def get_partition_format():
83 # See note above about engine + backing.
84 return os.environ.get("MODIN_BACKEND", "Pandas").title()
85
86
87 __version__ = "0.6.3"
88 __execution_engine__ = get_execution_engine()
89 __partition_format__ = get_partition_format()
90
91 # We don't want these used outside of this file.
92 del get_execution_engine
93 del get_partition_format
94
95 __version__ = get_versions()["version"]
96 del get_versions
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modin/__init__.py b/modin/__init__.py
--- a/modin/__init__.py
+++ b/modin/__init__.py
@@ -54,7 +54,7 @@
except ImportError:
pass
else:
- if version.parse(ray.__version__) != version.parse("0.8.3"):
+ if version.parse(ray.__version__) != version.parse("0.8.4"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,7 @@
dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"]
-ray_deps = ["ray==0.8.3", "pyarrow<0.17"]
+ray_deps = ["ray==0.8.4", "pyarrow<0.17"]
if "SETUP_PLAT_NAME" in os.environ:
if "win" in os.environ["SETUP_PLAT_NAME"]:
all_deps = dask_deps
|
{"golden_diff": "diff --git a/modin/__init__.py b/modin/__init__.py\n--- a/modin/__init__.py\n+++ b/modin/__init__.py\n@@ -54,7 +54,7 @@\n except ImportError:\n pass\n else:\n- if version.parse(ray.__version__) != version.parse(\"0.8.3\"):\n+ if version.parse(ray.__version__) != version.parse(\"0.8.4\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,7 +38,7 @@\n \n \n dask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\n-ray_deps = [\"ray==0.8.3\", \"pyarrow<0.17\"]\n+ray_deps = [\"ray==0.8.4\", \"pyarrow<0.17\"]\n if \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n", "issue": "Test with Ray 0.8.4 and update version\nRay 0.8.4 was released: https://github.com/ray-project/ray/tree/ray-0.8.4, we should test performance and update version.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport os\nfrom setuptools.dist import Distribution\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n HAS_WHEEL = True\nexcept ImportError:\n HAS_WHEEL = False\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif HAS_WHEEL:\n\n class ModinWheel(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n _, _, plat = bdist_wheel.get_tag(self)\n py = \"py3\"\n abi = \"none\"\n return py, abi, plat\n\n\nclass ModinDistribution(Distribution):\n def __init__(self, *attrs):\n Distribution.__init__(self, *attrs)\n if HAS_WHEEL:\n self.cmdclass[\"bdist_wheel\"] = ModinWheel\n\n def is_pure(self):\n return False\n\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.3\", \"pyarrow<0.17\"]\nif \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n else:\n all_deps = dask_deps + ray_deps\nelse:\n all_deps = dask_deps if os.name == \"nt\" else dask_deps + ray_deps\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n distclass=ModinDistribution,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport warnings\nfrom packaging import version\n\nfrom ._version import get_versions\n\n\ndef custom_formatwarning(msg, category, *args, **kwargs):\n # ignore everything except the message\n return \"{}: {}\\n\".format(category.__name__, msg)\n\n\nwarnings.formatwarning = custom_formatwarning\n# Filter numpy version warnings because they are not relevant\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"Large object of size\")\nwarnings.filterwarnings(\n \"ignore\",\n message=\"The pandas.datetime class is deprecated and will be removed from pandas in a future version. \"\n \"Import from datetime module instead.\",\n)\n\n\ndef get_execution_engine():\n # In the future, when there are multiple engines and different ways of\n # backing the DataFrame, there will have to be some changed logic here to\n # decide these things. In the meantime, we will use the currently supported\n # execution engine + backing (Pandas + Ray).\n if \"MODIN_ENGINE\" in os.environ:\n # .title allows variants like ray, RAY, Ray\n return os.environ[\"MODIN_ENGINE\"].title()\n else:\n if \"MODIN_DEBUG\" in os.environ:\n return \"Python\"\n else:\n if sys.platform != \"win32\":\n try:\n import ray\n\n except ImportError:\n pass\n else:\n if version.parse(ray.__version__) != version.parse(\"0.8.3\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\n return \"Ray\"\n try:\n import dask\n import distributed\n\n except ImportError:\n raise ImportError(\n \"Please `pip install {}modin[dask]` to install an engine\".format(\n \"modin[ray]` or `\" if sys.platform != \"win32\" else \"\"\n )\n )\n else:\n if version.parse(dask.__version__) < version.parse(\n \"2.1.0\"\n ) or version.parse(distributed.__version__) < version.parse(\"2.3.2\"):\n raise ImportError(\n \"Please `pip install modin[dask]` to install compatible Dask version.\"\n )\n return \"Dask\"\n\n\ndef get_partition_format():\n # See note above about engine + backing.\n return os.environ.get(\"MODIN_BACKEND\", \"Pandas\").title()\n\n\n__version__ = \"0.6.3\"\n__execution_engine__ = get_execution_engine()\n__partition_format__ = get_partition_format()\n\n# We don't want these used outside of this file.\ndel get_execution_engine\ndel get_partition_format\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n", "path": "modin/__init__.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport os\nfrom setuptools.dist import Distribution\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n HAS_WHEEL = True\nexcept ImportError:\n HAS_WHEEL = False\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif HAS_WHEEL:\n\n class ModinWheel(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n _, _, plat = bdist_wheel.get_tag(self)\n py = \"py3\"\n abi = \"none\"\n return py, abi, plat\n\n\nclass ModinDistribution(Distribution):\n def __init__(self, *attrs):\n Distribution.__init__(self, *attrs)\n if HAS_WHEEL:\n self.cmdclass[\"bdist_wheel\"] = ModinWheel\n\n def is_pure(self):\n return False\n\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.4\", \"pyarrow<0.17\"]\nif \"SETUP_PLAT_NAME\" in os.environ:\n if \"win\" in 
os.environ[\"SETUP_PLAT_NAME\"]:\n all_deps = dask_deps\n else:\n all_deps = dask_deps + ray_deps\nelse:\n all_deps = dask_deps if os.name == \"nt\" else dask_deps + ray_deps\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n distclass=ModinDistribution,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport os\nimport sys\nimport warnings\nfrom packaging import version\n\nfrom ._version import get_versions\n\n\ndef custom_formatwarning(msg, category, *args, **kwargs):\n # ignore everything except the message\n return \"{}: {}\\n\".format(category.__name__, msg)\n\n\nwarnings.formatwarning = custom_formatwarning\n# Filter numpy version warnings because they are not relevant\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"Large object of size\")\nwarnings.filterwarnings(\n \"ignore\",\n message=\"The pandas.datetime class is deprecated and will be removed from pandas in a future version. \"\n \"Import from datetime module instead.\",\n)\n\n\ndef get_execution_engine():\n # In the future, when there are multiple engines and different ways of\n # backing the DataFrame, there will have to be some changed logic here to\n # decide these things. 
In the meantime, we will use the currently supported\n # execution engine + backing (Pandas + Ray).\n if \"MODIN_ENGINE\" in os.environ:\n # .title allows variants like ray, RAY, Ray\n return os.environ[\"MODIN_ENGINE\"].title()\n else:\n if \"MODIN_DEBUG\" in os.environ:\n return \"Python\"\n else:\n if sys.platform != \"win32\":\n try:\n import ray\n\n except ImportError:\n pass\n else:\n if version.parse(ray.__version__) != version.parse(\"0.8.4\"):\n raise ImportError(\n \"Please `pip install modin[ray]` to install compatible Ray version.\"\n )\n return \"Ray\"\n try:\n import dask\n import distributed\n\n except ImportError:\n raise ImportError(\n \"Please `pip install {}modin[dask]` to install an engine\".format(\n \"modin[ray]` or `\" if sys.platform != \"win32\" else \"\"\n )\n )\n else:\n if version.parse(dask.__version__) < version.parse(\n \"2.1.0\"\n ) or version.parse(distributed.__version__) < version.parse(\"2.3.2\"):\n raise ImportError(\n \"Please `pip install modin[dask]` to install compatible Dask version.\"\n )\n return \"Dask\"\n\n\ndef get_partition_format():\n # See note above about engine + backing.\n return os.environ.get(\"MODIN_BACKEND\", \"Pandas\").title()\n\n\n__version__ = \"0.6.3\"\n__execution_engine__ = get_execution_engine()\n__partition_format__ = get_partition_format()\n\n# We don't want these used outside of this file.\ndel get_execution_engine\ndel get_partition_format\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n", "path": "modin/__init__.py"}]}
| 1,891 | 254 |
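Editor's aside (not part of the dataset row above): the modin patch in that row relies on a runtime version check built with `packaging.version`. The sketch below illustrates that pattern in isolation; the function name and the pinned version string are illustrative, not taken from the modin source.

```python
from packaging import version

PINNED_RAY = "0.8.4"  # illustrative pin, mirroring the diff above


def check_ray_version(installed: str, pinned: str = PINNED_RAY) -> None:
    """Raise ImportError when the installed Ray version does not match the pin."""
    if version.parse(installed) != version.parse(pinned):
        raise ImportError(
            "Please `pip install modin[ray]` to install a compatible Ray version."
        )


# check_ray_version("0.8.4") passes silently; check_ray_version("0.8.3") raises.
```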
gh_patches_debug_38834
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-658
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove conjugate in Cholesky decomposition which affects performance heavily
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
In the tile of Cholesky decomposition, we do a conjugate after the transpose, which is unnecessary and affects performance heavily.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/tensor/linalg/cholesky.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import numpy as np
18 from numpy.linalg import LinAlgError
19
20 from ...serialize import KeyField, BoolField
21 from ... import opcodes as OperandDef
22 from ..operands import TensorHasInput, TensorOperandMixin
23 from ..datasource import tensor as astensor
24 from ..core import TensorOrder
25 from ..array_utils import as_same_device, device
26
27
28 def _H(chunk):
29 from ..base.transpose import TensorTranspose
30 from ..arithmetic.conj import TensorConj
31
32 trans_op = TensorTranspose(dtype=chunk.dtype)
33 c = trans_op.new_chunk([chunk], shape=chunk.shape[::-1], index=chunk.index[::-1])
34 conj_op = TensorConj(dtype=c.dtype)
35 return conj_op.new_chunk([c], shape=c.shape, index=c.index)
36
37
38 class TensorCholesky(TensorHasInput, TensorOperandMixin):
39 _op_type_ = OperandDef.CHOLESKY
40
41 _input = KeyField('input')
42 _lower = BoolField('lower')
43
44 def __init__(self, lower=None, dtype=None, **kw):
45 super(TensorCholesky, self).__init__(_lower=lower, _dtype=dtype, **kw)
46
47 @property
48 def lower(self):
49 return self._lower
50
51 def _set_inputs(self, inputs):
52 super(TensorCholesky, self)._set_inputs(inputs)
53 self._input = self._inputs[0]
54
55 def __call__(self, a):
56 return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)
57
58 @classmethod
59 def tile(cls, op):
60 from ..datasource.zeros import TensorZeros
61 from ..arithmetic.subtract import TensorSubtract
62 from ..arithmetic.utils import tree_add
63 from .dot import TensorDot
64 from .solve_triangular import TensorSolveTriangular
65
66 tensor = op.outputs[0]
67 in_tensor = op.input
68 if in_tensor.nsplits[0] != in_tensor.nsplits[1]:
69 # all chunks on diagonal should be square
70 nsplits = in_tensor.nsplits[0]
71 in_tensor = in_tensor.rechunk([nsplits, nsplits]).single_tiles()
72
73 lower_chunks, upper_chunks = {}, {}
74 for i in range(in_tensor.chunk_shape[0]):
75 for j in range(in_tensor.chunk_shape[1]):
76 if i < j:
77 lower_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(
78 None, shape=(in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]),
79 index=(i, j), order=tensor.order)
80 upper_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(
81 None, shape=(in_tensor.nsplits[1][j], in_tensor.nsplits[0][i]),
82 index=(j, i), order=tensor.order)
83 lower_chunks[lower_chunk.index] = lower_chunk
84 upper_chunks[upper_chunk.index] = upper_chunk
85 elif i == j:
86 target = in_tensor.cix[i, j]
87 if i > 0:
88 prev_chunks = []
89 for p in range(i):
90 a, b = lower_chunks[i, p], upper_chunks[p, j]
91 prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(
92 [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)
93 prev_chunks.append(prev_chunk)
94 if len(prev_chunks) == 1:
95 s = prev_chunks[0]
96 else:
97 s = tree_add(prev_chunks[0].dtype, prev_chunks,
98 None, prev_chunks[0].shape)
99 target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(
100 [target, s], shape=target.shape, order=tensor.order)
101 lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(
102 [target], shape=target.shape, index=(i, j), order=tensor.order)
103 upper_chunk = _H(lower_chunk)
104 lower_chunks[lower_chunk.index] = lower_chunk
105 upper_chunks[upper_chunk.index] = upper_chunk
106 else:
107 target = in_tensor.cix[j, i]
108 if j > 0:
109 prev_chunks = []
110 for p in range(j):
111 a, b = lower_chunks[j, p], upper_chunks[p, i]
112 prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(
113 [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)
114 prev_chunks.append(prev_chunk)
115 if len(prev_chunks) == 1:
116 s = prev_chunks[0]
117 else:
118 s = tree_add(prev_chunks[0].dtype, prev_chunks,
119 None, prev_chunks[0].shape)
120 target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(
121 [target, s], shape=target.shape, order=tensor.order)
122 upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(
123 [lower_chunks[j, j], target], shape=target.shape,
124 index=(j, i), order=tensor.order)
125 lower_chunk = _H(upper_chunk)
126 lower_chunks[lower_chunk.index] = lower_chunk
127 upper_chunks[upper_chunk.index] = upper_chunk
128
129 new_op = op.copy()
130 if op.lower:
131 return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,
132 chunks=list(lower_chunks.values()), nsplits=in_tensor.nsplits)
133 else:
134 return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,
135 chunks=list(upper_chunks.values()), nsplits=in_tensor.nsplits)
136
137 @classmethod
138 def execute(cls, ctx, op):
139 chunk = op.outputs[0]
140 (a,), device_id, xp = as_same_device(
141 [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
142
143 with device(device_id):
144 if xp is np:
145 try:
146 import scipy.linalg
147
148 ctx[chunk.key] = scipy.linalg.cholesky(a, lower=op.lower)
149 return
150 except ImportError: # pragma: no cover
151 pass
152
153 r = xp.linalg.cholesky(a)
154 if not chunk.op.lower:
155 r = r.T.conj()
156
157 ctx[chunk.key] = r
158
159
160 def cholesky(a, lower=False):
161 """
162 Cholesky decomposition.
163
164 Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
165 where `L` is lower-triangular and .H is the conjugate transpose operator
166 (which is the ordinary transpose if `a` is real-valued). `a` must be
167 Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
168 actually returned.
169
170 Parameters
171 ----------
172 a : (..., M, M) array_like
173 Hermitian (symmetric if all elements are real), positive-definite
174 input matrix.
175 lower : bool
176 Whether to compute the upper or lower triangular Cholesky
177 factorization. Default is upper-triangular.
178
179 Returns
180 -------
181 L : (..., M, M) array_like
182 Upper or lower-triangular Cholesky factor of `a`.
183
184 Raises
185 ------
186 LinAlgError
187 If the decomposition fails, for example, if `a` is not
188 positive-definite.
189
190 Notes
191 -----
192
193 Broadcasting rules apply, see the `mt.linalg` documentation for
194 details.
195
196 The Cholesky decomposition is often used as a fast way of solving
197
198 .. math:: A \\mathbf{x} = \\mathbf{b}
199
200 (when `A` is both Hermitian/symmetric and positive-definite).
201
202 First, we solve for :math:`\\mathbf{y}` in
203
204 .. math:: L \\mathbf{y} = \\mathbf{b},
205
206 and then for :math:`\\mathbf{x}` in
207
208 .. math:: L.H \\mathbf{x} = \\mathbf{y}.
209
210 Examples
211 --------
212 >>> import mars.tensor as mt
213
214 >>> A = mt.array([[1,-2j],[2j,5]])
215 >>> A.execute()
216 array([[ 1.+0.j, 0.-2.j],
217 [ 0.+2.j, 5.+0.j]])
218 >>> L = mt.linalg.cholesky(A, lower=True)
219 >>> L.execute()
220 array([[ 1.+0.j, 0.+0.j],
221 [ 0.+2.j, 1.+0.j]])
222 >>> mt.dot(L, L.T.conj()).execute() # verify that L * L.H = A
223 array([[ 1.+0.j, 0.-2.j],
224 [ 0.+2.j, 5.+0.j]])
225 >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
226 >>> mt.linalg.cholesky(A, lower=True).execute()
227 array([[ 1.+0.j, 0.+0.j],
228 [ 0.+2.j, 1.+0.j]])
229
230 """
231 a = astensor(a)
232
233 if a.ndim != 2:
234 raise LinAlgError('{0}-dimensional array given. '
235 'Tensor must be two-dimensional'.format(a.ndim))
236 if a.shape[0] != a.shape[1]:
237 raise LinAlgError('Input must be square')
238
239 cho = np.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
240
241 op = TensorCholesky(lower=lower, dtype=cho.dtype)
242 return op(a)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mars/tensor/linalg/cholesky.py b/mars/tensor/linalg/cholesky.py
--- a/mars/tensor/linalg/cholesky.py
+++ b/mars/tensor/linalg/cholesky.py
@@ -25,16 +25,6 @@
from ..array_utils import as_same_device, device
-def _H(chunk):
- from ..base.transpose import TensorTranspose
- from ..arithmetic.conj import TensorConj
-
- trans_op = TensorTranspose(dtype=chunk.dtype)
- c = trans_op.new_chunk([chunk], shape=chunk.shape[::-1], index=chunk.index[::-1])
- conj_op = TensorConj(dtype=c.dtype)
- return conj_op.new_chunk([c], shape=c.shape, index=c.index)
-
-
class TensorCholesky(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.CHOLESKY
@@ -60,6 +50,8 @@
from ..datasource.zeros import TensorZeros
from ..arithmetic.subtract import TensorSubtract
from ..arithmetic.utils import tree_add
+ from ..base import TensorTranspose
+ from ..utils import reverse_order
from .dot import TensorDot
from .solve_triangular import TensorSolveTriangular
@@ -100,7 +92,9 @@
[target, s], shape=target.shape, order=tensor.order)
lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(
[target], shape=target.shape, index=(i, j), order=tensor.order)
- upper_chunk = _H(lower_chunk)
+ upper_chunk = TensorTranspose(dtype=lower_chunk.dtype).new_chunk(
+ [lower_chunk], shape=lower_chunk.shape[::-1],
+ index=lower_chunk.index[::-1], order=reverse_order(lower_chunk.order))
lower_chunks[lower_chunk.index] = lower_chunk
upper_chunks[upper_chunk.index] = upper_chunk
else:
@@ -122,7 +116,9 @@
upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(
[lower_chunks[j, j], target], shape=target.shape,
index=(j, i), order=tensor.order)
- lower_chunk = _H(upper_chunk)
+ lower_chunk = TensorTranspose(dtype=upper_chunk.dtype).new_chunk(
+ [upper_chunk], shape=upper_chunk.shape[::-1],
+ index=upper_chunk.index[::-1], order=reverse_order(upper_chunk.order))
lower_chunks[lower_chunk.index] = lower_chunk
upper_chunks[upper_chunk.index] = upper_chunk
|
{"golden_diff": "diff --git a/mars/tensor/linalg/cholesky.py b/mars/tensor/linalg/cholesky.py\n--- a/mars/tensor/linalg/cholesky.py\n+++ b/mars/tensor/linalg/cholesky.py\n@@ -25,16 +25,6 @@\n from ..array_utils import as_same_device, device\n \n \n-def _H(chunk):\n- from ..base.transpose import TensorTranspose\n- from ..arithmetic.conj import TensorConj\n-\n- trans_op = TensorTranspose(dtype=chunk.dtype)\n- c = trans_op.new_chunk([chunk], shape=chunk.shape[::-1], index=chunk.index[::-1])\n- conj_op = TensorConj(dtype=c.dtype)\n- return conj_op.new_chunk([c], shape=c.shape, index=c.index)\n-\n-\n class TensorCholesky(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY\n \n@@ -60,6 +50,8 @@\n from ..datasource.zeros import TensorZeros\n from ..arithmetic.subtract import TensorSubtract\n from ..arithmetic.utils import tree_add\n+ from ..base import TensorTranspose\n+ from ..utils import reverse_order\n from .dot import TensorDot\n from .solve_triangular import TensorSolveTriangular\n \n@@ -100,7 +92,9 @@\n [target, s], shape=target.shape, order=tensor.order)\n lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(\n [target], shape=target.shape, index=(i, j), order=tensor.order)\n- upper_chunk = _H(lower_chunk)\n+ upper_chunk = TensorTranspose(dtype=lower_chunk.dtype).new_chunk(\n+ [lower_chunk], shape=lower_chunk.shape[::-1],\n+ index=lower_chunk.index[::-1], order=reverse_order(lower_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n else:\n@@ -122,7 +116,9 @@\n upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(\n [lower_chunks[j, j], target], shape=target.shape,\n index=(j, i), order=tensor.order)\n- lower_chunk = _H(upper_chunk)\n+ lower_chunk = TensorTranspose(dtype=upper_chunk.dtype).new_chunk(\n+ [upper_chunk], shape=upper_chunk.shape[::-1],\n+ index=upper_chunk.index[::-1], order=reverse_order(upper_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n", "issue": "Remove conjugate in Cholesky decomposition which effects performance heavily\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\nIn the tile of Cholesky decomposition, we do a conjugate after transpose which is unnecessary and effect performance heavily.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom ...serialize import KeyField, BoolField\nfrom ... 
import opcodes as OperandDef\nfrom ..operands import TensorHasInput, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOrder\nfrom ..array_utils import as_same_device, device\n\n\ndef _H(chunk):\n from ..base.transpose import TensorTranspose\n from ..arithmetic.conj import TensorConj\n\n trans_op = TensorTranspose(dtype=chunk.dtype)\n c = trans_op.new_chunk([chunk], shape=chunk.shape[::-1], index=chunk.index[::-1])\n conj_op = TensorConj(dtype=c.dtype)\n return conj_op.new_chunk([c], shape=c.shape, index=c.index)\n\n\nclass TensorCholesky(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY\n\n _input = KeyField('input')\n _lower = BoolField('lower')\n\n def __init__(self, lower=None, dtype=None, **kw):\n super(TensorCholesky, self).__init__(_lower=lower, _dtype=dtype, **kw)\n\n @property\n def lower(self):\n return self._lower\n\n def _set_inputs(self, inputs):\n super(TensorCholesky, self)._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, a):\n return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n from ..datasource.zeros import TensorZeros\n from ..arithmetic.subtract import TensorSubtract\n from ..arithmetic.utils import tree_add\n from .dot import TensorDot\n from .solve_triangular import TensorSolveTriangular\n\n tensor = op.outputs[0]\n in_tensor = op.input\n if in_tensor.nsplits[0] != in_tensor.nsplits[1]:\n # all chunks on diagonal should be square\n nsplits = in_tensor.nsplits[0]\n in_tensor = in_tensor.rechunk([nsplits, nsplits]).single_tiles()\n\n lower_chunks, upper_chunks = {}, {}\n for i in range(in_tensor.chunk_shape[0]):\n for j in range(in_tensor.chunk_shape[1]):\n if i < j:\n lower_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]),\n index=(i, j), order=tensor.order)\n upper_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[1][j], in_tensor.nsplits[0][i]),\n index=(j, i), order=tensor.order)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n elif i == j:\n target = in_tensor.cix[i, j]\n if i > 0:\n prev_chunks = []\n for p in range(i):\n a, b = lower_chunks[i, p], upper_chunks[p, j]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n if len(prev_chunks) == 1:\n s = prev_chunks[0]\n else:\n s = tree_add(prev_chunks[0].dtype, prev_chunks,\n None, prev_chunks[0].shape)\n target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(\n [target, s], shape=target.shape, order=tensor.order)\n lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(\n [target], shape=target.shape, index=(i, j), order=tensor.order)\n upper_chunk = _H(lower_chunk)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n else:\n target = in_tensor.cix[j, i]\n if j > 0:\n prev_chunks = []\n for p in range(j):\n a, b = lower_chunks[j, p], upper_chunks[p, i]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n if len(prev_chunks) == 1:\n s = prev_chunks[0]\n else:\n s = tree_add(prev_chunks[0].dtype, prev_chunks,\n None, prev_chunks[0].shape)\n target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(\n [target, s], shape=target.shape, 
order=tensor.order)\n upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(\n [lower_chunks[j, j], target], shape=target.shape,\n index=(j, i), order=tensor.order)\n lower_chunk = _H(upper_chunk)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n\n new_op = op.copy()\n if op.lower:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(lower_chunks.values()), nsplits=in_tensor.nsplits)\n else:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(upper_chunks.values()), nsplits=in_tensor.nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n if xp is np:\n try:\n import scipy.linalg\n\n ctx[chunk.key] = scipy.linalg.cholesky(a, lower=op.lower)\n return\n except ImportError: # pragma: no cover\n pass\n\n r = xp.linalg.cholesky(a)\n if not chunk.op.lower:\n r = r.T.conj()\n\n ctx[chunk.key] = r\n\n\ndef cholesky(a, lower=False):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n lower : bool\n Whether to compute the upper or lower triangular Cholesky\n factorization. Default is upper-triangular.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n Broadcasting rules apply, see the `mt.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> A = mt.array([[1,-2j],[2j,5]])\n >>> A.execute()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = mt.linalg.cholesky(A, lower=True)\n >>> L.execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> mt.dot(L, L.T.conj()).execute() # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> mt.linalg.cholesky(A, lower=True).execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n a = astensor(a)\n\n if a.ndim != 2:\n raise LinAlgError('{0}-dimensional array given. 
'\n 'Tensor must be two-dimensional'.format(a.ndim))\n if a.shape[0] != a.shape[1]:\n raise LinAlgError('Input must be square')\n\n cho = np.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n\n op = TensorCholesky(lower=lower, dtype=cho.dtype)\n return op(a)\n", "path": "mars/tensor/linalg/cholesky.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom ...serialize import KeyField, BoolField\nfrom ... import opcodes as OperandDef\nfrom ..operands import TensorHasInput, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOrder\nfrom ..array_utils import as_same_device, device\n\n\nclass TensorCholesky(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY\n\n _input = KeyField('input')\n _lower = BoolField('lower')\n\n def __init__(self, lower=None, dtype=None, **kw):\n super(TensorCholesky, self).__init__(_lower=lower, _dtype=dtype, **kw)\n\n @property\n def lower(self):\n return self._lower\n\n def _set_inputs(self, inputs):\n super(TensorCholesky, self)._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, a):\n return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n from ..datasource.zeros import TensorZeros\n from ..arithmetic.subtract import TensorSubtract\n from ..arithmetic.utils import tree_add\n from ..base import TensorTranspose\n from ..utils import reverse_order\n from .dot import TensorDot\n from .solve_triangular import TensorSolveTriangular\n\n tensor = op.outputs[0]\n in_tensor = op.input\n if in_tensor.nsplits[0] != in_tensor.nsplits[1]:\n # all chunks on diagonal should be square\n nsplits = in_tensor.nsplits[0]\n in_tensor = in_tensor.rechunk([nsplits, nsplits]).single_tiles()\n\n lower_chunks, upper_chunks = {}, {}\n for i in range(in_tensor.chunk_shape[0]):\n for j in range(in_tensor.chunk_shape[1]):\n if i < j:\n lower_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]),\n index=(i, j), order=tensor.order)\n upper_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[1][j], in_tensor.nsplits[0][i]),\n index=(j, i), order=tensor.order)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n elif i == j:\n target = in_tensor.cix[i, j]\n if i > 0:\n prev_chunks = []\n for p in range(i):\n a, b = lower_chunks[i, p], upper_chunks[p, j]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n if len(prev_chunks) == 1:\n s = prev_chunks[0]\n else:\n s = tree_add(prev_chunks[0].dtype, prev_chunks,\n None, prev_chunks[0].shape)\n target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(\n [target, s], 
shape=target.shape, order=tensor.order)\n lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(\n [target], shape=target.shape, index=(i, j), order=tensor.order)\n upper_chunk = TensorTranspose(dtype=lower_chunk.dtype).new_chunk(\n [lower_chunk], shape=lower_chunk.shape[::-1],\n index=lower_chunk.index[::-1], order=reverse_order(lower_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n else:\n target = in_tensor.cix[j, i]\n if j > 0:\n prev_chunks = []\n for p in range(j):\n a, b = lower_chunks[j, p], upper_chunks[p, i]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n if len(prev_chunks) == 1:\n s = prev_chunks[0]\n else:\n s = tree_add(prev_chunks[0].dtype, prev_chunks,\n None, prev_chunks[0].shape)\n target = TensorSubtract(dtype=tensor.dtype, lhs=target, rhs=s).new_chunk(\n [target, s], shape=target.shape, order=tensor.order)\n upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(\n [lower_chunks[j, j], target], shape=target.shape,\n index=(j, i), order=tensor.order)\n lower_chunk = TensorTranspose(dtype=upper_chunk.dtype).new_chunk(\n [upper_chunk], shape=upper_chunk.shape[::-1],\n index=upper_chunk.index[::-1], order=reverse_order(upper_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n\n new_op = op.copy()\n if op.lower:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(lower_chunks.values()), nsplits=in_tensor.nsplits)\n else:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(upper_chunks.values()), nsplits=in_tensor.nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n if xp is np:\n try:\n import scipy.linalg\n\n ctx[chunk.key] = scipy.linalg.cholesky(a, lower=op.lower)\n return\n except ImportError: # pragma: no cover\n pass\n\n r = xp.linalg.cholesky(a)\n if not chunk.op.lower:\n r = r.T.conj()\n\n ctx[chunk.key] = r\n\n\ndef cholesky(a, lower=False):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n lower : bool\n Whether to compute the upper or lower triangular Cholesky\n factorization. Default is upper-triangular.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n Broadcasting rules apply, see the `mt.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. 
math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> A = mt.array([[1,-2j],[2j,5]])\n >>> A.execute()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = mt.linalg.cholesky(A, lower=True)\n >>> L.execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> mt.dot(L, L.T.conj()).execute() # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> mt.linalg.cholesky(A, lower=True).execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n a = astensor(a)\n\n if a.ndim != 2:\n raise LinAlgError('{0}-dimensional array given. '\n 'Tensor must be two-dimensional'.format(a.ndim))\n if a.shape[0] != a.shape[1]:\n raise LinAlgError('Input must be square')\n\n cho = np.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n\n op = TensorCholesky(lower=lower, dtype=cho.dtype)\n return op(a)\n", "path": "mars/tensor/linalg/cholesky.py"}]}
| 3,242 | 584 |
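Editor's aside (not part of the dataset row above): the mars patch in that row drops the conjugate and keeps only the transpose when building the upper Cholesky factor from the lower one. For real-valued input that is lossless, since conjugation is a no-op on real arrays; the NumPy check below (matrix values are arbitrary) verifies this.

```python
import numpy as np

# A small real symmetric positive-definite matrix (values are arbitrary).
a = np.array([[4.0, 2.0],
              [2.0, 3.0]])

L = np.linalg.cholesky(a)   # lower-triangular factor
U_conj = L.T.conj()         # conjugate transpose (what the removed _H helper produced)
U_plain = L.T               # plain transpose (what the patched tile uses)

# conj() is a no-op for real dtypes, so both upper factors agree
# and both reconstruct the original matrix.
assert np.allclose(U_conj, U_plain)
assert np.allclose(L @ U_plain, a)
```

For complex Hermitian input a conjugate transpose would in general still be required; the issue treats the extra conjugate as unnecessary, presumably with the real-valued case in mind.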
gh_patches_debug_63730
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-2561
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Permissions for Creating Folders?
Hello,
I'm working on HydroShare export functionality for Model My Watershed. I am now able to successfully create resources and add files to them (for example: [66ce7b01a1e7490383e37a407979631c](https://beta.hydroshare.org/resource/66ce7b01a1e7490383e37a407979631c/)). So far I am using `hs_restclient`'s `createResource` and `addResourceFile` methods, which work very well. However, if I try to create a folder with `createResourceFolder`, I get a HydroShareNotAuthorized exception:
```
HydroShareNotAuthorized at /export/hydroshare/
Not authorized to perform PUT on https://beta.hydroshare.org/hsapi/resource/3c741c66702649f689aedcd580e23f9b/folders/area-of-interest.
```
I am able to create folders using the HydroShare UI, so it seems I just don't have the permission in the REST API, which seems odd.
Here's what my code looks like (messy, early draft):
```python
resource = hs.createResource(
'CompositeResource',
params.get('title', project.name),
abstract=params.get('abstract', ''),
keywords=('mmw', 'model-my-watershed')
)
aoi_folder = 'area-of-interest'
hs.createResourceFolder(resource, pathname=aoi_folder)
# AoI GeoJSON
aoi_geojson = GEOSGeometry(project.area_of_interest).geojson
aoi_file = StringIO.StringIO()
aoi_file.write(aoi_geojson)
hs.addResourceFile(resource, aoi_file,
'{}/{}'.format(aoi_folder, 'shape.geojson'))
# AoI Shapefile
aoi_json = json.loads(aoi_geojson)
crs = {'no_defs': True, 'proj': 'longlat',
'ellps': 'WGS84', 'datum': 'WGS84'}
schema = {'geometry': aoi_json['type'], 'properties': {}}
with fiona.open('/tmp/{}.shp'.format(resource), 'w',
driver='ESRI Shapefile',
crs=crs, schema=schema) as shapefile:
shapefile.write({'geometry': aoi_json, 'properties': {}})
for ext in SHAPEFILE_EXTENSIONS:
with open('/tmp/{}.{}'.format(resource, ext)) as shapefile:
hs.addResourceFile(resource, shapefile,
'{}/shape.{}'.format(aoi_folder, ext))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_composite_resource/models.py`
Content:
```
1 import os
2
3 from mezzanine.pages.page_processors import processor_for
4
5 from hs_core.models import BaseResource, ResourceManager, resource_processor
6
7 from hs_file_types.models import GenericLogicalFile
8
9
10 class CompositeResource(BaseResource):
11 objects = ResourceManager("CompositeResource")
12
13 class Meta:
14 verbose_name = 'Composite Resource'
15 proxy = True
16
17 @property
18 def can_be_public_or_discoverable(self):
19 # resource level metadata check
20 if not super(CompositeResource, self).can_be_public_or_discoverable:
21 return False
22
23 # filetype level metadata check
24 for lf in self.logical_files:
25 if not lf.metadata.has_all_required_elements():
26 return False
27
28 return True
29
30 def set_default_logical_file(self):
31 """sets an instance of GenericLogicalFile to any resource file objects of this instance
32 of the resource that is not already associated with a logical file. """
33
34 for res_file in self.files.all():
35 if not res_file.has_logical_file:
36 logical_file = GenericLogicalFile.create()
37 res_file.logical_file_content_object = logical_file
38 res_file.save()
39
40 @property
41 def supports_logical_file(self):
42 """ if this resource allows associating resource file objects with logical file"""
43 return True
44
45 def get_metadata_xml(self, pretty_print=True, include_format_elements=True):
46 from lxml import etree
47
48 # get resource level core metadata as xml string
49 # for composite resource we don't want the format elements at the resource level
50 # as they are included at the file level xml node
51 xml_string = super(CompositeResource, self).get_metadata_xml(pretty_print=False,
52 include_format_elements=False)
53 # add file type metadata xml
54
55 # create an etree xml object
56 RDF_ROOT = etree.fromstring(xml_string)
57
58 # get root 'Description' element that contains all other elements
59 container = RDF_ROOT.find('rdf:Description', namespaces=self.metadata.NAMESPACES)
60
61 for lf in self.logical_files:
62 lf.metadata.add_to_xml_container(container)
63
64 return etree.tostring(RDF_ROOT, pretty_print=pretty_print)
65
66 def supports_folder_creation(self, folder_full_path):
67 """this checks if it is allowed to create a folder at the specified path"""
68
69 if __debug__:
70 assert(folder_full_path.startswith(self.file_path))
71
72 # determine containing folder
73 if "/" in folder_full_path:
74 path_to_check, _ = os.path.split(folder_full_path)
75 else:
76 path_to_check = folder_full_path
77
78 if path_to_check != self.file_path:
79 res_file_objs = [res_file_obj for res_file_obj in self.files.all() if
80 res_file_obj.dir_path == path_to_check]
81
82 for res_file_obj in res_file_objs:
83 if not res_file_obj.logical_file.supports_resource_file_rename or \
84 not res_file_obj.logical_file.supports_resource_file_move:
85 return False
86
87 return True
88
89 def supports_rename_path(self, src_full_path, tgt_full_path):
90 """checks if file/folder rename/move is allowed"""
91
92 if __debug__:
93 assert(src_full_path.startswith(self.file_path))
94 assert(tgt_full_path.startswith(self.file_path))
95
96 istorage = self.get_irods_storage()
97 tgt_file_dir = os.path.dirname(tgt_full_path)
98 src_file_dir = os.path.dirname(src_full_path)
99
100 def check_directory():
101 path_to_check = ''
102 if istorage.exists(tgt_file_dir):
103 path_to_check = tgt_file_dir
104 else:
105 if tgt_file_dir.startswith(src_file_dir):
106 path_to_check = src_file_dir
107
108 if path_to_check and not path_to_check.endswith("data/contents"):
109 # it is not the base directory - it must be a directory under base dir
110 res_file_objs = [res_file_obj for res_file_obj in self.files.all() if
111 res_file_obj.dir_path == path_to_check]
112
113 for res_file_obj in res_file_objs:
114 if not res_file_obj.logical_file.supports_resource_file_rename or \
115 not res_file_obj.logical_file.supports_resource_file_move:
116 return False
117 return True
118
119 res_file_objs = [res_file_obj for res_file_obj in self.files.all() if
120 res_file_obj.full_path == src_full_path]
121
122 if res_file_objs:
123 res_file_obj = res_file_objs[0]
124 # src_full_path contains file name
125 if not res_file_obj.logical_file.supports_resource_file_rename or \
126 not res_file_obj.logical_file.supports_resource_file_move:
127 return False
128
129 # check if the target directory allows stuff to be moved there
130 return check_directory()
131 else:
132 # src_full_path is a folder path without file name
133 # tgt_full_path also must be a folder path without file name
134 # check that if the target folder contains any files and if any of those files
135 # allow moving stuff there
136 return check_directory()
137
138 def can_add_files(self, target_full_path):
139 """
140 checks if file(s) can be uploaded to the specified *target_full_path*
141 :param target_full_path: full folder path name where file needs to be uploaded to
142 :return: True or False
143 """
144 istorage = self.get_irods_storage()
145 if istorage.exists(target_full_path):
146 path_to_check = target_full_path
147 else:
148 return False
149
150 if not path_to_check.endswith("data/contents"):
151 # it is not the base directory - it must be a directory under base dir
152 res_file_objs = [res_file_obj for res_file_obj in self.files.all() if
153 res_file_obj.dir_path == path_to_check]
154
155 for res_file_obj in res_file_objs:
156 if not res_file_obj.logical_file.supports_resource_file_add:
157 return False
158 return True
159
160 def supports_zip(self, folder_to_zip):
161 """check if the given folder can be zipped or not"""
162
163 # find all the resource files in the folder to be zipped
164 # this is being passed both qualified and unqualified paths!
165 full_path = folder_to_zip
166 if not full_path.startswith(self.file_path):
167 full_path = os.path.join(self.file_path, full_path)
168
169 if self.is_federated:
170 res_file_objects = self.files.filter(
171 object_id=self.id,
172 fed_resource_file__startswith=full_path).all()
173 else:
174 res_file_objects = self.files.filter(object_id=self.id,
175 resource_file__startswith=full_path).all()
176
177 # check any logical file associated with the resource file supports zip functionality
178 for res_file in res_file_objects:
179 if not res_file.logical_file.supports_zip:
180 return False
181
182 return True
183
184 def supports_delete_folder_on_zip(self, original_folder):
185 """check if the specified folder can be deleted at the end of zipping that folder"""
186
187 # find all the resource files in the folder to be deleted
188 # this is being passed both qualified and unqualified paths!
189 full_path = original_folder
190 if not full_path.startswith(self.file_path):
191 full_path = os.path.join(self.file_path, full_path)
192
193 if self.is_federated:
194 res_file_objects = self.files.filter(
195 object_id=self.id,
196 fed_resource_file__startswith=full_path).all()
197 else:
198 res_file_objects = self.files.filter(
199 object_id=self.id,
200 resource_file__startswith=full_path).all()
201
202 # check any logical file associated with the resource file supports deleting the folder
203 # after its zipped
204 for res_file in res_file_objects:
205 if not res_file.logical_file.supports_delete_folder_on_zip:
206 return False
207
208 return True
209
210 def get_missing_file_type_metadata_info(self):
211 # this is used in page pre-processor to build the context
212 # so that the landing page can show what metadata items are missing for each logical file
213 metadata_missing_info = []
214 for lfo in self.logical_files:
215 if not lfo.metadata.has_all_required_elements():
216 file_path = lfo.files.first().short_path
217 missing_elements = lfo.metadata.get_required_missing_elements()
218 metadata_missing_info.append({'file_path': file_path,
219 'missing_elements': missing_elements})
220 return metadata_missing_info
221
222 # this would allow us to pick up additional form elements for the template before the template
223 # is displayed
224 processor_for(CompositeResource)(resource_processor)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hs_composite_resource/models.py b/hs_composite_resource/models.py
--- a/hs_composite_resource/models.py
+++ b/hs_composite_resource/models.py
@@ -37,6 +37,11 @@
res_file.logical_file_content_object = logical_file
res_file.save()
+ @property
+ def supports_folders(self):
+ """ allow folders for CompositeResources """
+ return True
+
@property
def supports_logical_file(self):
""" if this resource allows associating resource file objects with logical file"""
|
{"golden_diff": "diff --git a/hs_composite_resource/models.py b/hs_composite_resource/models.py\n--- a/hs_composite_resource/models.py\n+++ b/hs_composite_resource/models.py\n@@ -37,6 +37,11 @@\n res_file.logical_file_content_object = logical_file\n res_file.save()\n \n+ @property\n+ def supports_folders(self):\n+ \"\"\" allow folders for CompositeResources \"\"\"\n+ return True\n+\n @property\n def supports_logical_file(self):\n \"\"\" if this resource allows associating resource file objects with logical file\"\"\"\n", "issue": "Permissions for Creating Folders?\nHello,\r\n\r\nI'm working on HydroShare export functionality for Model My Watershed. I am now able to successfully create resources and add files to them (for example: [66ce7b01a1e7490383e37a407979631c](https://beta.hydroshare.org/resource/66ce7b01a1e7490383e37a407979631c/)). So far I am using `hs_restclient`'s `createResource` and `addResourceFile` methods, which work very well. However, if I try to create a folder with `createResourceFolder`, I get a HydroShareNotAuthorized exception:\r\n\r\n```\r\nHydroShareNotAuthorized at /export/hydroshare/\r\nNot authorized to perform PUT on https://beta.hydroshare.org/hsapi/resource/3c741c66702649f689aedcd580e23f9b/folders/area-of-interest.\r\n```\r\n\r\nI am able to create folders using the HydroShare UI, so it seems I just don't have the permission in the REST API, which seems odd.\r\n\r\nHere's what my code looks like (messy, early draft):\r\n\r\n```python\r\nresource = hs.createResource(\r\n 'CompositeResource',\r\n params.get('title', project.name),\r\n abstract=params.get('abstract', ''),\r\n keywords=('mmw', 'model-my-watershed')\r\n)\r\n\r\naoi_folder = 'area-of-interest'\r\nhs.createResourceFolder(resource, pathname=aoi_folder)\r\n\r\n# AoI GeoJSON\r\naoi_geojson = GEOSGeometry(project.area_of_interest).geojson\r\naoi_file = StringIO.StringIO()\r\naoi_file.write(aoi_geojson)\r\nhs.addResourceFile(resource, aoi_file,\r\n '{}/{}'.format(aoi_folder, 'shape.geojson'))\r\n\r\n# AoI Shapefile\r\naoi_json = json.loads(aoi_geojson)\r\ncrs = {'no_defs': True, 'proj': 'longlat',\r\n 'ellps': 'WGS84', 'datum': 'WGS84'}\r\nschema = {'geometry': aoi_json['type'], 'properties': {}}\r\nwith fiona.open('/tmp/{}.shp'.format(resource), 'w',\r\n driver='ESRI Shapefile',\r\n crs=crs, schema=schema) as shapefile:\r\n shapefile.write({'geometry': aoi_json, 'properties': {}})\r\n\r\nfor ext in SHAPEFILE_EXTENSIONS:\r\n with open('/tmp/{}.{}'.format(resource, ext)) as shapefile:\r\n hs.addResourceFile(resource, shapefile,\r\n '{}/shape.{}'.format(aoi_folder, ext))\r\n```\n", "before_files": [{"content": "import os\n\nfrom mezzanine.pages.page_processors import processor_for\n\nfrom hs_core.models import BaseResource, ResourceManager, resource_processor\n\nfrom hs_file_types.models import GenericLogicalFile\n\n\nclass CompositeResource(BaseResource):\n objects = ResourceManager(\"CompositeResource\")\n\n class Meta:\n verbose_name = 'Composite Resource'\n proxy = True\n\n @property\n def can_be_public_or_discoverable(self):\n # resource level metadata check\n if not super(CompositeResource, self).can_be_public_or_discoverable:\n return False\n\n # filetype level metadata check\n for lf in self.logical_files:\n if not lf.metadata.has_all_required_elements():\n return False\n\n return True\n\n def set_default_logical_file(self):\n \"\"\"sets an instance of GenericLogicalFile to any resource file objects of this instance\n of the resource that is not already associated with a logical file. 
\"\"\"\n\n for res_file in self.files.all():\n if not res_file.has_logical_file:\n logical_file = GenericLogicalFile.create()\n res_file.logical_file_content_object = logical_file\n res_file.save()\n\n @property\n def supports_logical_file(self):\n \"\"\" if this resource allows associating resource file objects with logical file\"\"\"\n return True\n\n def get_metadata_xml(self, pretty_print=True, include_format_elements=True):\n from lxml import etree\n\n # get resource level core metadata as xml string\n # for composite resource we don't want the format elements at the resource level\n # as they are included at the file level xml node\n xml_string = super(CompositeResource, self).get_metadata_xml(pretty_print=False,\n include_format_elements=False)\n # add file type metadata xml\n\n # create an etree xml object\n RDF_ROOT = etree.fromstring(xml_string)\n\n # get root 'Description' element that contains all other elements\n container = RDF_ROOT.find('rdf:Description', namespaces=self.metadata.NAMESPACES)\n\n for lf in self.logical_files:\n lf.metadata.add_to_xml_container(container)\n\n return etree.tostring(RDF_ROOT, pretty_print=pretty_print)\n\n def supports_folder_creation(self, folder_full_path):\n \"\"\"this checks if it is allowed to create a folder at the specified path\"\"\"\n\n if __debug__:\n assert(folder_full_path.startswith(self.file_path))\n\n # determine containing folder\n if \"/\" in folder_full_path:\n path_to_check, _ = os.path.split(folder_full_path)\n else:\n path_to_check = folder_full_path\n\n if path_to_check != self.file_path:\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n\n return True\n\n def supports_rename_path(self, src_full_path, tgt_full_path):\n \"\"\"checks if file/folder rename/move is allowed\"\"\"\n\n if __debug__:\n assert(src_full_path.startswith(self.file_path))\n assert(tgt_full_path.startswith(self.file_path))\n\n istorage = self.get_irods_storage()\n tgt_file_dir = os.path.dirname(tgt_full_path)\n src_file_dir = os.path.dirname(src_full_path)\n\n def check_directory():\n path_to_check = ''\n if istorage.exists(tgt_file_dir):\n path_to_check = tgt_file_dir\n else:\n if tgt_file_dir.startswith(src_file_dir):\n path_to_check = src_file_dir\n\n if path_to_check and not path_to_check.endswith(\"data/contents\"):\n # it is not the base directory - it must be a directory under base dir\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n return True\n\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.full_path == src_full_path]\n\n if res_file_objs:\n res_file_obj = res_file_objs[0]\n # src_full_path contains file name\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n\n # check if the target directory allows stuff to be moved there\n return check_directory()\n else:\n # src_full_path is a folder path without file name\n # tgt_full_path also must be a folder path without file name\n # check that if the target folder contains 
any files and if any of those files\n # allow moving stuff there\n return check_directory()\n\n def can_add_files(self, target_full_path):\n \"\"\"\n checks if file(s) can be uploaded to the specified *target_full_path*\n :param target_full_path: full folder path name where file needs to be uploaded to\n :return: True or False\n \"\"\"\n istorage = self.get_irods_storage()\n if istorage.exists(target_full_path):\n path_to_check = target_full_path\n else:\n return False\n\n if not path_to_check.endswith(\"data/contents\"):\n # it is not the base directory - it must be a directory under base dir\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_add:\n return False\n return True\n\n def supports_zip(self, folder_to_zip):\n \"\"\"check if the given folder can be zipped or not\"\"\"\n\n # find all the resource files in the folder to be zipped\n # this is being passed both qualified and unqualified paths!\n full_path = folder_to_zip\n if not full_path.startswith(self.file_path):\n full_path = os.path.join(self.file_path, full_path)\n\n if self.is_federated:\n res_file_objects = self.files.filter(\n object_id=self.id,\n fed_resource_file__startswith=full_path).all()\n else:\n res_file_objects = self.files.filter(object_id=self.id,\n resource_file__startswith=full_path).all()\n\n # check any logical file associated with the resource file supports zip functionality\n for res_file in res_file_objects:\n if not res_file.logical_file.supports_zip:\n return False\n\n return True\n\n def supports_delete_folder_on_zip(self, original_folder):\n \"\"\"check if the specified folder can be deleted at the end of zipping that folder\"\"\"\n\n # find all the resource files in the folder to be deleted\n # this is being passed both qualified and unqualified paths!\n full_path = original_folder\n if not full_path.startswith(self.file_path):\n full_path = os.path.join(self.file_path, full_path)\n\n if self.is_federated:\n res_file_objects = self.files.filter(\n object_id=self.id,\n fed_resource_file__startswith=full_path).all()\n else:\n res_file_objects = self.files.filter(\n object_id=self.id,\n resource_file__startswith=full_path).all()\n\n # check any logical file associated with the resource file supports deleting the folder\n # after its zipped\n for res_file in res_file_objects:\n if not res_file.logical_file.supports_delete_folder_on_zip:\n return False\n\n return True\n\n def get_missing_file_type_metadata_info(self):\n # this is used in page pre-processor to build the context\n # so that the landing page can show what metadata items are missing for each logical file\n metadata_missing_info = []\n for lfo in self.logical_files:\n if not lfo.metadata.has_all_required_elements():\n file_path = lfo.files.first().short_path\n missing_elements = lfo.metadata.get_required_missing_elements()\n metadata_missing_info.append({'file_path': file_path,\n 'missing_elements': missing_elements})\n return metadata_missing_info\n\n# this would allow us to pick up additional form elements for the template before the template\n# is displayed\nprocessor_for(CompositeResource)(resource_processor)\n", "path": "hs_composite_resource/models.py"}], "after_files": [{"content": "import os\n\nfrom mezzanine.pages.page_processors import processor_for\n\nfrom hs_core.models import BaseResource, ResourceManager, resource_processor\n\nfrom hs_file_types.models import 
GenericLogicalFile\n\n\nclass CompositeResource(BaseResource):\n objects = ResourceManager(\"CompositeResource\")\n\n class Meta:\n verbose_name = 'Composite Resource'\n proxy = True\n\n @property\n def can_be_public_or_discoverable(self):\n # resource level metadata check\n if not super(CompositeResource, self).can_be_public_or_discoverable:\n return False\n\n # filetype level metadata check\n for lf in self.logical_files:\n if not lf.metadata.has_all_required_elements():\n return False\n\n return True\n\n def set_default_logical_file(self):\n \"\"\"sets an instance of GenericLogicalFile to any resource file objects of this instance\n of the resource that is not already associated with a logical file. \"\"\"\n\n for res_file in self.files.all():\n if not res_file.has_logical_file:\n logical_file = GenericLogicalFile.create()\n res_file.logical_file_content_object = logical_file\n res_file.save()\n\n @property\n def supports_folders(self):\n \"\"\" allow folders for CompositeResources \"\"\"\n return True\n\n @property\n def supports_logical_file(self):\n \"\"\" if this resource allows associating resource file objects with logical file\"\"\"\n return True\n\n def get_metadata_xml(self, pretty_print=True, include_format_elements=True):\n from lxml import etree\n\n # get resource level core metadata as xml string\n # for composite resource we don't want the format elements at the resource level\n # as they are included at the file level xml node\n xml_string = super(CompositeResource, self).get_metadata_xml(pretty_print=False,\n include_format_elements=False)\n # add file type metadata xml\n\n # create an etree xml object\n RDF_ROOT = etree.fromstring(xml_string)\n\n # get root 'Description' element that contains all other elements\n container = RDF_ROOT.find('rdf:Description', namespaces=self.metadata.NAMESPACES)\n\n for lf in self.logical_files:\n lf.metadata.add_to_xml_container(container)\n\n return etree.tostring(RDF_ROOT, pretty_print=pretty_print)\n\n def supports_folder_creation(self, folder_full_path):\n \"\"\"this checks if it is allowed to create a folder at the specified path\"\"\"\n\n if __debug__:\n assert(folder_full_path.startswith(self.file_path))\n\n # determine containing folder\n if \"/\" in folder_full_path:\n path_to_check, _ = os.path.split(folder_full_path)\n else:\n path_to_check = folder_full_path\n\n if path_to_check != self.file_path:\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n\n return True\n\n def supports_rename_path(self, src_full_path, tgt_full_path):\n \"\"\"checks if file/folder rename/move is allowed\"\"\"\n\n if __debug__:\n assert(src_full_path.startswith(self.file_path))\n assert(tgt_full_path.startswith(self.file_path))\n\n istorage = self.get_irods_storage()\n tgt_file_dir = os.path.dirname(tgt_full_path)\n src_file_dir = os.path.dirname(src_full_path)\n\n def check_directory():\n path_to_check = ''\n if istorage.exists(tgt_file_dir):\n path_to_check = tgt_file_dir\n else:\n if tgt_file_dir.startswith(src_file_dir):\n path_to_check = src_file_dir\n\n if path_to_check and not path_to_check.endswith(\"data/contents\"):\n # it is not the base directory - it must be a directory under base dir\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == 
path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n return True\n\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.full_path == src_full_path]\n\n if res_file_objs:\n res_file_obj = res_file_objs[0]\n # src_full_path contains file name\n if not res_file_obj.logical_file.supports_resource_file_rename or \\\n not res_file_obj.logical_file.supports_resource_file_move:\n return False\n\n # check if the target directory allows stuff to be moved there\n return check_directory()\n else:\n # src_full_path is a folder path without file name\n # tgt_full_path also must be a folder path without file name\n # check that if the target folder contains any files and if any of those files\n # allow moving stuff there\n return check_directory()\n\n def can_add_files(self, target_full_path):\n \"\"\"\n checks if file(s) can be uploaded to the specified *target_full_path*\n :param target_full_path: full folder path name where file needs to be uploaded to\n :return: True or False\n \"\"\"\n istorage = self.get_irods_storage()\n if istorage.exists(target_full_path):\n path_to_check = target_full_path\n else:\n return False\n\n if not path_to_check.endswith(\"data/contents\"):\n # it is not the base directory - it must be a directory under base dir\n res_file_objs = [res_file_obj for res_file_obj in self.files.all() if\n res_file_obj.dir_path == path_to_check]\n\n for res_file_obj in res_file_objs:\n if not res_file_obj.logical_file.supports_resource_file_add:\n return False\n return True\n\n def supports_zip(self, folder_to_zip):\n \"\"\"check if the given folder can be zipped or not\"\"\"\n\n # find all the resource files in the folder to be zipped\n # this is being passed both qualified and unqualified paths!\n full_path = folder_to_zip\n if not full_path.startswith(self.file_path):\n full_path = os.path.join(self.file_path, full_path)\n\n if self.is_federated:\n res_file_objects = self.files.filter(\n object_id=self.id,\n fed_resource_file__startswith=full_path).all()\n else:\n res_file_objects = self.files.filter(object_id=self.id,\n resource_file__startswith=full_path).all()\n\n # check any logical file associated with the resource file supports zip functionality\n for res_file in res_file_objects:\n if not res_file.logical_file.supports_zip:\n return False\n\n return True\n\n def supports_delete_folder_on_zip(self, original_folder):\n \"\"\"check if the specified folder can be deleted at the end of zipping that folder\"\"\"\n\n # find all the resource files in the folder to be deleted\n # this is being passed both qualified and unqualified paths!\n full_path = original_folder\n if not full_path.startswith(self.file_path):\n full_path = os.path.join(self.file_path, full_path)\n\n if self.is_federated:\n res_file_objects = self.files.filter(\n object_id=self.id,\n fed_resource_file__startswith=full_path).all()\n else:\n res_file_objects = self.files.filter(\n object_id=self.id,\n resource_file__startswith=full_path).all()\n\n # check any logical file associated with the resource file supports deleting the folder\n # after its zipped\n for res_file in res_file_objects:\n if not res_file.logical_file.supports_delete_folder_on_zip:\n return False\n\n return True\n\n def get_missing_file_type_metadata_info(self):\n # this is used in page pre-processor to build the context\n # so that the landing page can show what metadata 
items are missing for each logical file\n metadata_missing_info = []\n for lfo in self.logical_files:\n if not lfo.metadata.has_all_required_elements():\n file_path = lfo.files.first().short_path\n missing_elements = lfo.metadata.get_required_missing_elements()\n metadata_missing_info.append({'file_path': file_path,\n 'missing_elements': missing_elements})\n return metadata_missing_info\n\n# this would allow us to pick up additional form elements for the template before the template\n# is displayed\nprocessor_for(CompositeResource)(resource_processor)\n", "path": "hs_composite_resource/models.py"}]}
| 3,232 | 123 |
gh_patches_debug_34241
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1014
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ColorMap getLookupTable fails when alpha=None
<!-- In the following, please describe your issue in detail! -->
### Short description
The `getLookupTable` method of `pyqtgraph.ColorMap` fails to automatically determine whether alpha values should be included in a lookup table.
### Code to reproduce
```python
import pyqtgraph as pg
colors = [
(0, 0, 0),
(45, 5, 61),
(84, 42, 55)
]
cmap = pg.ColorMap(pos=(0, 1, 2), color=colors)
cmap.getLookupTable(alpha=False) # Works
cmap.getLookupTable(alpha=True) # Works, but returned lookup table does not contain alpha values.
cmap.getLookupTable() # Does not work
```
### Expected behavior
A lookup table is returned.
### Real behavior
The following exception is thrown:
```
Traceback (most recent call last):
File "test.py", line 13, in <module>
cmap.getLookupTable() # Does not work
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 223, in getLookupTable
alpha = self.usesAlpha()
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 236, in usesAlpha
return np.any(self.color[:,3] != max)
IndexError: index 3 is out of bounds for axis 1 with size 3
```
### Tested environment(s)
* PyQtGraph version: 0.11.0.dev0+gad7453c
* Qt Python binding: 0.11.0.dev0+gad7453c
* Python version: 3.7.3
* NumPy version: 0.11.0.dev0+gad7453c
* Operating system: Windows 10
* Installation method: pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/colormap.py`
Content:
```
1 import numpy as np
2 from .Qt import QtGui, QtCore
3 from .python2_3 import basestring
4
5
6 class ColorMap(object):
7 """
8 A ColorMap defines a relationship between a scalar value and a range of colors.
9 ColorMaps are commonly used for false-coloring monochromatic images, coloring
10 scatter-plot points, and coloring surface plots by height.
11
12 Each color map is defined by a set of colors, each corresponding to a
13 particular scalar value. For example:
14
15 | 0.0 -> black
16 | 0.2 -> red
17 | 0.6 -> yellow
18 | 1.0 -> white
19
20 The colors for intermediate values are determined by interpolating between
21 the two nearest colors in either RGB or HSV color space.
22
23 To provide user-defined color mappings, see :class:`GradientWidget <pyqtgraph.GradientWidget>`.
24 """
25
26
27 ## color interpolation modes
28 RGB = 1
29 HSV_POS = 2
30 HSV_NEG = 3
31
32 ## boundary modes
33 CLIP = 1
34 REPEAT = 2
35 MIRROR = 3
36
37 ## return types
38 BYTE = 1
39 FLOAT = 2
40 QCOLOR = 3
41
42 enumMap = {
43 'rgb': RGB,
44 'hsv+': HSV_POS,
45 'hsv-': HSV_NEG,
46 'clip': CLIP,
47 'repeat': REPEAT,
48 'mirror': MIRROR,
49 'byte': BYTE,
50 'float': FLOAT,
51 'qcolor': QCOLOR,
52 }
53
54 def __init__(self, pos, color, mode=None):
55 """
56 =============== ==============================================================
57 **Arguments:**
58 pos Array of positions where each color is defined
59 color Array of RGBA colors.
60 Integer data types are interpreted as 0-255; float data types
61 are interpreted as 0.0-1.0
62 mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
63 indicating the color space that should be used when
64 interpolating between stops. Note that the last mode value is
65 ignored. By default, the mode is entirely RGB.
66 =============== ==============================================================
67 """
68 self.pos = np.array(pos)
69 order = np.argsort(self.pos)
70 self.pos = self.pos[order]
71 self.color = np.array(color)[order]
72 if mode is None:
73 mode = np.ones(len(pos))
74 self.mode = mode
75 self.stopsCache = {}
76
77 def map(self, data, mode='byte'):
78 """
79 Return an array of colors corresponding to the values in *data*.
80 Data must be either a scalar position or an array (any shape) of positions.
81
82 The *mode* argument determines the type of data returned:
83
84 =========== ===============================================================
85 byte (default) Values are returned as 0-255 unsigned bytes.
86 float Values are returned as 0.0-1.0 floats.
87 qcolor Values are returned as an array of QColor objects.
88 =========== ===============================================================
89 """
90 if isinstance(mode, basestring):
91 mode = self.enumMap[mode.lower()]
92
93 if mode == self.QCOLOR:
94 pos, color = self.getStops(self.BYTE)
95 else:
96 pos, color = self.getStops(mode)
97
98 # don't need this--np.interp takes care of it.
99 #data = np.clip(data, pos.min(), pos.max())
100
101 # Interpolate
102 # TODO: is griddata faster?
103 # interp = scipy.interpolate.griddata(pos, color, data)
104 if np.isscalar(data):
105 interp = np.empty((color.shape[1],), dtype=color.dtype)
106 else:
107 if not isinstance(data, np.ndarray):
108 data = np.array(data)
109 interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)
110 for i in range(color.shape[1]):
111 interp[...,i] = np.interp(data, pos, color[:,i])
112
113 # Convert to QColor if requested
114 if mode == self.QCOLOR:
115 if np.isscalar(data):
116 return QtGui.QColor(*interp)
117 else:
118 return [QtGui.QColor(*x) for x in interp]
119 else:
120 return interp
121
122 def mapToQColor(self, data):
123 """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
124 return self.map(data, mode=self.QCOLOR)
125
126 def mapToByte(self, data):
127 """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
128 return self.map(data, mode=self.BYTE)
129
130 def mapToFloat(self, data):
131 """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
132 return self.map(data, mode=self.FLOAT)
133
134 def getGradient(self, p1=None, p2=None):
135 """Return a QLinearGradient object spanning from QPoints p1 to p2."""
136 if p1 == None:
137 p1 = QtCore.QPointF(0,0)
138 if p2 == None:
139 p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)
140 g = QtGui.QLinearGradient(p1, p2)
141
142 pos, color = self.getStops(mode=self.BYTE)
143 color = [QtGui.QColor(*x) for x in color]
144 g.setStops(list(zip(pos, color)))
145
146 #if self.colorMode == 'rgb':
147 #ticks = self.listTicks()
148 #g.setStops([(x, QtGui.QColor(t.color)) for t,x in ticks])
149 #elif self.colorMode == 'hsv': ## HSV mode is approximated for display by interpolating 10 points between each stop
150 #ticks = self.listTicks()
151 #stops = []
152 #stops.append((ticks[0][1], ticks[0][0].color))
153 #for i in range(1,len(ticks)):
154 #x1 = ticks[i-1][1]
155 #x2 = ticks[i][1]
156 #dx = (x2-x1) / 10.
157 #for j in range(1,10):
158 #x = x1 + dx*j
159 #stops.append((x, self.getColor(x)))
160 #stops.append((x2, self.getColor(x2)))
161 #g.setStops(stops)
162 return g
163
164 def getColors(self, mode=None):
165 """Return list of all color stops converted to the specified mode.
166 If mode is None, then no conversion is done."""
167 if isinstance(mode, basestring):
168 mode = self.enumMap[mode.lower()]
169
170 color = self.color
171 if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':
172 color = (color * 255).astype(np.ubyte)
173 elif mode == self.FLOAT and color.dtype.kind != 'f':
174 color = color.astype(float) / 255.
175
176 if mode == self.QCOLOR:
177 color = [QtGui.QColor(*x) for x in color]
178
179 return color
180
181 def getStops(self, mode):
182 ## Get fully-expanded set of RGBA stops in either float or byte mode.
183 if mode not in self.stopsCache:
184 color = self.color
185 if mode == self.BYTE and color.dtype.kind == 'f':
186 color = (color * 255).astype(np.ubyte)
187 elif mode == self.FLOAT and color.dtype.kind != 'f':
188 color = color.astype(float) / 255.
189
190 ## to support HSV mode, we need to do a little more work..
191 #stops = []
192 #for i in range(len(self.pos)):
193 #pos = self.pos[i]
194 #color = color[i]
195
196 #imode = self.mode[i]
197 #if imode == self.RGB:
198 #stops.append((x,color))
199 #else:
200 #ns =
201 self.stopsCache[mode] = (self.pos, color)
202 return self.stopsCache[mode]
203
204 def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode='byte'):
205 """
206 Return an RGB(A) lookup table (ndarray).
207
208 =============== =============================================================================
209 **Arguments:**
210 start The starting value in the lookup table (default=0.0)
211 stop The final value in the lookup table (default=1.0)
212 nPts The number of points in the returned lookup table.
213 alpha True, False, or None - Specifies whether or not alpha values are included
214 in the table. If alpha is None, it will be automatically determined.
215 mode Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.
216 See :func:`map() <pyqtgraph.ColorMap.map>`.
217 =============== =============================================================================
218 """
219 if isinstance(mode, basestring):
220 mode = self.enumMap[mode.lower()]
221
222 if alpha is None:
223 alpha = self.usesAlpha()
224
225 x = np.linspace(start, stop, nPts)
226 table = self.map(x, mode)
227
228 if not alpha:
229 return table[:,:3]
230 else:
231 return table
232
233 def usesAlpha(self):
234 """Return True if any stops have an alpha < 255"""
235 max = 1.0 if self.color.dtype.kind == 'f' else 255
236 return np.any(self.color[:,3] != max)
237
238 def isMapTrivial(self):
239 """
240 Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0.
241 """
242 if len(self.pos) != 2:
243 return False
244 if self.pos[0] != 0.0 or self.pos[1] != 1.0:
245 return False
246 if self.color.dtype.kind == 'f':
247 return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))
248 else:
249 return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))
250
251 def __repr__(self):
252 pos = repr(self.pos).replace('\n', '')
253 color = repr(self.color).replace('\n', '')
254 return "ColorMap(%s, %s)" % (pos, color)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/colormap.py b/pyqtgraph/colormap.py
--- a/pyqtgraph/colormap.py
+++ b/pyqtgraph/colormap.py
@@ -1,6 +1,7 @@
import numpy as np
from .Qt import QtGui, QtCore
from .python2_3 import basestring
+from .functions import mkColor
class ColorMap(object):
@@ -56,9 +57,9 @@
=============== ==============================================================
**Arguments:**
pos Array of positions where each color is defined
- color Array of RGBA colors.
- Integer data types are interpreted as 0-255; float data types
- are interpreted as 0.0-1.0
+ color Array of colors.
+ Values are interpreted via
+ :func:`mkColor() <pyqtgraph.mkColor>`.
mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
indicating the color space that should be used when
interpolating between stops. Note that the last mode value is
@@ -68,7 +69,11 @@
self.pos = np.array(pos)
order = np.argsort(self.pos)
self.pos = self.pos[order]
- self.color = np.array(color)[order]
+ self.color = np.apply_along_axis(
+ func1d = lambda x: mkColor(x).getRgb(),
+ axis = -1,
+ arr = color,
+ )[order]
if mode is None:
mode = np.ones(len(pos))
self.mode = mode
@@ -225,7 +230,7 @@
x = np.linspace(start, stop, nPts)
table = self.map(x, mode)
- if not alpha:
+ if not alpha and mode != self.QCOLOR:
return table[:,:3]
else:
return table
|
{"golden_diff": "diff --git a/pyqtgraph/colormap.py b/pyqtgraph/colormap.py\n--- a/pyqtgraph/colormap.py\n+++ b/pyqtgraph/colormap.py\n@@ -1,6 +1,7 @@\n import numpy as np\n from .Qt import QtGui, QtCore\n from .python2_3 import basestring\n+from .functions import mkColor\n \n \n class ColorMap(object):\n@@ -56,9 +57,9 @@\n =============== ==============================================================\n **Arguments:**\n pos Array of positions where each color is defined\n- color Array of RGBA colors.\n- Integer data types are interpreted as 0-255; float data types\n- are interpreted as 0.0-1.0\n+ color Array of colors.\n+ Values are interpreted via \n+ :func:`mkColor() <pyqtgraph.mkColor>`.\n mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)\n indicating the color space that should be used when\n interpolating between stops. Note that the last mode value is\n@@ -68,7 +69,11 @@\n self.pos = np.array(pos)\n order = np.argsort(self.pos)\n self.pos = self.pos[order]\n- self.color = np.array(color)[order]\n+ self.color = np.apply_along_axis(\n+ func1d = lambda x: mkColor(x).getRgb(),\n+ axis = -1,\n+ arr = color,\n+ )[order]\n if mode is None:\n mode = np.ones(len(pos))\n self.mode = mode\n@@ -225,7 +230,7 @@\n x = np.linspace(start, stop, nPts)\n table = self.map(x, mode)\n \n- if not alpha:\n+ if not alpha and mode != self.QCOLOR:\n return table[:,:3]\n else:\n return table\n", "issue": "ColorMap getLookupTable fails when alpha=None\n<!-- In the following, please describe your issue in detail! -->\r\n### Short description\r\nThe `getLookupTable` method of `pyqtgraph.ColorMap` fails to automatically determine whether alpha values should be included in a lookup table.\r\n\r\n### Code to reproduce\r\n\r\n```python\r\nimport pyqtgraph as pg\r\n\r\ncolors = [\r\n (0, 0, 0),\r\n (45, 5, 61),\r\n (84, 42, 55)\r\n]\r\ncmap = pg.ColorMap(pos=(0, 1, 2), color=colors)\r\n\r\ncmap.getLookupTable(alpha=False) # Works\r\ncmap.getLookupTable(alpha=True) # Works, but returned lookup table does not contain alpha values.\r\ncmap.getLookupTable() # Does not work\r\n```\r\n\r\n### Expected behavior\r\nA lookup table is returned.\r\n\r\n### Real behavior\r\nThe following exception is thrown:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 13, in <module>\r\n cmap.getLookupTable() # Does not work\r\n File \"...\\venv\\lib\\site-packages\\pyqtgraph\\colormap.py\", line 223, in getLookupTable\r\n alpha = self.usesAlpha()\r\n File \"...\\venv\\lib\\site-packages\\pyqtgraph\\colormap.py\", line 236, in usesAlpha\r\n return np.any(self.color[:,3] != max)\r\nIndexError: index 3 is out of bounds for axis 1 with size 3\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11.0.dev0+gad7453c\r\n * Qt Python binding: 0.11.0.dev0+gad7453c\r\n * Python version: 3.7.3\r\n * NumPy version: 0.11.0.dev0+gad7453c\r\n * Operating system: Windows 10\r\n * Installation method: pip\r\n\n", "before_files": [{"content": "import numpy as np\nfrom .Qt import QtGui, QtCore\nfrom .python2_3 import basestring\n\n\nclass ColorMap(object):\n \"\"\"\n A ColorMap defines a relationship between a scalar value and a range of colors. \n ColorMaps are commonly used for false-coloring monochromatic images, coloring \n scatter-plot points, and coloring surface plots by height. \n \n Each color map is defined by a set of colors, each corresponding to a\n particular scalar value. 
For example:\n \n | 0.0 -> black\n | 0.2 -> red\n | 0.6 -> yellow\n | 1.0 -> white\n \n The colors for intermediate values are determined by interpolating between \n the two nearest colors in either RGB or HSV color space.\n \n To provide user-defined color mappings, see :class:`GradientWidget <pyqtgraph.GradientWidget>`.\n \"\"\"\n \n \n ## color interpolation modes\n RGB = 1\n HSV_POS = 2\n HSV_NEG = 3\n \n ## boundary modes\n CLIP = 1\n REPEAT = 2\n MIRROR = 3\n \n ## return types\n BYTE = 1\n FLOAT = 2\n QCOLOR = 3\n \n enumMap = {\n 'rgb': RGB,\n 'hsv+': HSV_POS,\n 'hsv-': HSV_NEG,\n 'clip': CLIP,\n 'repeat': REPEAT,\n 'mirror': MIRROR,\n 'byte': BYTE,\n 'float': FLOAT,\n 'qcolor': QCOLOR,\n }\n \n def __init__(self, pos, color, mode=None):\n \"\"\"\n =============== ==============================================================\n **Arguments:**\n pos Array of positions where each color is defined\n color Array of RGBA colors.\n Integer data types are interpreted as 0-255; float data types\n are interpreted as 0.0-1.0\n mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)\n indicating the color space that should be used when\n interpolating between stops. Note that the last mode value is\n ignored. By default, the mode is entirely RGB.\n =============== ==============================================================\n \"\"\"\n self.pos = np.array(pos)\n order = np.argsort(self.pos)\n self.pos = self.pos[order]\n self.color = np.array(color)[order]\n if mode is None:\n mode = np.ones(len(pos))\n self.mode = mode\n self.stopsCache = {}\n \n def map(self, data, mode='byte'):\n \"\"\"\n Return an array of colors corresponding to the values in *data*. \n Data must be either a scalar position or an array (any shape) of positions.\n \n The *mode* argument determines the type of data returned:\n \n =========== ===============================================================\n byte (default) Values are returned as 0-255 unsigned bytes.\n float Values are returned as 0.0-1.0 floats. 
\n qcolor Values are returned as an array of QColor objects.\n =========== ===============================================================\n \"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n if mode == self.QCOLOR:\n pos, color = self.getStops(self.BYTE)\n else:\n pos, color = self.getStops(mode)\n \n # don't need this--np.interp takes care of it.\n #data = np.clip(data, pos.min(), pos.max())\n \n # Interpolate\n # TODO: is griddata faster?\n # interp = scipy.interpolate.griddata(pos, color, data)\n if np.isscalar(data):\n interp = np.empty((color.shape[1],), dtype=color.dtype)\n else:\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)\n for i in range(color.shape[1]):\n interp[...,i] = np.interp(data, pos, color[:,i])\n\n # Convert to QColor if requested\n if mode == self.QCOLOR:\n if np.isscalar(data):\n return QtGui.QColor(*interp)\n else:\n return [QtGui.QColor(*x) for x in interp]\n else:\n return interp\n \n def mapToQColor(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.QCOLOR)\n\n def mapToByte(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.BYTE)\n\n def mapToFloat(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.FLOAT)\n \n def getGradient(self, p1=None, p2=None):\n \"\"\"Return a QLinearGradient object spanning from QPoints p1 to p2.\"\"\"\n if p1 == None:\n p1 = QtCore.QPointF(0,0)\n if p2 == None:\n p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)\n g = QtGui.QLinearGradient(p1, p2)\n \n pos, color = self.getStops(mode=self.BYTE)\n color = [QtGui.QColor(*x) for x in color]\n g.setStops(list(zip(pos, color)))\n \n #if self.colorMode == 'rgb':\n #ticks = self.listTicks()\n #g.setStops([(x, QtGui.QColor(t.color)) for t,x in ticks])\n #elif self.colorMode == 'hsv': ## HSV mode is approximated for display by interpolating 10 points between each stop\n #ticks = self.listTicks()\n #stops = []\n #stops.append((ticks[0][1], ticks[0][0].color))\n #for i in range(1,len(ticks)):\n #x1 = ticks[i-1][1]\n #x2 = ticks[i][1]\n #dx = (x2-x1) / 10.\n #for j in range(1,10):\n #x = x1 + dx*j\n #stops.append((x, self.getColor(x)))\n #stops.append((x2, self.getColor(x2)))\n #g.setStops(stops)\n return g\n \n def getColors(self, mode=None):\n \"\"\"Return list of all color stops converted to the specified mode.\n If mode is None, then no conversion is done.\"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n color = self.color\n if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':\n color = (color * 255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n \n if mode == self.QCOLOR:\n color = [QtGui.QColor(*x) for x in color]\n \n return color\n \n def getStops(self, mode):\n ## Get fully-expanded set of RGBA stops in either float or byte mode.\n if mode not in self.stopsCache:\n color = self.color\n if mode == self.BYTE and color.dtype.kind == 'f':\n color = (color * 255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n \n ## to support HSV mode, we need to do a little more work..\n #stops = []\n #for i in range(len(self.pos)):\n #pos = self.pos[i]\n #color = color[i]\n \n #imode = 
self.mode[i]\n #if imode == self.RGB:\n #stops.append((x,color)) \n #else:\n #ns = \n self.stopsCache[mode] = (self.pos, color)\n return self.stopsCache[mode]\n \n def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode='byte'):\n \"\"\"\n Return an RGB(A) lookup table (ndarray). \n \n =============== =============================================================================\n **Arguments:**\n start The starting value in the lookup table (default=0.0)\n stop The final value in the lookup table (default=1.0)\n nPts The number of points in the returned lookup table.\n alpha True, False, or None - Specifies whether or not alpha values are included\n in the table. If alpha is None, it will be automatically determined.\n mode Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.\n See :func:`map() <pyqtgraph.ColorMap.map>`.\n =============== =============================================================================\n \"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n if alpha is None:\n alpha = self.usesAlpha()\n \n x = np.linspace(start, stop, nPts)\n table = self.map(x, mode)\n \n if not alpha:\n return table[:,:3]\n else:\n return table\n \n def usesAlpha(self):\n \"\"\"Return True if any stops have an alpha < 255\"\"\"\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)\n \n def isMapTrivial(self):\n \"\"\"\n Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0.\n \"\"\"\n if len(self.pos) != 2:\n return False\n if self.pos[0] != 0.0 or self.pos[1] != 1.0:\n return False\n if self.color.dtype.kind == 'f':\n return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))\n else:\n return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))\n\n def __repr__(self):\n pos = repr(self.pos).replace('\\n', '')\n color = repr(self.color).replace('\\n', '')\n return \"ColorMap(%s, %s)\" % (pos, color)\n", "path": "pyqtgraph/colormap.py"}], "after_files": [{"content": "import numpy as np\nfrom .Qt import QtGui, QtCore\nfrom .python2_3 import basestring\nfrom .functions import mkColor\n\n\nclass ColorMap(object):\n \"\"\"\n A ColorMap defines a relationship between a scalar value and a range of colors. \n ColorMaps are commonly used for false-coloring monochromatic images, coloring \n scatter-plot points, and coloring surface plots by height. \n \n Each color map is defined by a set of colors, each corresponding to a\n particular scalar value. 
For example:\n \n | 0.0 -> black\n | 0.2 -> red\n | 0.6 -> yellow\n | 1.0 -> white\n \n The colors for intermediate values are determined by interpolating between \n the two nearest colors in either RGB or HSV color space.\n \n To provide user-defined color mappings, see :class:`GradientWidget <pyqtgraph.GradientWidget>`.\n \"\"\"\n \n \n ## color interpolation modes\n RGB = 1\n HSV_POS = 2\n HSV_NEG = 3\n \n ## boundary modes\n CLIP = 1\n REPEAT = 2\n MIRROR = 3\n \n ## return types\n BYTE = 1\n FLOAT = 2\n QCOLOR = 3\n \n enumMap = {\n 'rgb': RGB,\n 'hsv+': HSV_POS,\n 'hsv-': HSV_NEG,\n 'clip': CLIP,\n 'repeat': REPEAT,\n 'mirror': MIRROR,\n 'byte': BYTE,\n 'float': FLOAT,\n 'qcolor': QCOLOR,\n }\n \n def __init__(self, pos, color, mode=None):\n \"\"\"\n =============== ==============================================================\n **Arguments:**\n pos Array of positions where each color is defined\n color Array of colors.\n Values are interpreted via \n :func:`mkColor() <pyqtgraph.mkColor>`.\n mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)\n indicating the color space that should be used when\n interpolating between stops. Note that the last mode value is\n ignored. By default, the mode is entirely RGB.\n =============== ==============================================================\n \"\"\"\n self.pos = np.array(pos)\n order = np.argsort(self.pos)\n self.pos = self.pos[order]\n self.color = np.apply_along_axis(\n func1d = lambda x: mkColor(x).getRgb(),\n axis = -1,\n arr = color,\n )[order]\n if mode is None:\n mode = np.ones(len(pos))\n self.mode = mode\n self.stopsCache = {}\n \n def map(self, data, mode='byte'):\n \"\"\"\n Return an array of colors corresponding to the values in *data*. \n Data must be either a scalar position or an array (any shape) of positions.\n \n The *mode* argument determines the type of data returned:\n \n =========== ===============================================================\n byte (default) Values are returned as 0-255 unsigned bytes.\n float Values are returned as 0.0-1.0 floats. 
\n qcolor Values are returned as an array of QColor objects.\n =========== ===============================================================\n \"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n if mode == self.QCOLOR:\n pos, color = self.getStops(self.BYTE)\n else:\n pos, color = self.getStops(mode)\n \n # don't need this--np.interp takes care of it.\n #data = np.clip(data, pos.min(), pos.max())\n \n # Interpolate\n # TODO: is griddata faster?\n # interp = scipy.interpolate.griddata(pos, color, data)\n if np.isscalar(data):\n interp = np.empty((color.shape[1],), dtype=color.dtype)\n else:\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)\n for i in range(color.shape[1]):\n interp[...,i] = np.interp(data, pos, color[:,i])\n\n # Convert to QColor if requested\n if mode == self.QCOLOR:\n if np.isscalar(data):\n return QtGui.QColor(*interp)\n else:\n return [QtGui.QColor(*x) for x in interp]\n else:\n return interp\n \n def mapToQColor(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.QCOLOR)\n\n def mapToByte(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.BYTE)\n\n def mapToFloat(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.FLOAT)\n \n def getGradient(self, p1=None, p2=None):\n \"\"\"Return a QLinearGradient object spanning from QPoints p1 to p2.\"\"\"\n if p1 == None:\n p1 = QtCore.QPointF(0,0)\n if p2 == None:\n p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)\n g = QtGui.QLinearGradient(p1, p2)\n \n pos, color = self.getStops(mode=self.BYTE)\n color = [QtGui.QColor(*x) for x in color]\n g.setStops(list(zip(pos, color)))\n \n #if self.colorMode == 'rgb':\n #ticks = self.listTicks()\n #g.setStops([(x, QtGui.QColor(t.color)) for t,x in ticks])\n #elif self.colorMode == 'hsv': ## HSV mode is approximated for display by interpolating 10 points between each stop\n #ticks = self.listTicks()\n #stops = []\n #stops.append((ticks[0][1], ticks[0][0].color))\n #for i in range(1,len(ticks)):\n #x1 = ticks[i-1][1]\n #x2 = ticks[i][1]\n #dx = (x2-x1) / 10.\n #for j in range(1,10):\n #x = x1 + dx*j\n #stops.append((x, self.getColor(x)))\n #stops.append((x2, self.getColor(x2)))\n #g.setStops(stops)\n return g\n \n def getColors(self, mode=None):\n \"\"\"Return list of all color stops converted to the specified mode.\n If mode is None, then no conversion is done.\"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n color = self.color\n if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':\n color = (color * 255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n \n if mode == self.QCOLOR:\n color = [QtGui.QColor(*x) for x in color]\n \n return color\n \n def getStops(self, mode):\n ## Get fully-expanded set of RGBA stops in either float or byte mode.\n if mode not in self.stopsCache:\n color = self.color\n if mode == self.BYTE and color.dtype.kind == 'f':\n color = (color * 255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n \n ## to support HSV mode, we need to do a little more work..\n #stops = []\n #for i in range(len(self.pos)):\n #pos = self.pos[i]\n #color = color[i]\n \n #imode = 
self.mode[i]\n #if imode == self.RGB:\n #stops.append((x,color)) \n #else:\n #ns = \n self.stopsCache[mode] = (self.pos, color)\n return self.stopsCache[mode]\n \n def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode='byte'):\n \"\"\"\n Return an RGB(A) lookup table (ndarray). \n \n =============== =============================================================================\n **Arguments:**\n start The starting value in the lookup table (default=0.0)\n stop The final value in the lookup table (default=1.0)\n nPts The number of points in the returned lookup table.\n alpha True, False, or None - Specifies whether or not alpha values are included\n in the table. If alpha is None, it will be automatically determined.\n mode Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.\n See :func:`map() <pyqtgraph.ColorMap.map>`.\n =============== =============================================================================\n \"\"\"\n if isinstance(mode, basestring):\n mode = self.enumMap[mode.lower()]\n \n if alpha is None:\n alpha = self.usesAlpha()\n \n x = np.linspace(start, stop, nPts)\n table = self.map(x, mode)\n \n if not alpha and mode != self.QCOLOR:\n return table[:,:3]\n else:\n return table\n \n def usesAlpha(self):\n \"\"\"Return True if any stops have an alpha < 255\"\"\"\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)\n \n def isMapTrivial(self):\n \"\"\"\n Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0.\n \"\"\"\n if len(self.pos) != 2:\n return False\n if self.pos[0] != 0.0 or self.pos[1] != 1.0:\n return False\n if self.color.dtype.kind == 'f':\n return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))\n else:\n return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))\n\n def __repr__(self):\n pos = repr(self.pos).replace('\\n', '')\n color = repr(self.color).replace('\\n', '')\n return \"ColorMap(%s, %s)\" % (pos, color)\n", "path": "pyqtgraph/colormap.py"}]}
| 3,675 | 422 |
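A minimal sketch of the failure mode and of what the fix above changes: with three-channel color stops, `ColorMap.color` has shape (N, 3), so `usesAlpha()` indexing column 3 raises the reported IndexError; the patch normalizes every stop to four RGBA channels via `mkColor` before storing it. The `to_rgba` helper below is a hypothetical stand-in for that conversion, not pyqtgraph code.

```python
import numpy as np

def to_rgba(color):
    # Hypothetical stand-in for pyqtgraph's mkColor(...).getRgb():
    # pad an RGB tuple with an opaque alpha channel.
    c = tuple(color)
    return c if len(c) == 4 else c + (255,)

colors = [(0, 0, 0), (45, 5, 61), (84, 42, 55)]
rgba = np.array([to_rgba(c) for c in colors])   # shape (3, 4)

# With a guaranteed fourth channel, the alpha test from usesAlpha()
# no longer raises IndexError for RGB-only stops:
print(np.any(rgba[:, 3] != 255))                # -> False
```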
gh_patches_debug_3750
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-565
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
documentation: Show reduced redundancy option in "aws s3 cp help"
When a user types `aws s3 cp help` it describes this option:
```
--storage-class The type of storage to use for the object. Defaults to
'STANDARD'
```
It would be super-helpful to list the string the user should specify here if they want reduced redundancy storage (i.e., `'REDUCED_REDUNDANCY'`).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/s3/description.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14
15 def add_command_descriptions(cmd_dict):
16 """
17 This function adds descritpions to the various commands along with
18 usage.
19 """
20 cmd_dict['cp']['description'] = "Copies a local file or S3 object to \
21 another location locally or in S3."
22 cmd_dict['cp']['usage'] = "<LocalPath> <S3Path> or <S3Path> <LocalPath> " \
23 "or <S3Path> <S3Path>"
24
25 cmd_dict['mv']['description'] = "Moves a local file or S3 object to " \
26 "another location locally or in S3."
27 cmd_dict['mv']['usage'] = "<LocalPath> <S3Path> or <S3Path> <LocalPath> " \
28 "or <S3Path> <S3Path>"
29
30 cmd_dict['rm']['description'] = "Deletes an S3 object."
31 cmd_dict['rm']['usage'] = "<S3Path>"
32
33 cmd_dict['sync']['description'] = "Syncs directories and S3 prefixes."
34 cmd_dict['sync']['usage'] = "<LocalPath> <S3Path> or <S3Path> " \
35 "<LocalPath> or <S3Path> <S3Path>"
36
37 cmd_dict['ls']['description'] = "List S3 objects and common prefixes " \
38 "under a prefix or all S3 buckets."
39 cmd_dict['ls']['usage'] = "<S3Path> or NONE"
40
41 cmd_dict['mb']['description'] = "Creates an S3 bucket."
42 cmd_dict['mb']['usage'] = "<S3Path>"
43
44 cmd_dict['rb']['description'] = "Deletes an S3 bucket."
45 cmd_dict['rb']['usage'] = "<S3Path>"
46
47
48 def add_param_descriptions(params_dict):
49 """
50 This function adds descriptions to the various parameters that can be
51 used in commands.
52 """
53 params_dict['dryrun']['documents'] = "Displays the operations that " \
54 "would be performed using the specified command without actually" \
55 "running them."
56
57 params_dict['quiet']['documents'] = "Does not display the operations " \
58 "performed from the specified command."
59
60 params_dict['recursive']['documents'] = "Command is performed on all" \
61 "files or objects under the specified directory or prefix."
62
63 params_dict['delete']['documents'] = "Files that exist in the " \
64 "destination but not in the source are deleted during sync."
65
66 params_dict['exclude']['documents'] = "Exclude all files or objects" \
67 " from the command that matches the specified pattern."
68
69 params_dict['include']['documents'] = "Don't exclude files or objects in " \
70 "the command that match the specified pattern"
71
72 params_dict['acl']['documents'] = "Sets the ACl for the object when the " \
73 "command is performed. Only accepts values of ``private``, \
74 ``public-read``, or ``public-read-write``."
75
76 params_dict['force']['documents'] = "Deletes all objects in the bucket " \
77 "including the bucket itself."
78
79 params_dict['no-guess-mime-type']['documents'] = (
80 "Do not try to guess the mime type for uploaded files. By default the "
81 "mime type of a file is guessed when it is uploaded.")
82
83 params_dict['content-type']['documents'] = (
84 "Specify an explicit content type for this operation. "
85 "This value overrides any guessed mime types.")
86
87 params_dict['cache-control']['documents'] = \
88 "Specifies caching behavior along the request/reply chain."
89
90 params_dict['content-disposition']['documents'] = \
91 "Specifies presentational information for the object."
92
93 params_dict['content-encoding']['documents'] = (
94 "Specifies what content encodings have been "
95 "applied to the object and thus what decoding mechanisms "
96 "must be applied to obtain the media-type referenced "
97 "by the Content-Type header field.")
98
99 params_dict['content-language']['documents'] = \
100 "The language the content is in."
101
102 params_dict['expires']['documents'] = \
103 "The date and time at which the object is no longer cacheable."
104
105 params_dict['sse']['documents'] = (
106 "Enable Server Side Encryption of the object in S3")
107
108 params_dict['storage-class']['documents'] = (
109 "The type of storage to use for the object. "
110 "Defaults to 'STANDARD'")
111
112 params_dict['website-redirect']['documents'] = (
113 "If the bucket is configured as a website, redirects requests "
114 "for this object to another object in the same bucket or to an "
115 "external URL. Amazon S3 stores the value of this header in the "
116 "object metadata.")
117
118 params_dict['grants']['documents'] = (
119 "Grant specific permissions to individual users or groups. "
120 "You can supply a list of grants of the form "
121 "``permission=grantee`` where permission is one of: "
122 "``read``, ``readacl``, ``writeacp``, ``full``")
123
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/s3/description.py b/awscli/customizations/s3/description.py
--- a/awscli/customizations/s3/description.py
+++ b/awscli/customizations/s3/description.py
@@ -107,6 +107,7 @@
params_dict['storage-class']['documents'] = (
"The type of storage to use for the object. "
+ "Valid choices are: STANDARD | REDUCED_REDUNDANCY. "
"Defaults to 'STANDARD'")
params_dict['website-redirect']['documents'] = (
|
{"golden_diff": "diff --git a/awscli/customizations/s3/description.py b/awscli/customizations/s3/description.py\n--- a/awscli/customizations/s3/description.py\n+++ b/awscli/customizations/s3/description.py\n@@ -107,6 +107,7 @@\n \n params_dict['storage-class']['documents'] = (\n \"The type of storage to use for the object. \"\n+ \"Valid choices are: STANDARD | REDUCED_REDUNDANCY. \"\n \"Defaults to 'STANDARD'\")\n \n params_dict['website-redirect']['documents'] = (\n", "issue": "documentation: Show reduced redundancy option in \"aws s3 cp help\"\nWhen a user types `aws s3 cp help` it describes this option:\n\n```\n--storage-class The type of storage to use for the object. Defaults to\n'STANDARD'\n```\n\nIt would be super-helpful to list the string the user should specify here if they want reduced redundancy storage (i.e., `'REDUCED_REDUNDANCY'`).\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\n\ndef add_command_descriptions(cmd_dict):\n \"\"\"\n This function adds descritpions to the various commands along with\n usage.\n \"\"\"\n cmd_dict['cp']['description'] = \"Copies a local file or S3 object to \\\n another location locally or in S3.\"\n cmd_dict['cp']['usage'] = \"<LocalPath> <S3Path> or <S3Path> <LocalPath> \" \\\n \"or <S3Path> <S3Path>\"\n\n cmd_dict['mv']['description'] = \"Moves a local file or S3 object to \" \\\n \"another location locally or in S3.\"\n cmd_dict['mv']['usage'] = \"<LocalPath> <S3Path> or <S3Path> <LocalPath> \" \\\n \"or <S3Path> <S3Path>\"\n\n cmd_dict['rm']['description'] = \"Deletes an S3 object.\"\n cmd_dict['rm']['usage'] = \"<S3Path>\"\n\n cmd_dict['sync']['description'] = \"Syncs directories and S3 prefixes.\"\n cmd_dict['sync']['usage'] = \"<LocalPath> <S3Path> or <S3Path> \" \\\n \"<LocalPath> or <S3Path> <S3Path>\"\n\n cmd_dict['ls']['description'] = \"List S3 objects and common prefixes \" \\\n \"under a prefix or all S3 buckets.\"\n cmd_dict['ls']['usage'] = \"<S3Path> or NONE\"\n\n cmd_dict['mb']['description'] = \"Creates an S3 bucket.\"\n cmd_dict['mb']['usage'] = \"<S3Path>\"\n\n cmd_dict['rb']['description'] = \"Deletes an S3 bucket.\"\n cmd_dict['rb']['usage'] = \"<S3Path>\"\n\n\ndef add_param_descriptions(params_dict):\n \"\"\"\n This function adds descriptions to the various parameters that can be\n used in commands.\n \"\"\"\n params_dict['dryrun']['documents'] = \"Displays the operations that \" \\\n \"would be performed using the specified command without actually\" \\\n \"running them.\"\n\n params_dict['quiet']['documents'] = \"Does not display the operations \" \\\n \"performed from the specified command.\"\n\n params_dict['recursive']['documents'] = \"Command is performed on all\" \\\n \"files or objects under the specified directory or prefix.\"\n\n params_dict['delete']['documents'] = \"Files that exist in the \" \\\n \"destination but not in the source are deleted during sync.\"\n\n params_dict['exclude']['documents'] = \"Exclude all files or objects\" \\\n \" from the 
command that matches the specified pattern.\"\n\n params_dict['include']['documents'] = \"Don't exclude files or objects in \" \\\n \"the command that match the specified pattern\"\n\n params_dict['acl']['documents'] = \"Sets the ACl for the object when the \" \\\n \"command is performed. Only accepts values of ``private``, \\\n ``public-read``, or ``public-read-write``.\"\n\n params_dict['force']['documents'] = \"Deletes all objects in the bucket \" \\\n \"including the bucket itself.\"\n\n params_dict['no-guess-mime-type']['documents'] = (\n \"Do not try to guess the mime type for uploaded files. By default the \"\n \"mime type of a file is guessed when it is uploaded.\")\n\n params_dict['content-type']['documents'] = (\n \"Specify an explicit content type for this operation. \"\n \"This value overrides any guessed mime types.\")\n\n params_dict['cache-control']['documents'] = \\\n \"Specifies caching behavior along the request/reply chain.\"\n\n params_dict['content-disposition']['documents'] = \\\n \"Specifies presentational information for the object.\"\n \n params_dict['content-encoding']['documents'] = (\n \"Specifies what content encodings have been \"\n \"applied to the object and thus what decoding mechanisms \"\n \"must be applied to obtain the media-type referenced \"\n \"by the Content-Type header field.\")\n \n params_dict['content-language']['documents'] = \\\n \"The language the content is in.\"\n\n params_dict['expires']['documents'] = \\\n \"The date and time at which the object is no longer cacheable.\"\n \n params_dict['sse']['documents'] = (\n \"Enable Server Side Encryption of the object in S3\")\n\n params_dict['storage-class']['documents'] = (\n \"The type of storage to use for the object. \"\n \"Defaults to 'STANDARD'\")\n\n params_dict['website-redirect']['documents'] = (\n \"If the bucket is configured as a website, redirects requests \"\n \"for this object to another object in the same bucket or to an \"\n \"external URL. Amazon S3 stores the value of this header in the \"\n \"object metadata.\")\n\n params_dict['grants']['documents'] = (\n \"Grant specific permissions to individual users or groups. \"\n \"You can supply a list of grants of the form \"\n \"``permission=grantee`` where permission is one of: \"\n \"``read``, ``readacl``, ``writeacp``, ``full``\")\n\n", "path": "awscli/customizations/s3/description.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\n\ndef add_command_descriptions(cmd_dict):\n \"\"\"\n This function adds descritpions to the various commands along with\n usage.\n \"\"\"\n cmd_dict['cp']['description'] = \"Copies a local file or S3 object to \\\n another location locally or in S3.\"\n cmd_dict['cp']['usage'] = \"<LocalPath> <S3Path> or <S3Path> <LocalPath> \" \\\n \"or <S3Path> <S3Path>\"\n\n cmd_dict['mv']['description'] = \"Moves a local file or S3 object to \" \\\n \"another location locally or in S3.\"\n cmd_dict['mv']['usage'] = \"<LocalPath> <S3Path> or <S3Path> <LocalPath> \" \\\n \"or <S3Path> <S3Path>\"\n\n cmd_dict['rm']['description'] = \"Deletes an S3 object.\"\n cmd_dict['rm']['usage'] = \"<S3Path>\"\n\n cmd_dict['sync']['description'] = \"Syncs directories and S3 prefixes.\"\n cmd_dict['sync']['usage'] = \"<LocalPath> <S3Path> or <S3Path> \" \\\n \"<LocalPath> or <S3Path> <S3Path>\"\n\n cmd_dict['ls']['description'] = \"List S3 objects and common prefixes \" \\\n \"under a prefix or all S3 buckets.\"\n cmd_dict['ls']['usage'] = \"<S3Path> or NONE\"\n\n cmd_dict['mb']['description'] = \"Creates an S3 bucket.\"\n cmd_dict['mb']['usage'] = \"<S3Path>\"\n\n cmd_dict['rb']['description'] = \"Deletes an S3 bucket.\"\n cmd_dict['rb']['usage'] = \"<S3Path>\"\n\n\ndef add_param_descriptions(params_dict):\n \"\"\"\n This function adds descriptions to the various parameters that can be\n used in commands.\n \"\"\"\n params_dict['dryrun']['documents'] = \"Displays the operations that \" \\\n \"would be performed using the specified command without actually\" \\\n \"running them.\"\n\n params_dict['quiet']['documents'] = \"Does not display the operations \" \\\n \"performed from the specified command.\"\n\n params_dict['recursive']['documents'] = \"Command is performed on all\" \\\n \"files or objects under the specified directory or prefix.\"\n\n params_dict['delete']['documents'] = \"Files that exist in the \" \\\n \"destination but not in the source are deleted during sync.\"\n\n params_dict['exclude']['documents'] = \"Exclude all files or objects\" \\\n \" from the command that matches the specified pattern.\"\n\n params_dict['include']['documents'] = \"Don't exclude files or objects in \" \\\n \"the command that match the specified pattern\"\n\n params_dict['acl']['documents'] = \"Sets the ACl for the object when the \" \\\n \"command is performed. Only accepts values of ``private``, \\\n ``public-read``, or ``public-read-write``.\"\n\n params_dict['force']['documents'] = \"Deletes all objects in the bucket \" \\\n \"including the bucket itself.\"\n\n params_dict['no-guess-mime-type']['documents'] = (\n \"Do not try to guess the mime type for uploaded files. By default the \"\n \"mime type of a file is guessed when it is uploaded.\")\n\n params_dict['content-type']['documents'] = (\n \"Specify an explicit content type for this operation. 
\"\n \"This value overrides any guessed mime types.\")\n\n params_dict['cache-control']['documents'] = \\\n \"Specifies caching behavior along the request/reply chain.\"\n\n params_dict['content-disposition']['documents'] = \\\n \"Specifies presentational information for the object.\"\n \n params_dict['content-encoding']['documents'] = (\n \"Specifies what content encodings have been \"\n \"applied to the object and thus what decoding mechanisms \"\n \"must be applied to obtain the media-type referenced \"\n \"by the Content-Type header field.\")\n \n params_dict['content-language']['documents'] = \\\n \"The language the content is in.\"\n\n params_dict['expires']['documents'] = \\\n \"The date and time at which the object is no longer cacheable.\"\n \n params_dict['sse']['documents'] = (\n \"Enable Server Side Encryption of the object in S3\")\n\n params_dict['storage-class']['documents'] = (\n \"The type of storage to use for the object. \"\n \"Valid choices are: STANDARD | REDUCED_REDUNDANCY. \"\n \"Defaults to 'STANDARD'\")\n\n params_dict['website-redirect']['documents'] = (\n \"If the bucket is configured as a website, redirects requests \"\n \"for this object to another object in the same bucket or to an \"\n \"external URL. Amazon S3 stores the value of this header in the \"\n \"object metadata.\")\n\n params_dict['grants']['documents'] = (\n \"Grant specific permissions to individual users or groups. \"\n \"You can supply a list of grants of the form \"\n \"``permission=grantee`` where permission is one of: \"\n \"``read``, ``readacl``, ``writeacp``, ``full``\")\n\n", "path": "awscli/customizations/s3/description.py"}]}
| 1,855 | 126 |
gh_patches_debug_11395
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-958
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'NoneType' object has no attribute 'strip' (file "/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py", line 23, in codepoint)
[01:04am] <Ant> .u
01:04AM <Sopel> AttributeError: 'NoneType' object has no attribute 'strip' (file "/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py", line 23, in codepoint)
01:04AM <Sopel> Ant: Sopel v. 6.1.1
This is in my Debian oldstable with Python v2.7.3. :(
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/unicode_info.py`
Content:
```
1 # coding=utf-8
2 """Codepoints Module"""
3 # Copyright 2013, Elsie Powell, embolalia.com
4 # Copyright 2008, Sean B. Palmer, inamidst.com
5 # Licensed under the Eiffel Forum License 2.
6 from __future__ import unicode_literals, absolute_import, print_function, division
7 import unicodedata
8 import sys
9 from sopel.module import commands, example, NOLIMIT
10
11 if sys.version_info.major >= 3:
12 unichr = chr
13
14
15 @commands('u')
16 @example('.u ‽', 'U+203D INTERROBANG (‽)')
17 @example('.u 203D', 'U+203D INTERROBANG (‽)')
18 def codepoint(bot, trigger):
19 arg = trigger.group(2).strip()
20 if len(arg) == 0:
21 bot.reply('What code point do you want me to look up?')
22 return NOLIMIT
23 elif len(arg) > 1:
24 if arg.startswith('U+'):
25 arg = arg[2:]
26 try:
27 arg = unichr(int(arg, 16))
28 except:
29 bot.reply("That's not a valid code point.")
30 return NOLIMIT
31
32 # Get the hex value for the code point, and drop the 0x from the front
33 point = str(hex(ord(u'' + arg)))[2:]
34 # Make the hex 4 characters long with preceding 0s, and all upper case
35 point = point.rjust(4, str('0')).upper()
36 try:
37 name = unicodedata.name(arg)
38 except ValueError:
39 return 'U+%s (No name found)' % point
40
41 if not unicodedata.combining(arg):
42 template = 'U+%s %s (%s)'
43 else:
44 template = 'U+%s %s (\xe2\x97\x8c%s)'
45 bot.say(template % (point, name, arg))
46
47 if __name__ == "__main__":
48 from sopel.test_tools import run_example_tests
49 run_example_tests(__file__)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/modules/unicode_info.py b/sopel/modules/unicode_info.py
--- a/sopel/modules/unicode_info.py
+++ b/sopel/modules/unicode_info.py
@@ -16,11 +16,14 @@
@example('.u ‽', 'U+203D INTERROBANG (‽)')
@example('.u 203D', 'U+203D INTERROBANG (‽)')
def codepoint(bot, trigger):
- arg = trigger.group(2).strip()
- if len(arg) == 0:
+ arg = trigger.group(2)
+ if not arg:
bot.reply('What code point do you want me to look up?')
return NOLIMIT
- elif len(arg) > 1:
+ stripped = arg.strip()
+ if len(stripped) > 0:
+ arg = stripped
+ if len(arg) > 1:
if arg.startswith('U+'):
arg = arg[2:]
try:
|
{"golden_diff": "diff --git a/sopel/modules/unicode_info.py b/sopel/modules/unicode_info.py\n--- a/sopel/modules/unicode_info.py\n+++ b/sopel/modules/unicode_info.py\n@@ -16,11 +16,14 @@\n @example('.u \u203d', 'U+203D INTERROBANG (\u203d)')\n @example('.u 203D', 'U+203D INTERROBANG (\u203d)')\n def codepoint(bot, trigger):\n- arg = trigger.group(2).strip()\n- if len(arg) == 0:\n+ arg = trigger.group(2)\n+ if not arg:\n bot.reply('What code point do you want me to look up?')\n return NOLIMIT\n- elif len(arg) > 1:\n+ stripped = arg.strip()\n+ if len(stripped) > 0:\n+ arg = stripped\n+ if len(arg) > 1:\n if arg.startswith('U+'):\n arg = arg[2:]\n try:\n", "issue": "AttributeError: 'NoneType' object has no attribute 'strip' (file \"/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py\", line 23, in codepoint)\n[01:04am] <Ant> .u\n01:04AM <Sopel> AttributeError: 'NoneType' object has no attribute 'strip' (file \"/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py\", line 23, in codepoint)\n01:04AM <Sopel> Ant: Sopel v. 6.1.1\n\nThis is in my Debian oldstable with Python v2.7.3. :(\n\nAttributeError: 'NoneType' object has no attribute 'strip' (file \"/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py\", line 23, in codepoint)\n[01:04am] <Ant> .u\n01:04AM <Sopel> AttributeError: 'NoneType' object has no attribute 'strip' (file \"/usr/local/lib/python2.7/dist-packages/sopel/modules/unicode_info.py\", line 23, in codepoint)\n01:04AM <Sopel> Ant: Sopel v. 6.1.1\n\nThis is in my Debian oldstable with Python v2.7.3. :(\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Codepoints Module\"\"\"\n# Copyright 2013, Elsie Powell, embolalia.com\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nimport unicodedata\nimport sys\nfrom sopel.module import commands, example, NOLIMIT\n\nif sys.version_info.major >= 3:\n unichr = chr\n\n\n@commands('u')\n@example('.u \u203d', 'U+203D INTERROBANG (\u203d)')\n@example('.u 203D', 'U+203D INTERROBANG (\u203d)')\ndef codepoint(bot, trigger):\n arg = trigger.group(2).strip()\n if len(arg) == 0:\n bot.reply('What code point do you want me to look up?')\n return NOLIMIT\n elif len(arg) > 1:\n if arg.startswith('U+'):\n arg = arg[2:]\n try:\n arg = unichr(int(arg, 16))\n except:\n bot.reply(\"That's not a valid code point.\")\n return NOLIMIT\n\n # Get the hex value for the code point, and drop the 0x from the front\n point = str(hex(ord(u'' + arg)))[2:]\n # Make the hex 4 characters long with preceding 0s, and all upper case\n point = point.rjust(4, str('0')).upper()\n try:\n name = unicodedata.name(arg)\n except ValueError:\n return 'U+%s (No name found)' % point\n\n if not unicodedata.combining(arg):\n template = 'U+%s %s (%s)'\n else:\n template = 'U+%s %s (\\xe2\\x97\\x8c%s)'\n bot.say(template % (point, name, arg))\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/unicode_info.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"Codepoints Module\"\"\"\n# Copyright 2013, Elsie Powell, embolalia.com\n# Copyright 2008, Sean B. 
Palmer, inamidst.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nimport unicodedata\nimport sys\nfrom sopel.module import commands, example, NOLIMIT\n\nif sys.version_info.major >= 3:\n unichr = chr\n\n\n@commands('u')\n@example('.u \u203d', 'U+203D INTERROBANG (\u203d)')\n@example('.u 203D', 'U+203D INTERROBANG (\u203d)')\ndef codepoint(bot, trigger):\n arg = trigger.group(2)\n if not arg:\n bot.reply('What code point do you want me to look up?')\n return NOLIMIT\n stripped = arg.strip()\n if len(stripped) > 0:\n arg = stripped\n if len(arg) > 1:\n if arg.startswith('U+'):\n arg = arg[2:]\n try:\n arg = unichr(int(arg, 16))\n except:\n bot.reply(\"That's not a valid code point.\")\n return NOLIMIT\n\n # Get the hex value for the code point, and drop the 0x from the front\n point = str(hex(ord(u'' + arg)))[2:]\n # Make the hex 4 characters long with preceding 0s, and all upper case\n point = point.rjust(4, str('0')).upper()\n try:\n name = unicodedata.name(arg)\n except ValueError:\n return 'U+%s (No name found)' % point\n\n if not unicodedata.combining(arg):\n template = 'U+%s %s (%s)'\n else:\n template = 'U+%s %s (\\xe2\\x97\\x8c%s)'\n bot.say(template % (point, name, arg))\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/unicode_info.py"}]}
| 1,124 | 231 |
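Editor's note on the record above (illustration only, not part of the dataset): the crash occurs because Sopel's `trigger.group(2)` returns `None` when `.u` is sent with no argument, so `.strip()` is called on `None`. A minimal, hedged sketch of the guard the golden diff introduces, with the Sopel-specific `trigger`/`bot` objects stubbed out so it runs standalone:

```python
# Hedged sketch only -- Sopel's real trigger/bot objects are stubbed here.
NOLIMIT = 0

def codepoint(reply, arg):
    # arg plays the role of trigger.group(2); it is None for a bare ".u"
    if not arg:
        reply('What code point do you want me to look up?')
        return NOLIMIT
    stripped = arg.strip()
    if stripped:              # keep a lone-whitespace argument as-is
        arg = stripped
    return arg

codepoint(print, None)       # prints the prompt instead of raising AttributeError
codepoint(print, ' 203D ')   # -> '203D'
```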
gh_patches_debug_23174
|
rasdani/github-patches
|
git_diff
|
ansible__awx-14742
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError querying 'Address' key out of the CyberArk object using CyberArk Central Credential Provider Lookup
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)
### Bug Summary
Using AWX 23.3.0 and trying to fetch a credential (account) 'Address' from CyberArk PAM using Central Credential Provider version 12.6, we encounter a KeyError.

Fetching the object property '' ('Content') or 'UserName' works flawlessly, and the same query works from PowerShell: the Address field in the JSON response is present and has the proper value.
related #13411
related #13402
related #5743
### AWX version
23.3.0
### Select the relevant components
- [X] UI
- [ ] UI (tech preview)
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
kubernetes
### Modifications
no
### Ansible version
core 2.12.5.post0
### Operating system
CentOS Stream release 8
### Web browser
Chrome
### Steps to reproduce
Reproducible with a CyberArk PAM account that has the Address field populated and an AWX credential of Credential Type "VMware vCenter" passing through the lookup of the 'VCenter Host' details using AIM (External Secret Management System) to a self-managed safe using this data:
```
{
"reason": "no particular",
"object_query": "Safe=XXXXXX_Self;Object=awx-vcenter-test",
"object_property": "Address",
"object_query_format": "Exact"
}
```
### Expected results
env var VMWARE_HOST set with the value in the Address field of the CyberArk credential
### Actual results
Test button in Credential Edit->External Secret Management System->Metadata page will show a KeyError pop up, and playbook in debug mode using this credential fails with these messages:
```
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py", line 528, in run
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 455, in inject_credential
getattr(builtin_injectors, credential.credential_type.namespace)(credential, injected_env, private_data_dir)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/injectors.py", line 71, in vmware
env['VMWARE_HOST'] = cred.get_input('host', default='')
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 279, in get_input
return self._get_dynamic_input(field_name)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 313, in _get_dynamic_input
return input_source.get_input_value()
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py", line 1258, in get_input_value
return backend(**backend_kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/aim.py", line 115, in aim_backend
raise KeyError('Property {} not found in object'.format(object_property))
KeyError: 'Property Address not found in object'
```
### Additional information
Using another instance (same versions of AWX and CCP), we can reproduce this error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/credential_plugins/aim.py`
Content:
```
1 from .plugin import CredentialPlugin, CertFiles, raise_for_status
2
3 from urllib.parse import quote, urlencode, urljoin
4
5 from django.utils.translation import gettext_lazy as _
6 import requests
7
8 aim_inputs = {
9 'fields': [
10 {
11 'id': 'url',
12 'label': _('CyberArk CCP URL'),
13 'type': 'string',
14 'format': 'url',
15 },
16 {
17 'id': 'webservice_id',
18 'label': _('Web Service ID'),
19 'type': 'string',
20 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
21 },
22 {
23 'id': 'app_id',
24 'label': _('Application ID'),
25 'type': 'string',
26 'secret': True,
27 },
28 {
29 'id': 'client_key',
30 'label': _('Client Key'),
31 'type': 'string',
32 'secret': True,
33 'multiline': True,
34 },
35 {
36 'id': 'client_cert',
37 'label': _('Client Certificate'),
38 'type': 'string',
39 'secret': True,
40 'multiline': True,
41 },
42 {
43 'id': 'verify',
44 'label': _('Verify SSL Certificates'),
45 'type': 'boolean',
46 'default': True,
47 },
48 ],
49 'metadata': [
50 {
51 'id': 'object_query',
52 'label': _('Object Query'),
53 'type': 'string',
54 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
55 },
56 {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
57 {
58 'id': 'object_property',
59 'label': _('Object Property'),
60 'type': 'string',
61 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
62 },
63 {
64 'id': 'reason',
65 'label': _('Reason'),
66 'type': 'string',
67 'help_text': _('Object request reason. This is only needed if it is required by the object\'s policy.'),
68 },
69 ],
70 'required': ['url', 'app_id', 'object_query'],
71 }
72
73
74 def aim_backend(**kwargs):
75 url = kwargs['url']
76 client_cert = kwargs.get('client_cert', None)
77 client_key = kwargs.get('client_key', None)
78 verify = kwargs['verify']
79 webservice_id = kwargs.get('webservice_id', '')
80 app_id = kwargs['app_id']
81 object_query = kwargs['object_query']
82 object_query_format = kwargs['object_query_format']
83 object_property = kwargs.get('object_property', '')
84 reason = kwargs.get('reason', None)
85 if webservice_id == '':
86 webservice_id = 'AIMWebService'
87
88 query_params = {
89 'AppId': app_id,
90 'Query': object_query,
91 'QueryFormat': object_query_format,
92 }
93 if reason:
94 query_params['reason'] = reason
95
96 request_qs = '?' + urlencode(query_params, quote_via=quote)
97 request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
98
99 with CertFiles(client_cert, client_key) as cert:
100 res = requests.get(
101 request_url + request_qs,
102 timeout=30,
103 cert=cert,
104 verify=verify,
105 allow_redirects=False,
106 )
107 raise_for_status(res)
108 # CCP returns the property name capitalized, username is camel case
109 # so we need to handle that case
110 if object_property == '':
111 object_property = 'Content'
112 elif object_property.lower() == 'username':
113 object_property = 'UserName'
114 elif object_property not in res:
115 raise KeyError('Property {} not found in object'.format(object_property))
116 else:
117 object_property = object_property.capitalize()
118
119 return res.json()[object_property]
120
121
122 aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py
--- a/awx/main/credential_plugins/aim.py
+++ b/awx/main/credential_plugins/aim.py
@@ -58,7 +58,7 @@
'id': 'object_property',
'label': _('Object Property'),
'type': 'string',
- 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
+ 'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'),
},
{
'id': 'reason',
@@ -111,8 +111,12 @@
object_property = 'Content'
elif object_property.lower() == 'username':
object_property = 'UserName'
+ elif object_property.lower() == 'password':
+ object_property = 'Content'
+ elif object_property.lower() == 'address':
+ object_property = 'Address'
elif object_property not in res:
- raise KeyError('Property {} not found in object'.format(object_property))
+ raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property))
else:
object_property = object_property.capitalize()
|
{"golden_diff": "diff --git a/awx/main/credential_plugins/aim.py b/awx/main/credential_plugins/aim.py\n--- a/awx/main/credential_plugins/aim.py\n+++ b/awx/main/credential_plugins/aim.py\n@@ -58,7 +58,7 @@\n 'id': 'object_property',\n 'label': _('Object Property'),\n 'type': 'string',\n- 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),\n+ 'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'),\n },\n {\n 'id': 'reason',\n@@ -111,8 +111,12 @@\n object_property = 'Content'\n elif object_property.lower() == 'username':\n object_property = 'UserName'\n+ elif object_property.lower() == 'password':\n+ object_property = 'Content'\n+ elif object_property.lower() == 'address':\n+ object_property = 'Address'\n elif object_property not in res:\n- raise KeyError('Property {} not found in object'.format(object_property))\n+ raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property))\n else:\n object_property = object_property.capitalize()\n", "issue": "KeyError querying 'Address' key out of the CyberArk object using CyberArk Central Credential Provider Lookup\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\r\n\r\n### Bug Summary\r\n\r\nUsing AWX 23.3.0 and trying to fetch a credential (account) 'Address' from CyberARK PAM using Central Credential Provider version 12.6 we encounter a KeyError.\r\n\r\nFetching object property '' ('Content') or 'UserName' works flawlessly and using PowerShell the same query works and Address field in the JSON response is there and has the proper value.\r\n\r\nrelated #13411\r\nrelated #13402\r\nrelated #5743\r\n\r\n### AWX version\r\n\r\n23.3.0\r\n\r\n### Select the relevant components\r\n\r\n- [X] UI\r\n- [ ] UI (tech preview)\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\ncore 2.12.5.post0\r\n\r\n### Operating system\r\n\r\nCentOS Stream release 8\r\n\r\n### Web browser\r\n\r\nChrome\r\n\r\n### Steps to reproduce\r\n\r\nReproduceable with a CyberARK PAM account that has the Address field populated and an AWX credential of Credential Type \"VMware vCenter\" passing through the lookup of the 'VCenter Host' details using AIM (External Secret Management System) to a self managed safe using this data:\r\n\r\n```\r\n{\r\n \"reason\": \"no particular\",\r\n \"object_query\": \"Safe=XXXXXX_Self;Object=awx-vcenter-test\",\r\n \"object_property\": \"Address\",\r\n \"object_query_format\": \"Exact\"\r\n}\r\n```\r\n\r\n\r\n\r\n\r\n\r\n### Expected results\r\n\r\nenv var VMWARE_HOST set with the value in the Address field of the CyberArk credential\r\n\r\n### Actual results\r\n\r\nTest button in Credential Edit->External Secret Management System->Metadata page will show a KeyError pop up, and playbook in debug mode using this credential fails with these messages:\r\n\r\n```\r\nTraceback (most recent call 
last):\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/tasks/jobs.py\", line 528, in run\r\n credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 455, in inject_credential\r\n getattr(builtin_injectors, credential.credential_type.namespace)(credential, injected_env, private_data_dir)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/injectors.py\", line 71, in vmware\r\n env['VMWARE_HOST'] = cred.get_input('host', default='')\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 279, in get_input\r\n return self._get_dynamic_input(field_name)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 313, in _get_dynamic_input\r\n return input_source.get_input_value()\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/models/credential/__init__.py\", line 1258, in get_input_value\r\n return backend(**backend_kwargs)\r\n File \"/var/lib/awx/venv/awx/lib64/python3.9/site-packages/awx/main/credential_plugins/aim.py\", line 115, in aim_backend\r\n raise KeyError('Property {} not found in object'.format(object_property))\r\nKeyError: 'Property Address not found in object'\r\n```\r\n\r\n### Additional information\r\n\r\nUsing another instance (same versions AWX and CCP) we can reproduce this error.\n", "before_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import quote, urlencode, urljoin\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\naim_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('CyberArk CCP URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'webservice_id',\n 'label': _('Web Service ID'),\n 'type': 'string',\n 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),\n },\n {\n 'id': 'app_id',\n 'label': _('Application ID'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'client_key',\n 'label': _('Client Key'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'client_cert',\n 'label': _('Client Certificate'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'verify',\n 'label': _('Verify SSL Certificates'),\n 'type': 'boolean',\n 'default': True,\n },\n ],\n 'metadata': [\n {\n 'id': 'object_query',\n 'label': _('Object Query'),\n 'type': 'string',\n 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),\n },\n {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},\n {\n 'id': 'object_property',\n 'label': _('Object Property'),\n 'type': 'string',\n 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),\n },\n {\n 'id': 'reason',\n 'label': _('Reason'),\n 'type': 'string',\n 'help_text': _('Object request reason. 
This is only needed if it is required by the object\\'s policy.'),\n },\n ],\n 'required': ['url', 'app_id', 'object_query'],\n}\n\n\ndef aim_backend(**kwargs):\n url = kwargs['url']\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n webservice_id = kwargs.get('webservice_id', '')\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n object_property = kwargs.get('object_property', '')\n reason = kwargs.get('reason', None)\n if webservice_id == '':\n webservice_id = 'AIMWebService'\n\n query_params = {\n 'AppId': app_id,\n 'Query': object_query,\n 'QueryFormat': object_query_format,\n }\n if reason:\n query_params['reason'] = reason\n\n request_qs = '?' + urlencode(query_params, quote_via=quote)\n request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))\n\n with CertFiles(client_cert, client_key) as cert:\n res = requests.get(\n request_url + request_qs,\n timeout=30,\n cert=cert,\n verify=verify,\n allow_redirects=False,\n )\n raise_for_status(res)\n # CCP returns the property name capitalized, username is camel case\n # so we need to handle that case\n if object_property == '':\n object_property = 'Content'\n elif object_property.lower() == 'username':\n object_property = 'UserName'\n elif object_property not in res:\n raise KeyError('Property {} not found in object'.format(object_property))\n else:\n object_property = object_property.capitalize()\n\n return res.json()[object_property]\n\n\naim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)\n", "path": "awx/main/credential_plugins/aim.py"}], "after_files": [{"content": "from .plugin import CredentialPlugin, CertFiles, raise_for_status\n\nfrom urllib.parse import quote, urlencode, urljoin\n\nfrom django.utils.translation import gettext_lazy as _\nimport requests\n\naim_inputs = {\n 'fields': [\n {\n 'id': 'url',\n 'label': _('CyberArk CCP URL'),\n 'type': 'string',\n 'format': 'url',\n },\n {\n 'id': 'webservice_id',\n 'label': _('Web Service ID'),\n 'type': 'string',\n 'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),\n },\n {\n 'id': 'app_id',\n 'label': _('Application ID'),\n 'type': 'string',\n 'secret': True,\n },\n {\n 'id': 'client_key',\n 'label': _('Client Key'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'client_cert',\n 'label': _('Client Certificate'),\n 'type': 'string',\n 'secret': True,\n 'multiline': True,\n },\n {\n 'id': 'verify',\n 'label': _('Verify SSL Certificates'),\n 'type': 'boolean',\n 'default': True,\n },\n ],\n 'metadata': [\n {\n 'id': 'object_query',\n 'label': _('Object Query'),\n 'type': 'string',\n 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),\n },\n {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},\n {\n 'id': 'object_property',\n 'label': _('Object Property'),\n 'type': 'string',\n 'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'),\n },\n {\n 'id': 'reason',\n 'label': _('Reason'),\n 'type': 'string',\n 'help_text': _('Object request reason. 
This is only needed if it is required by the object\\'s policy.'),\n },\n ],\n 'required': ['url', 'app_id', 'object_query'],\n}\n\n\ndef aim_backend(**kwargs):\n url = kwargs['url']\n client_cert = kwargs.get('client_cert', None)\n client_key = kwargs.get('client_key', None)\n verify = kwargs['verify']\n webservice_id = kwargs.get('webservice_id', '')\n app_id = kwargs['app_id']\n object_query = kwargs['object_query']\n object_query_format = kwargs['object_query_format']\n object_property = kwargs.get('object_property', '')\n reason = kwargs.get('reason', None)\n if webservice_id == '':\n webservice_id = 'AIMWebService'\n\n query_params = {\n 'AppId': app_id,\n 'Query': object_query,\n 'QueryFormat': object_query_format,\n }\n if reason:\n query_params['reason'] = reason\n\n request_qs = '?' + urlencode(query_params, quote_via=quote)\n request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))\n\n with CertFiles(client_cert, client_key) as cert:\n res = requests.get(\n request_url + request_qs,\n timeout=30,\n cert=cert,\n verify=verify,\n allow_redirects=False,\n )\n raise_for_status(res)\n # CCP returns the property name capitalized, username is camel case\n # so we need to handle that case\n if object_property == '':\n object_property = 'Content'\n elif object_property.lower() == 'username':\n object_property = 'UserName'\n elif object_property.lower() == 'password':\n object_property = 'Content'\n elif object_property.lower() == 'address':\n object_property = 'Address'\n elif object_property not in res:\n raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property))\n else:\n object_property = object_property.capitalize()\n\n return res.json()[object_property]\n\n\naim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)\n", "path": "awx/main/credential_plugins/aim.py"}]}
| 2,401 | 285 |
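Editor's note on the record above (illustration only, not part of the dataset): the likely root cause is that `object_property not in res` tests membership against the raw `requests.Response` object rather than the decoded JSON body, so any property that is not special-cased (such as `Address`) always raises. The golden diff adds the missing special cases; the sketch below is an alternative reading that checks the parsed payload instead. It is an assumption-laden illustration, not the merged fix:

```python
# Hedged alternative sketch -- not the golden diff above. `res` is assumed to be
# a requests.Response whose JSON body exposes keys such as 'Content',
# 'UserName' and 'Address'.
def pick_property(res, object_property=''):
    data = res.json()                     # check the decoded payload, not `res`
    if object_property == '':
        key = 'Content'
    elif object_property.lower() == 'username':
        key = 'UserName'
    else:
        key = object_property.capitalize()
    if key not in data:
        raise KeyError('Property {} not found in object'.format(key))
    return data[key]
```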
gh_patches_debug_38450
|
rasdani/github-patches
|
git_diff
|
searx__searx-1452
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Findx is shutting down
https://privacore.github.io/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/findx.py`
Content:
```
1 """
2 FindX (General, Images, Videos)
3
4 @website https://www.findx.com
5 @provide-api no
6 @using-api no
7 @results HTML
8 @stable no
9 @parse url, title, content, embedded, img_src, thumbnail_src
10 """
11
12 from dateutil import parser
13 from json import loads
14 import re
15
16 from lxml import html
17
18 from searx import logger
19 from searx.engines.xpath import extract_text
20 from searx.engines.youtube_noapi import base_youtube_url, embedded_url
21 from searx.url_utils import urlencode
22
23
24 paging = True
25 results_xpath = '//script[@id="initial-state"]'
26 search_url = 'https://www.findx.com/{category}?{q}'
27 type_map = {
28 'none': 'web',
29 'general': 'web',
30 'images': 'images',
31 'videos': 'videos',
32 }
33
34
35 def request(query, params):
36 params['url'] = search_url.format(
37 category=type_map[params['category']],
38 q=urlencode({
39 'q': query,
40 'page': params['pageno']
41 })
42 )
43 return params
44
45
46 def response(resp):
47 dom = html.fromstring(resp.text)
48 results_raw_json = dom.xpath(results_xpath)
49 results_json = loads(extract_text(results_raw_json))
50
51 if len(results_json['web']['results']) > 0:
52 return _general_results(results_json['web']['results']['webSearch']['results'])
53
54 if len(results_json['images']['results']) > 0:
55 return _images_results(results_json['images']['results'])
56
57 if len(results_json['video']['results']) > 0:
58 return _videos_results(results_json['video']['results'])
59
60 return []
61
62
63 def _general_results(general_results):
64 results = []
65 for result in general_results:
66 results.append({
67 'url': result['url'],
68 'title': result['title'],
69 'content': result['sum'],
70 })
71 return results
72
73
74 def _images_results(image_results):
75 results = []
76 for result in image_results:
77 results.append({
78 'url': result['sourceURL'],
79 'title': result['title'],
80 'content': result['source'],
81 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),
82 'img_src': _extract_url(result['assets']['file']['url']),
83 'template': 'images.html',
84 })
85 return results
86
87
88 def _videos_results(video_results):
89 results = []
90 for result in video_results:
91 if not result['kind'].startswith('youtube'):
92 logger.warn('Unknown video kind in findx: {}'.format(result['kind']))
93 continue
94
95 description = result['snippet']['description']
96 if len(description) > 300:
97 description = description[:300] + '...'
98
99 results.append({
100 'url': base_youtube_url + result['id'],
101 'title': result['snippet']['title'],
102 'content': description,
103 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
104 'publishedDate': parser.parse(result['snippet']['publishedAt']),
105 'embedded': embedded_url.format(videoid=result['id']),
106 'template': 'videos.html',
107 })
108 return results
109
110
111 def _extract_url(url):
112 matching = re.search('(/https?://[^)]+)', url)
113 if matching:
114 return matching.group(0)[1:]
115 return ''
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/findx.py b/searx/engines/findx.py
deleted file mode 100644
--- a/searx/engines/findx.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-FindX (General, Images, Videos)
-
-@website https://www.findx.com
-@provide-api no
-@using-api no
-@results HTML
-@stable no
-@parse url, title, content, embedded, img_src, thumbnail_src
-"""
-
-from dateutil import parser
-from json import loads
-import re
-
-from lxml import html
-
-from searx import logger
-from searx.engines.xpath import extract_text
-from searx.engines.youtube_noapi import base_youtube_url, embedded_url
-from searx.url_utils import urlencode
-
-
-paging = True
-results_xpath = '//script[@id="initial-state"]'
-search_url = 'https://www.findx.com/{category}?{q}'
-type_map = {
- 'none': 'web',
- 'general': 'web',
- 'images': 'images',
- 'videos': 'videos',
-}
-
-
-def request(query, params):
- params['url'] = search_url.format(
- category=type_map[params['category']],
- q=urlencode({
- 'q': query,
- 'page': params['pageno']
- })
- )
- return params
-
-
-def response(resp):
- dom = html.fromstring(resp.text)
- results_raw_json = dom.xpath(results_xpath)
- results_json = loads(extract_text(results_raw_json))
-
- if len(results_json['web']['results']) > 0:
- return _general_results(results_json['web']['results']['webSearch']['results'])
-
- if len(results_json['images']['results']) > 0:
- return _images_results(results_json['images']['results'])
-
- if len(results_json['video']['results']) > 0:
- return _videos_results(results_json['video']['results'])
-
- return []
-
-
-def _general_results(general_results):
- results = []
- for result in general_results:
- results.append({
- 'url': result['url'],
- 'title': result['title'],
- 'content': result['sum'],
- })
- return results
-
-
-def _images_results(image_results):
- results = []
- for result in image_results:
- results.append({
- 'url': result['sourceURL'],
- 'title': result['title'],
- 'content': result['source'],
- 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),
- 'img_src': _extract_url(result['assets']['file']['url']),
- 'template': 'images.html',
- })
- return results
-
-
-def _videos_results(video_results):
- results = []
- for result in video_results:
- if not result['kind'].startswith('youtube'):
- logger.warn('Unknown video kind in findx: {}'.format(result['kind']))
- continue
-
- description = result['snippet']['description']
- if len(description) > 300:
- description = description[:300] + '...'
-
- results.append({
- 'url': base_youtube_url + result['id'],
- 'title': result['snippet']['title'],
- 'content': description,
- 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
- 'publishedDate': parser.parse(result['snippet']['publishedAt']),
- 'embedded': embedded_url.format(videoid=result['id']),
- 'template': 'videos.html',
- })
- return results
-
-
-def _extract_url(url):
- matching = re.search('(/https?://[^)]+)', url)
- if matching:
- return matching.group(0)[1:]
- return ''
|
{"golden_diff": "diff --git a/searx/engines/findx.py b/searx/engines/findx.py\ndeleted file mode 100644\n--- a/searx/engines/findx.py\n+++ /dev/null\n@@ -1,115 +0,0 @@\n-\"\"\"\n-FindX (General, Images, Videos)\n-\n-@website https://www.findx.com\n-@provide-api no\n-@using-api no\n-@results HTML\n-@stable no\n-@parse url, title, content, embedded, img_src, thumbnail_src\n-\"\"\"\n-\n-from dateutil import parser\n-from json import loads\n-import re\n-\n-from lxml import html\n-\n-from searx import logger\n-from searx.engines.xpath import extract_text\n-from searx.engines.youtube_noapi import base_youtube_url, embedded_url\n-from searx.url_utils import urlencode\n-\n-\n-paging = True\n-results_xpath = '//script[@id=\"initial-state\"]'\n-search_url = 'https://www.findx.com/{category}?{q}'\n-type_map = {\n- 'none': 'web',\n- 'general': 'web',\n- 'images': 'images',\n- 'videos': 'videos',\n-}\n-\n-\n-def request(query, params):\n- params['url'] = search_url.format(\n- category=type_map[params['category']],\n- q=urlencode({\n- 'q': query,\n- 'page': params['pageno']\n- })\n- )\n- return params\n-\n-\n-def response(resp):\n- dom = html.fromstring(resp.text)\n- results_raw_json = dom.xpath(results_xpath)\n- results_json = loads(extract_text(results_raw_json))\n-\n- if len(results_json['web']['results']) > 0:\n- return _general_results(results_json['web']['results']['webSearch']['results'])\n-\n- if len(results_json['images']['results']) > 0:\n- return _images_results(results_json['images']['results'])\n-\n- if len(results_json['video']['results']) > 0:\n- return _videos_results(results_json['video']['results'])\n-\n- return []\n-\n-\n-def _general_results(general_results):\n- results = []\n- for result in general_results:\n- results.append({\n- 'url': result['url'],\n- 'title': result['title'],\n- 'content': result['sum'],\n- })\n- return results\n-\n-\n-def _images_results(image_results):\n- results = []\n- for result in image_results:\n- results.append({\n- 'url': result['sourceURL'],\n- 'title': result['title'],\n- 'content': result['source'],\n- 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),\n- 'img_src': _extract_url(result['assets']['file']['url']),\n- 'template': 'images.html',\n- })\n- return results\n-\n-\n-def _videos_results(video_results):\n- results = []\n- for result in video_results:\n- if not result['kind'].startswith('youtube'):\n- logger.warn('Unknown video kind in findx: {}'.format(result['kind']))\n- continue\n-\n- description = result['snippet']['description']\n- if len(description) > 300:\n- description = description[:300] + '...'\n-\n- results.append({\n- 'url': base_youtube_url + result['id'],\n- 'title': result['snippet']['title'],\n- 'content': description,\n- 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),\n- 'publishedDate': parser.parse(result['snippet']['publishedAt']),\n- 'embedded': embedded_url.format(videoid=result['id']),\n- 'template': 'videos.html',\n- })\n- return results\n-\n-\n-def _extract_url(url):\n- matching = re.search('(/https?://[^)]+)', url)\n- if matching:\n- return matching.group(0)[1:]\n- return ''\n", "issue": "Findx is shutting down\nhttps://privacore.github.io/\n", "before_files": [{"content": "\"\"\"\nFindX (General, Images, Videos)\n\n@website https://www.findx.com\n@provide-api no\n@using-api no\n@results HTML\n@stable no\n@parse url, title, content, embedded, img_src, thumbnail_src\n\"\"\"\n\nfrom dateutil import parser\nfrom json import loads\nimport re\n\nfrom lxml import html\n\nfrom searx 
import logger\nfrom searx.engines.xpath import extract_text\nfrom searx.engines.youtube_noapi import base_youtube_url, embedded_url\nfrom searx.url_utils import urlencode\n\n\npaging = True\nresults_xpath = '//script[@id=\"initial-state\"]'\nsearch_url = 'https://www.findx.com/{category}?{q}'\ntype_map = {\n 'none': 'web',\n 'general': 'web',\n 'images': 'images',\n 'videos': 'videos',\n}\n\n\ndef request(query, params):\n params['url'] = search_url.format(\n category=type_map[params['category']],\n q=urlencode({\n 'q': query,\n 'page': params['pageno']\n })\n )\n return params\n\n\ndef response(resp):\n dom = html.fromstring(resp.text)\n results_raw_json = dom.xpath(results_xpath)\n results_json = loads(extract_text(results_raw_json))\n\n if len(results_json['web']['results']) > 0:\n return _general_results(results_json['web']['results']['webSearch']['results'])\n\n if len(results_json['images']['results']) > 0:\n return _images_results(results_json['images']['results'])\n\n if len(results_json['video']['results']) > 0:\n return _videos_results(results_json['video']['results'])\n\n return []\n\n\ndef _general_results(general_results):\n results = []\n for result in general_results:\n results.append({\n 'url': result['url'],\n 'title': result['title'],\n 'content': result['sum'],\n })\n return results\n\n\ndef _images_results(image_results):\n results = []\n for result in image_results:\n results.append({\n 'url': result['sourceURL'],\n 'title': result['title'],\n 'content': result['source'],\n 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),\n 'img_src': _extract_url(result['assets']['file']['url']),\n 'template': 'images.html',\n })\n return results\n\n\ndef _videos_results(video_results):\n results = []\n for result in video_results:\n if not result['kind'].startswith('youtube'):\n logger.warn('Unknown video kind in findx: {}'.format(result['kind']))\n continue\n\n description = result['snippet']['description']\n if len(description) > 300:\n description = description[:300] + '...'\n\n results.append({\n 'url': base_youtube_url + result['id'],\n 'title': result['snippet']['title'],\n 'content': description,\n 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),\n 'publishedDate': parser.parse(result['snippet']['publishedAt']),\n 'embedded': embedded_url.format(videoid=result['id']),\n 'template': 'videos.html',\n })\n return results\n\n\ndef _extract_url(url):\n matching = re.search('(/https?://[^)]+)', url)\n if matching:\n return matching.group(0)[1:]\n return ''\n", "path": "searx/engines/findx.py"}], "after_files": [{"content": null, "path": "searx/engines/findx.py"}]}
| 1,261 | 882 |
gh_patches_debug_35740
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-4524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenNebula dynamic inventory plugin does not support keyed_groups
### Summary
The documentation for the OpenNebula dynamic inventory plugin [0] refers to an option to create keyed_groups (because the documentation extends the `constructed` fragment).
```
extends_documentation_fragment:
- constructed
```
However, this functionality is not implemented. Even though the `InventoryModule` inherits from `Constructable`, `self._add_host_to_keyed_groups()` is never called.
[0] https://docs.ansible.com/ansible/latest/collections/community/general/opennebula_inventory.html
### Issue Type
Bug Report
### Component Name
opennebula dynamic inventory plugin
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.4]
config file = /code/rafiki/ansible.cfg
configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/vagrant/.pyenv/versions/3.10.3/lib/python3.10/site-packages/ansible
ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections
executable location = /home/vagrant/.pyenv/versions/3.10.3/bin/ansible
python version = 3.10.3 (main, Apr 6 2022, 22:36:07) [GCC 11.2.0]
jinja version = 3.1.1
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /home/vagrant/.pyenv/versions/3.10.3/lib/python3.10/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.7.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
HOST_KEY_CHECKING(/code/rafiki/ansible.cfg) = False
```
### OS / Environment
```
vagrant@ubuntu-jammy:/code/rafiki$ cat /etc/lsb-release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=22.04
DISTRIB_CODENAME=jammy
DISTRIB_DESCRIPTION="Ubuntu Jammy Jellyfish (development branch)"
```
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
opennebula.yml:
```yaml (paste below)
plugin: community.general.opennebula
api_url: "RPC_URL"
# specify these here, or in the form username:password in ~/.one/one_auth or export ONE_AUTH=/some/other/path
#api_username:
#api_password:
keyed_groups:
- prefix: distro
key: ansible_distribution
```
`ansible-inventory --list -i opennebula.yml`
### Expected Results
I expected a grouping of hosts by distribution. Note that the plugin does perform grouping by LABEL, which is seen below with the `srv` and `foo` labels/groups.
### Actual Results
```console (paste below)
```console
ansible-inventory --list -i opennebula.yml
{
"_meta": {
"hostvars": {
"terraform_demo_00": {
"INFO": "",
"INPUTS_ORDER": "",
"LABELS": [],
"LOGO": "images/logos/logo.png",
"LXD_SECURITY_PRIVILEGED": "true",
"MEMORY_UNIT_COST": "MB",
"SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
"ansible_host": "192.168.11.235",
"name": "terraform_demo_00",
"v4_first_ip": "192.168.11.235",
"v6_first_ip": false
},
"terraform_demo_01": {
"INFO": "",
"INPUTS_ORDER": "",
"LABELS": [
"srv",
"foo"
],
"LOGO": "images/logos/logo.png",
"LXD_SECURITY_PRIVILEGED": "true",
"MEMORY_UNIT_COST": "MB",
"SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
"ansible_host": "192.168.11.241",
"name": "terraform_demo_01",
"v4_first_ip": "192.168.11.241",
"v6_first_ip": false
}
}
},
"all": {
"children": [
"srv",
"foo",
"ungrouped"
]
},
"srv": {
"hosts": [
"terraform_demo_01"
]
},
"foo": {
"hosts": [
"terraform_demo_01"
]
},
"ungrouped": {
"hosts": [
"terraform_demo_00"
]
}
}
```
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/inventory/opennebula.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <[email protected]>
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4
5 from __future__ import (absolute_import, division, print_function)
6
7 __metaclass__ = type
8
9 DOCUMENTATION = r'''
10 name: opennebula
11 author:
12 - Kristian Feldsam (@feldsam)
13 short_description: OpenNebula inventory source
14 version_added: "3.8.0"
15 extends_documentation_fragment:
16 - constructed
17 description:
18 - Get inventory hosts from OpenNebula cloud.
19 - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
20 to set parameter values.
21 - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
22 options:
23 plugin:
24 description: Token that ensures this is a source file for the 'opennebula' plugin.
25 type: string
26 required: true
27 choices: [ community.general.opennebula ]
28 api_url:
29 description:
30 - URL of the OpenNebula RPC server.
31 - It is recommended to use HTTPS so that the username/password are not
32 transferred over the network unencrypted.
33 - If not set then the value of the C(ONE_URL) environment variable is used.
34 env:
35 - name: ONE_URL
36 required: True
37 type: string
38 api_username:
39 description:
40 - Name of the user to login into the OpenNebula RPC server. If not set
41 then the value of the C(ONE_USERNAME) environment variable is used.
42 env:
43 - name: ONE_USERNAME
44 type: string
45 api_password:
46 description:
47 - Password or a token of the user to login into OpenNebula RPC server.
48 - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
49 env:
50 - name: ONE_PASSWORD
51 required: False
52 type: string
53 api_authfile:
54 description:
55 - If both I(api_username) or I(api_password) are not set, then it will try
56 authenticate with ONE auth file. Default path is C(~/.one/one_auth).
57 - Set environment variable C(ONE_AUTH) to override this path.
58 env:
59 - name: ONE_AUTH
60 required: False
61 type: string
62 hostname:
63 description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
64 type: string
65 default: v4_first_ip
66 choices:
67 - v4_first_ip
68 - v6_first_ip
69 - name
70 filter_by_label:
71 description: Only return servers filtered by this label.
72 type: string
73 group_by_labels:
74 description: Create host groups by vm labels
75 type: bool
76 default: True
77 '''
78
79 EXAMPLES = r'''
80 # inventory_opennebula.yml file in YAML format
81 # Example command line: ansible-inventory --list -i inventory_opennebula.yml
82
83 # Pass a label filter to the API
84 plugin: community.general.opennebula
85 api_url: https://opennebula:2633/RPC2
86 filter_by_label: Cache
87 '''
88
89 try:
90 import pyone
91
92 HAS_PYONE = True
93 except ImportError:
94 HAS_PYONE = False
95
96 from ansible.errors import AnsibleError
97 from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
98 from ansible.module_utils.common.text.converters import to_native
99
100 from collections import namedtuple
101 import os
102
103
104 class InventoryModule(BaseInventoryPlugin, Constructable):
105 NAME = 'community.general.opennebula'
106
107 def verify_file(self, path):
108 valid = False
109 if super(InventoryModule, self).verify_file(path):
110 if path.endswith(('opennebula.yaml', 'opennebula.yml')):
111 valid = True
112 return valid
113
114 def _get_connection_info(self):
115 url = self.get_option('api_url')
116 username = self.get_option('api_username')
117 password = self.get_option('api_password')
118 authfile = self.get_option('api_authfile')
119
120 if not username and not password:
121 if authfile is None:
122 authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
123 try:
124 with open(authfile, "r") as fp:
125 authstring = fp.read().rstrip()
126 username, password = authstring.split(":")
127 except (OSError, IOError):
128 raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
129 except Exception:
130 raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
131
132 auth_params = namedtuple('auth', ('url', 'username', 'password'))
133
134 return auth_params(url=url, username=username, password=password)
135
136 def _get_vm_ipv4(self, vm):
137 nic = vm.TEMPLATE.get('NIC')
138
139 if isinstance(nic, dict):
140 nic = [nic]
141
142 for net in nic:
143 return net['IP']
144
145 return False
146
147 def _get_vm_ipv6(self, vm):
148 nic = vm.TEMPLATE.get('NIC')
149
150 if isinstance(nic, dict):
151 nic = [nic]
152
153 for net in nic:
154 if net.get('IP6_GLOBAL'):
155 return net['IP6_GLOBAL']
156
157 return False
158
159 def _get_vm_pool(self):
160 auth = self._get_connection_info()
161
162 if not (auth.username and auth.password):
163 raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
164 else:
165 one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
166
167 # get hosts (VMs)
168 try:
169 vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
170 except Exception as e:
171 raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
172
173 return vm_pool
174
175 def _retrieve_servers(self, label_filter=None):
176 vm_pool = self._get_vm_pool()
177
178 result = []
179
180 # iterate over hosts
181 for vm in vm_pool.VM:
182 server = vm.USER_TEMPLATE
183
184 labels = []
185 if vm.USER_TEMPLATE.get('LABELS'):
186 labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
187 labels = ''.join(labels)
188 labels = labels.replace(' ', '_')
189 labels = labels.replace('-', '_')
190 labels = labels.split(',')
191
192 # filter by label
193 if label_filter is not None:
194 if label_filter not in labels:
195 continue
196
197 server['name'] = vm.NAME
198 server['LABELS'] = labels
199 server['v4_first_ip'] = self._get_vm_ipv4(vm)
200 server['v6_first_ip'] = self._get_vm_ipv6(vm)
201
202 result.append(server)
203
204 return result
205
206 def _populate(self):
207 hostname_preference = self.get_option('hostname')
208 group_by_labels = self.get_option('group_by_labels')
209
210 # Add a top group 'one'
211 self.inventory.add_group(group='all')
212
213 filter_by_label = self.get_option('filter_by_label')
214 for server in self._retrieve_servers(filter_by_label):
215 # check for labels
216 if group_by_labels and server['LABELS']:
217 for label in server['LABELS']:
218 self.inventory.add_group(group=label)
219 self.inventory.add_host(host=server['name'], group=label)
220
221 self.inventory.add_host(host=server['name'], group='all')
222
223 for attribute, value in server.items():
224 self.inventory.set_variable(server['name'], attribute, value)
225
226 if hostname_preference != 'name':
227 self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
228
229 if server.get('SSH_PORT'):
230 self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])
231
232 def parse(self, inventory, loader, path, cache=True):
233 if not HAS_PYONE:
234 raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
235
236 super(InventoryModule, self).parse(inventory, loader, path)
237 self._read_config_data(path=path)
238
239 self._populate()
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py
--- a/plugins/inventory/opennebula.py
+++ b/plugins/inventory/opennebula.py
@@ -206,28 +206,40 @@
def _populate(self):
hostname_preference = self.get_option('hostname')
group_by_labels = self.get_option('group_by_labels')
+ strict = self.get_option('strict')
# Add a top group 'one'
self.inventory.add_group(group='all')
filter_by_label = self.get_option('filter_by_label')
- for server in self._retrieve_servers(filter_by_label):
+ servers = self._retrieve_servers(filter_by_label)
+ for server in servers:
+ hostname = server['name']
# check for labels
if group_by_labels and server['LABELS']:
for label in server['LABELS']:
self.inventory.add_group(group=label)
- self.inventory.add_host(host=server['name'], group=label)
+ self.inventory.add_host(host=hostname, group=label)
- self.inventory.add_host(host=server['name'], group='all')
+ self.inventory.add_host(host=hostname, group='all')
for attribute, value in server.items():
- self.inventory.set_variable(server['name'], attribute, value)
+ self.inventory.set_variable(hostname, attribute, value)
if hostname_preference != 'name':
- self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
+ self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference])
if server.get('SSH_PORT'):
- self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])
+ self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT'])
+
+ # handle construcable implementation: get composed variables if any
+ self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict)
+
+ # groups based on jinja conditionals get added to specific groups
+ self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict)
+
+ # groups based on variables associated with them in the inventory
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict)
def parse(self, inventory, loader, path, cache=True):
if not HAS_PYONE:
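For context, the three calls added above (`_set_composite_vars`, `_add_host_to_composed_groups`, `_add_host_to_keyed_groups`) come from Ansible's `Constructable` mixin, which the plugin already inherits from. A minimal sketch of what a `keyed_groups` entry does — using the `prefix: distro` / `key: ansible_distribution` pair from the issue's example config and a hypothetical variable value, not the mixin's real implementation — looks like this:

```python
# Illustrative sketch only: the real logic lives in
# ansible.plugins.inventory.Constructable._add_host_to_keyed_groups.
keyed_group = {"prefix": "distro", "key": "ansible_distribution"}  # from the issue's config
host_vars = {"ansible_distribution": "Ubuntu"}                     # hypothetical host variable

# keyed_groups joins prefix and value with "_" by default, so this host would be
# added to the group below (provided the referenced variable exists on the host).
group_name = "%s_%s" % (keyed_group["prefix"], host_vars[keyed_group["key"]])
print(group_name)  # -> "distro_Ubuntu"
```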
|
{"golden_diff": "diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py\n--- a/plugins/inventory/opennebula.py\n+++ b/plugins/inventory/opennebula.py\n@@ -206,28 +206,40 @@\n def _populate(self):\n hostname_preference = self.get_option('hostname')\n group_by_labels = self.get_option('group_by_labels')\n+ strict = self.get_option('strict')\n \n # Add a top group 'one'\n self.inventory.add_group(group='all')\n \n filter_by_label = self.get_option('filter_by_label')\n- for server in self._retrieve_servers(filter_by_label):\n+ servers = self._retrieve_servers(filter_by_label)\n+ for server in servers:\n+ hostname = server['name']\n # check for labels\n if group_by_labels and server['LABELS']:\n for label in server['LABELS']:\n self.inventory.add_group(group=label)\n- self.inventory.add_host(host=server['name'], group=label)\n+ self.inventory.add_host(host=hostname, group=label)\n \n- self.inventory.add_host(host=server['name'], group='all')\n+ self.inventory.add_host(host=hostname, group='all')\n \n for attribute, value in server.items():\n- self.inventory.set_variable(server['name'], attribute, value)\n+ self.inventory.set_variable(hostname, attribute, value)\n \n if hostname_preference != 'name':\n- self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])\n+ self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference])\n \n if server.get('SSH_PORT'):\n- self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])\n+ self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT'])\n+\n+ # handle construcable implementation: get composed variables if any\n+ self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict)\n+\n+ # groups based on jinja conditionals get added to specific groups\n+ self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict)\n+\n+ # groups based on variables associated with them in the inventory\n+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict)\n \n def parse(self, inventory, loader, path, cache=True):\n if not HAS_PYONE:\n", "issue": "OpenNebula dynamic inventory plugin does not support keyed_groups\n### Summary\r\n\r\nThe documentation for the OpenNebula dynamic inventory plugin [0] refers to an option to created keyed_groups (because the documentation extends it).\r\n\r\n```\r\n extends_documentation_fragment:\r\n - constructed\r\n```\r\n\r\nHowever, this functionality is not implemented. 
Even though the `InventoryModule` inherits from `Constructable`, `self._add_host_to_keyed_groups()` is never called.\r\n\r\n[0] https://docs.ansible.com/ansible/latest/collections/community/general/opennebula_inventory.html\r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\nopennebula dynamic inventory plugin\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.4]\r\n config file = /code/rafiki/ansible.cfg\r\n configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/vagrant/.pyenv/versions/3.10.3/lib/python3.10/site-packages/ansible\r\n ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/vagrant/.pyenv/versions/3.10.3/bin/ansible\r\n python version = 3.10.3 (main, Apr 6 2022, 22:36:07) [GCC 11.2.0]\r\n jinja version = 3.1.1\r\n libyaml = True\r\n\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /home/vagrant/.pyenv/versions/3.10.3/lib/python3.10/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.7.0 \r\n\r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nHOST_KEY_CHECKING(/code/rafiki/ansible.cfg) = False\r\n\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\n```\r\nvagrant@ubuntu-jammy:/code/rafiki$ cat /etc/lsb-release \r\nDISTRIB_ID=Ubuntu\r\nDISTRIB_RELEASE=22.04\r\nDISTRIB_CODENAME=jammy\r\nDISTRIB_DESCRIPTION=\"Ubuntu Jammy Jellyfish (development branch)\"\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n\r\nopennebula.yml:\r\n\r\n```yaml (paste below)\r\nplugin: community.general.opennebula\r\napi_url: \"RPC_URL\"\r\n# specify these here, or in the form username:password in ~/.one/one_auth or export ONE_AUTH=/some/other/path\r\n#api_username:\r\n#api_password:\r\nkeyed_groups:\r\n - prefix: distro\r\n key: ansible_distribution\r\n\r\n```\r\n\r\n`ansible-inventory --list -i opennebula.yml`\r\n\r\n### Expected Results\r\n\r\nI expected a grouping of hosts by distribution. 
Note that the plugin does perform grouping by LABEL, which is seen below with the `srv` and `foo` labels/groups.\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\n```console\r\nansible-inventory --list -i opennebula.yml\r\n{\r\n \"_meta\": {\r\n \"hostvars\": {\r\n \"terraform_demo_00\": {\r\n \"INFO\": \"\",\r\n \"INPUTS_ORDER\": \"\",\r\n \"LABELS\": [],\r\n \"LOGO\": \"images/logos/logo.png\",\r\n \"LXD_SECURITY_PRIVILEGED\": \"true\",\r\n \"MEMORY_UNIT_COST\": \"MB\",\r\n \"SCHED_REQUIREMENTS\": \"ARCH=\\\"x86_64\\\"\",\r\n \"ansible_host\": \"192.168.11.235\",\r\n \"name\": \"terraform_demo_00\",\r\n \"v4_first_ip\": \"192.168.11.235\",\r\n \"v6_first_ip\": false\r\n },\r\n \"terraform_demo_01\": {\r\n \"INFO\": \"\",\r\n \"INPUTS_ORDER\": \"\",\r\n \"LABELS\": [\r\n \"srv\",\r\n \"foo\"\r\n ],\r\n \"LOGO\": \"images/logos/logo.png\",\r\n \"LXD_SECURITY_PRIVILEGED\": \"true\",\r\n \"MEMORY_UNIT_COST\": \"MB\",\r\n \"SCHED_REQUIREMENTS\": \"ARCH=\\\"x86_64\\\"\",\r\n \"ansible_host\": \"192.168.11.241\",\r\n \"name\": \"terraform_demo_01\",\r\n \"v4_first_ip\": \"192.168.11.241\",\r\n \"v6_first_ip\": false\r\n }\r\n }\r\n },\r\n \"all\": {\r\n \"children\": [\r\n \"srv\",\r\n \"foo\",\r\n \"ungrouped\"\r\n ]\r\n },\r\n \"srv\": {\r\n \"hosts\": [\r\n \"terraform_demo_01\"\r\n ]\r\n },\r\n \"foo\": {\r\n \"hosts\": [\r\n \"terraform_demo_01\"\r\n ]\r\n },\r\n \"ungrouped\": {\r\n \"hosts\": [\r\n \"terraform_demo_00\"\r\n ]\r\n }\r\n}\r\n```\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost\u2122 <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n name: opennebula\n author:\n - Kristian Feldsam (@feldsam)\n short_description: OpenNebula inventory source\n version_added: \"3.8.0\"\n extends_documentation_fragment:\n - constructed\n description:\n - Get inventory hosts from OpenNebula cloud.\n - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)\n to set parameter values.\n - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.\n options:\n plugin:\n description: Token that ensures this is a source file for the 'opennebula' plugin.\n type: string\n required: true\n choices: [ community.general.opennebula ]\n api_url:\n description:\n - URL of the OpenNebula RPC server.\n - It is recommended to use HTTPS so that the username/password are not\n transferred over the network unencrypted.\n - If not set then the value of the C(ONE_URL) environment variable is used.\n env:\n - name: ONE_URL\n required: True\n type: string\n api_username:\n description:\n - Name of the user to login into the OpenNebula RPC server. If not set\n then the value of the C(ONE_USERNAME) environment variable is used.\n env:\n - name: ONE_USERNAME\n type: string\n api_password:\n description:\n - Password or a token of the user to login into OpenNebula RPC server.\n - If not set, the value of the C(ONE_PASSWORD) environment variable is used.\n env:\n - name: ONE_PASSWORD\n required: False\n type: string\n api_authfile:\n description:\n - If both I(api_username) or I(api_password) are not set, then it will try\n authenticate with ONE auth file. 
Default path is C(~/.one/one_auth).\n - Set environment variable C(ONE_AUTH) to override this path.\n env:\n - name: ONE_AUTH\n required: False\n type: string\n hostname:\n description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.\n type: string\n default: v4_first_ip\n choices:\n - v4_first_ip\n - v6_first_ip\n - name\n filter_by_label:\n description: Only return servers filtered by this label.\n type: string\n group_by_labels:\n description: Create host groups by vm labels\n type: bool\n default: True\n'''\n\nEXAMPLES = r'''\n# inventory_opennebula.yml file in YAML format\n# Example command line: ansible-inventory --list -i inventory_opennebula.yml\n\n# Pass a label filter to the API\nplugin: community.general.opennebula\napi_url: https://opennebula:2633/RPC2\nfilter_by_label: Cache\n'''\n\ntry:\n import pyone\n\n HAS_PYONE = True\nexcept ImportError:\n HAS_PYONE = False\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.inventory import BaseInventoryPlugin, Constructable\nfrom ansible.module_utils.common.text.converters import to_native\n\nfrom collections import namedtuple\nimport os\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable):\n NAME = 'community.general.opennebula'\n\n def verify_file(self, path):\n valid = False\n if super(InventoryModule, self).verify_file(path):\n if path.endswith(('opennebula.yaml', 'opennebula.yml')):\n valid = True\n return valid\n\n def _get_connection_info(self):\n url = self.get_option('api_url')\n username = self.get_option('api_username')\n password = self.get_option('api_password')\n authfile = self.get_option('api_authfile')\n\n if not username and not password:\n if authfile is None:\n authfile = os.path.join(os.environ.get(\"HOME\"), \".one\", \"one_auth\")\n try:\n with open(authfile, \"r\") as fp:\n authstring = fp.read().rstrip()\n username, password = authstring.split(\":\")\n except (OSError, IOError):\n raise AnsibleError(\"Could not find or read ONE_AUTH file at '{e}'\".format(e=authfile))\n except Exception:\n raise AnsibleError(\"Error occurs when reading ONE_AUTH file at '{e}'\".format(e=authfile))\n\n auth_params = namedtuple('auth', ('url', 'username', 'password'))\n\n return auth_params(url=url, username=username, password=password)\n\n def _get_vm_ipv4(self, vm):\n nic = vm.TEMPLATE.get('NIC')\n\n if isinstance(nic, dict):\n nic = [nic]\n\n for net in nic:\n return net['IP']\n\n return False\n\n def _get_vm_ipv6(self, vm):\n nic = vm.TEMPLATE.get('NIC')\n\n if isinstance(nic, dict):\n nic = [nic]\n\n for net in nic:\n if net.get('IP6_GLOBAL'):\n return net['IP6_GLOBAL']\n\n return False\n\n def _get_vm_pool(self):\n auth = self._get_connection_info()\n\n if not (auth.username and auth.password):\n raise AnsibleError('API Credentials missing. 
Check OpenNebula inventory file.')\n else:\n one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)\n\n # get hosts (VMs)\n try:\n vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)\n except Exception as e:\n raise AnsibleError(\"Something happened during XML-RPC call: {e}\".format(e=to_native(e)))\n\n return vm_pool\n\n def _retrieve_servers(self, label_filter=None):\n vm_pool = self._get_vm_pool()\n\n result = []\n\n # iterate over hosts\n for vm in vm_pool.VM:\n server = vm.USER_TEMPLATE\n\n labels = []\n if vm.USER_TEMPLATE.get('LABELS'):\n labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]\n labels = ''.join(labels)\n labels = labels.replace(' ', '_')\n labels = labels.replace('-', '_')\n labels = labels.split(',')\n\n # filter by label\n if label_filter is not None:\n if label_filter not in labels:\n continue\n\n server['name'] = vm.NAME\n server['LABELS'] = labels\n server['v4_first_ip'] = self._get_vm_ipv4(vm)\n server['v6_first_ip'] = self._get_vm_ipv6(vm)\n\n result.append(server)\n\n return result\n\n def _populate(self):\n hostname_preference = self.get_option('hostname')\n group_by_labels = self.get_option('group_by_labels')\n\n # Add a top group 'one'\n self.inventory.add_group(group='all')\n\n filter_by_label = self.get_option('filter_by_label')\n for server in self._retrieve_servers(filter_by_label):\n # check for labels\n if group_by_labels and server['LABELS']:\n for label in server['LABELS']:\n self.inventory.add_group(group=label)\n self.inventory.add_host(host=server['name'], group=label)\n\n self.inventory.add_host(host=server['name'], group='all')\n\n for attribute, value in server.items():\n self.inventory.set_variable(server['name'], attribute, value)\n\n if hostname_preference != 'name':\n self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])\n\n if server.get('SSH_PORT'):\n self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])\n\n def parse(self, inventory, loader, path, cache=True):\n if not HAS_PYONE:\n raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')\n\n super(InventoryModule, self).parse(inventory, loader, path)\n self._read_config_data(path=path)\n\n self._populate()\n", "path": "plugins/inventory/opennebula.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2020, FELDSAM s.r.o. 
- FeldHost\u2122 <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n name: opennebula\n author:\n - Kristian Feldsam (@feldsam)\n short_description: OpenNebula inventory source\n version_added: \"3.8.0\"\n extends_documentation_fragment:\n - constructed\n description:\n - Get inventory hosts from OpenNebula cloud.\n - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)\n to set parameter values.\n - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.\n options:\n plugin:\n description: Token that ensures this is a source file for the 'opennebula' plugin.\n type: string\n required: true\n choices: [ community.general.opennebula ]\n api_url:\n description:\n - URL of the OpenNebula RPC server.\n - It is recommended to use HTTPS so that the username/password are not\n transferred over the network unencrypted.\n - If not set then the value of the C(ONE_URL) environment variable is used.\n env:\n - name: ONE_URL\n required: True\n type: string\n api_username:\n description:\n - Name of the user to login into the OpenNebula RPC server. If not set\n then the value of the C(ONE_USERNAME) environment variable is used.\n env:\n - name: ONE_USERNAME\n type: string\n api_password:\n description:\n - Password or a token of the user to login into OpenNebula RPC server.\n - If not set, the value of the C(ONE_PASSWORD) environment variable is used.\n env:\n - name: ONE_PASSWORD\n required: False\n type: string\n api_authfile:\n description:\n - If both I(api_username) or I(api_password) are not set, then it will try\n authenticate with ONE auth file. Default path is C(~/.one/one_auth).\n - Set environment variable C(ONE_AUTH) to override this path.\n env:\n - name: ONE_AUTH\n required: False\n type: string\n hostname:\n description: Field to match the hostname. 
Note C(v4_first_ip) corresponds to the first IPv4 found on VM.\n type: string\n default: v4_first_ip\n choices:\n - v4_first_ip\n - v6_first_ip\n - name\n filter_by_label:\n description: Only return servers filtered by this label.\n type: string\n group_by_labels:\n description: Create host groups by vm labels\n type: bool\n default: True\n'''\n\nEXAMPLES = r'''\n# inventory_opennebula.yml file in YAML format\n# Example command line: ansible-inventory --list -i inventory_opennebula.yml\n\n# Pass a label filter to the API\nplugin: community.general.opennebula\napi_url: https://opennebula:2633/RPC2\nfilter_by_label: Cache\n'''\n\ntry:\n import pyone\n\n HAS_PYONE = True\nexcept ImportError:\n HAS_PYONE = False\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.inventory import BaseInventoryPlugin, Constructable\nfrom ansible.module_utils.common.text.converters import to_native\n\nfrom collections import namedtuple\nimport os\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable):\n NAME = 'community.general.opennebula'\n\n def verify_file(self, path):\n valid = False\n if super(InventoryModule, self).verify_file(path):\n if path.endswith(('opennebula.yaml', 'opennebula.yml')):\n valid = True\n return valid\n\n def _get_connection_info(self):\n url = self.get_option('api_url')\n username = self.get_option('api_username')\n password = self.get_option('api_password')\n authfile = self.get_option('api_authfile')\n\n if not username and not password:\n if authfile is None:\n authfile = os.path.join(os.environ.get(\"HOME\"), \".one\", \"one_auth\")\n try:\n with open(authfile, \"r\") as fp:\n authstring = fp.read().rstrip()\n username, password = authstring.split(\":\")\n except (OSError, IOError):\n raise AnsibleError(\"Could not find or read ONE_AUTH file at '{e}'\".format(e=authfile))\n except Exception:\n raise AnsibleError(\"Error occurs when reading ONE_AUTH file at '{e}'\".format(e=authfile))\n\n auth_params = namedtuple('auth', ('url', 'username', 'password'))\n\n return auth_params(url=url, username=username, password=password)\n\n def _get_vm_ipv4(self, vm):\n nic = vm.TEMPLATE.get('NIC')\n\n if isinstance(nic, dict):\n nic = [nic]\n\n for net in nic:\n return net['IP']\n\n return False\n\n def _get_vm_ipv6(self, vm):\n nic = vm.TEMPLATE.get('NIC')\n\n if isinstance(nic, dict):\n nic = [nic]\n\n for net in nic:\n if net.get('IP6_GLOBAL'):\n return net['IP6_GLOBAL']\n\n return False\n\n def _get_vm_pool(self):\n auth = self._get_connection_info()\n\n if not (auth.username and auth.password):\n raise AnsibleError('API Credentials missing. 
Check OpenNebula inventory file.')\n else:\n one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)\n\n # get hosts (VMs)\n try:\n vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)\n except Exception as e:\n raise AnsibleError(\"Something happened during XML-RPC call: {e}\".format(e=to_native(e)))\n\n return vm_pool\n\n def _retrieve_servers(self, label_filter=None):\n vm_pool = self._get_vm_pool()\n\n result = []\n\n # iterate over hosts\n for vm in vm_pool.VM:\n server = vm.USER_TEMPLATE\n\n labels = []\n if vm.USER_TEMPLATE.get('LABELS'):\n labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]\n labels = ''.join(labels)\n labels = labels.replace(' ', '_')\n labels = labels.replace('-', '_')\n labels = labels.split(',')\n\n # filter by label\n if label_filter is not None:\n if label_filter not in labels:\n continue\n\n server['name'] = vm.NAME\n server['LABELS'] = labels\n server['v4_first_ip'] = self._get_vm_ipv4(vm)\n server['v6_first_ip'] = self._get_vm_ipv6(vm)\n\n result.append(server)\n\n return result\n\n def _populate(self):\n hostname_preference = self.get_option('hostname')\n group_by_labels = self.get_option('group_by_labels')\n strict = self.get_option('strict')\n\n # Add a top group 'one'\n self.inventory.add_group(group='all')\n\n filter_by_label = self.get_option('filter_by_label')\n servers = self._retrieve_servers(filter_by_label)\n for server in servers:\n hostname = server['name']\n # check for labels\n if group_by_labels and server['LABELS']:\n for label in server['LABELS']:\n self.inventory.add_group(group=label)\n self.inventory.add_host(host=hostname, group=label)\n\n self.inventory.add_host(host=hostname, group='all')\n\n for attribute, value in server.items():\n self.inventory.set_variable(hostname, attribute, value)\n\n if hostname_preference != 'name':\n self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference])\n\n if server.get('SSH_PORT'):\n self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT'])\n\n # handle construcable implementation: get composed variables if any\n self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict)\n\n # groups based on jinja conditionals get added to specific groups\n self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict)\n\n # groups based on variables associated with them in the inventory\n self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict)\n\n def parse(self, inventory, loader, path, cache=True):\n if not HAS_PYONE:\n raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')\n\n super(InventoryModule, self).parse(inventory, loader, path)\n self._read_config_data(path=path)\n\n self._populate()\n", "path": "plugins/inventory/opennebula.py"}]}
| 3,895 | 533 |
gh_patches_debug_40518
|
rasdani/github-patches
|
git_diff
|
numba__numba-3291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`numba -s` should show total cores and available cores
As title.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/numba_entry.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import sys
4 import argparse
5 import os
6 import subprocess
7
8
9 def get_sys_info():
10 # delay these imports until now as they are only needed in this
11 # function which then exits.
12 import platform
13 import json
14 from numba import config
15 from numba import cuda as cu
16 from numba.cuda import cudadrv
17 from numba.cuda.cudadrv.driver import driver as cudriver
18 from numba import roc
19 from numba.roc.hlc import hlc, libhlc
20 import textwrap as tw
21 import ctypes as ct
22 import llvmlite.binding as llvmbind
23 import locale
24 from datetime import datetime
25 from itertools import chain
26 from subprocess import check_output, CalledProcessError
27
28 try:
29 fmt = "%-35s : %-s"
30 print("-" * 80)
31 print("__Time Stamp__")
32 print(datetime.utcnow())
33 print("")
34
35 print("__Hardware Information__")
36 print(fmt % ("Machine", platform.machine()))
37 print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
38 try:
39 featuremap = llvmbind.get_host_cpu_features()
40 except RuntimeError:
41 print(fmt % ("CPU Features", "NA"))
42 else:
43 features = sorted([key for key, value in featuremap.items()
44 if value])
45 cpu_feat = tw.fill(' '.join(features), 80)
46 print(fmt % ("CPU Features", ""))
47 print(cpu_feat)
48 print("")
49
50 print("__OS Information__")
51 print(fmt % ("Platform", platform.platform(aliased=True)))
52 print(fmt % ("Release", platform.release()))
53 system_name = platform.system()
54 print(fmt % ("System Name", system_name))
55 print(fmt % ("Version", platform.version()))
56 try:
57 if system_name == 'Linux':
58 info = platform.linux_distribution()
59 elif system_name == 'Windows':
60 info = platform.win32_ver()
61 elif system_name == 'Darwin':
62 info = platform.mac_ver()
63 else:
64 raise RuntimeError("Unknown system.")
65 buf = ''.join([x
66 if x != '' else ' '
67 for x in list(chain.from_iterable(info))])
68 print(fmt % ("OS specific info", buf))
69
70 if system_name == 'Linux':
71 print(fmt % ("glibc info", ' '.join(platform.libc_ver())))
72 except:
73 print("Error: System name incorrectly identified or unknown.")
74 print("")
75
76 print("__Python Information__")
77 print(fmt % ("Python Compiler", platform.python_compiler()))
78 print(
79 fmt %
80 ("Python Implementation",
81 platform.python_implementation()))
82 print(fmt % ("Python Version", platform.python_version()))
83 print(
84 fmt %
85 ("Python Locale ", ' '.join(
86 [x for x in locale.getdefaultlocale() if x is not None])))
87
88 print("")
89 print("__LLVM information__")
90 print(
91 fmt %
92 ("LLVM version", '.'.join(
93 [str(k) for k in llvmbind.llvm_version_info])))
94
95 print("")
96 print("__CUDA Information__")
97 # Look for GPUs
98 try:
99 cu.list_devices()[0] # will a device initialise?
100 except BaseException as e:
101 msg_not_found = "CUDA driver library cannot be found"
102 msg_disabled_by_user = "CUDA is disabled"
103 msg_end = " or no CUDA enabled devices are present."
104 msg_generic_problem = "Error: CUDA device intialisation problem."
105 msg = getattr(e, 'msg', None)
106 if msg is not None:
107 if msg_not_found in msg:
108 err_msg = msg_not_found + msg_end
109 elif msg_disabled_by_user in msg:
110 err_msg = msg_disabled_by_user + msg_end
111 else:
112 err_msg = msg_generic_problem + " Message:" + msg
113 else:
114 err_msg = msg_generic_problem + " " + str(e)
115 # Best effort error report
116 print("%s\nError class: %s" % (err_msg, str(type(e))))
117 else:
118 try:
119 cu.detect()
120 dv = ct.c_int(0)
121 cudriver.cuDriverGetVersion(ct.byref(dv))
122 print(fmt % ("CUDA driver version", dv.value))
123 print("CUDA libraries:")
124 cudadrv.libs.test(sys.platform, print_paths=False)
125 except:
126 print(
127 "Error: Probing CUDA failed (device and driver present, runtime problem?)\n")
128
129 print("")
130 print("__ROC Information__")
131 roc_is_available = roc.is_available()
132 print(fmt % ("ROC available", roc_is_available))
133
134 toolchains = []
135 try:
136 libhlc.HLC()
137 toolchains.append('librocmlite library')
138 except:
139 pass
140 try:
141 cmd = hlc.CmdLine().check_tooling()
142 toolchains.append('ROC command line tools')
143 except:
144 pass
145
146 # if no ROC try and report why
147 if not roc_is_available:
148 from numba.roc.hsadrv.driver import hsa
149 try:
150 hsa.is_available
151 except BaseException as e:
152 msg = str(e)
153 else:
154 msg = 'No ROC toolchains found.'
155 print(fmt % ("Error initialising ROC due to", msg))
156
157 if toolchains:
158 print(fmt % ("Available Toolchains", ', '.join(toolchains)))
159
160 try:
161 # ROC might not be available due to lack of tool chain, but HSA
162 # agents may be listed
163 from numba.roc.hsadrv.driver import hsa, dgpu_count
164 decode = lambda x: x.decode('utf-8') if isinstance(x, bytes) else x
165 print("\nFound %s HSA Agents:" % len(hsa.agents))
166 for i, agent in enumerate(hsa.agents):
167 print('Agent id : %s' % i)
168 print(' vendor: %s' % decode(agent.vendor_name))
169 print(' name: %s' % decode(agent.name))
170 print(' type: %s' % agent.device)
171 print("")
172
173 _dgpus = []
174 for a in hsa.agents:
175 if a.is_component and a.device == 'GPU':
176 _dgpus.append(decode(a.name))
177 print(fmt % ("Found %s discrete GPU(s)" % dgpu_count(), \
178 ', '.join(_dgpus)))
179 except Exception as e:
180 print("No HSA Agents found, encountered exception when searching:")
181 print(e)
182
183
184 print("")
185 print("__SVML Information__")
186 # replicate some SVML detection logic from numba.__init__ here.
187 # if SVML load fails in numba.__init__ the splitting of the logic
188 # here will help diagnosis of the underlying issue
189 have_svml_library = True
190 try:
191 if sys.platform.startswith('linux'):
192 llvmbind.load_library_permanently("libsvml.so")
193 elif sys.platform.startswith('darwin'):
194 llvmbind.load_library_permanently("libsvml.dylib")
195 elif sys.platform.startswith('win'):
196 llvmbind.load_library_permanently("svml_dispmd")
197 else:
198 have_svml_library = False
199 except:
200 have_svml_library = False
201 func = getattr(llvmbind.targets, "has_svml", None)
202 llvm_svml_patched = func() if func is not None else False
203 svml_operational = (config.USING_SVML and llvm_svml_patched \
204 and have_svml_library)
205 print(fmt % ("SVML state, config.USING_SVML", config.USING_SVML))
206 print(fmt % ("SVML library found and loaded", have_svml_library))
207 print(fmt % ("llvmlite using SVML patched LLVM", llvm_svml_patched))
208 print(fmt % ("SVML operational:", svml_operational))
209
210 # Look for conda and conda information
211 print("")
212 print("__Conda Information__")
213 cmd = ["conda", "info", "--json"]
214 try:
215 conda_out = check_output(cmd)
216 except Exception as e:
217 print(
218 "Conda not present/not working.\nError was %s\n" % e)
219 else:
220 data = ''.join(conda_out.decode("utf-8").splitlines())
221 jsond = json.loads(data)
222 keys = ['conda_build_version',
223 'conda_env_version',
224 'platform',
225 'python_version',
226 'root_writable']
227 for k in keys:
228 try:
229 print(fmt % (k, jsond[k]))
230 except KeyError:
231 pass
232
233 # get info about current environment
234 cmd = ["conda", "list"]
235 try:
236 conda_out = check_output(cmd)
237 except CalledProcessError as e:
238 print("Error: Conda command failed. Error was %s\n" % e.output)
239 else:
240 print("")
241 print("__Current Conda Env__")
242 data = conda_out.decode("utf-8").splitlines()
243 for k in data:
244 if k[0] != '#': # don't show where the env is, personal data
245 print(k)
246
247 print("-" * 80)
248
249 except Exception as e:
250 print("Error: The system reporting tool has failed unexpectedly.")
251 print("Exception was:")
252 print(e)
253
254 finally:
255 print(
256 "%s" %
257 "If requested, please copy and paste the information between\n"
258 "the dashed (----) lines, or from a given specific section as\n"
259 "appropriate.\n\n"
260 "=============================================================\n"
261 "IMPORTANT: Please ensure that you are happy with sharing the\n"
262 "contents of the information present, any information that you\n"
263 "wish to keep private you should remove before sharing.\n"
264 "=============================================================\n")
265
266
267 def make_parser():
268 parser = argparse.ArgumentParser()
269 parser.add_argument('--annotate', help='Annotate source',
270 action='store_true')
271 parser.add_argument('--dump-llvm', action="store_true",
272 help='Print generated llvm assembly')
273 parser.add_argument('--dump-optimized', action='store_true',
274 help='Dump the optimized llvm assembly')
275 parser.add_argument('--dump-assembly', action='store_true',
276 help='Dump the LLVM generated assembly')
277 parser.add_argument('--dump-cfg', action="store_true",
278 help='[Deprecated] Dump the control flow graph')
279 parser.add_argument('--dump-ast', action="store_true",
280 help='[Deprecated] Dump the AST')
281 parser.add_argument('--annotate-html', nargs=1,
282 help='Output source annotation as html')
283 parser.add_argument('-s', '--sysinfo', action="store_true",
284 help='Output system information for bug reporting')
285 parser.add_argument('filename', nargs='?', help='Python source filename')
286 return parser
287
288
289 def main():
290 parser = make_parser()
291 args = parser.parse_args()
292
293 if args.dump_cfg:
294 print("CFG dump is removed.")
295 sys.exit(1)
296 if args.dump_ast:
297 print("AST dump is removed. Numba no longer depends on AST.")
298 sys.exit(1)
299
300 if args.sysinfo:
301 print("System info:")
302 get_sys_info()
303 sys.exit(0)
304
305 os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate))
306 if args.annotate_html is not None:
307 try:
308 from jinja2 import Template
309 except ImportError:
310 raise ImportError("Please install the 'jinja2' package")
311 os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0])
312 os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm))
313 os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized))
314 os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly))
315
316 if args.filename:
317 cmd = [sys.executable, args.filename]
318 subprocess.call(cmd)
319 else:
320 print("numba: error: the following arguments are required: filename")
321 sys.exit(1)
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numba/numba_entry.py b/numba/numba_entry.py
--- a/numba/numba_entry.py
+++ b/numba/numba_entry.py
@@ -11,6 +11,7 @@
# function which then exits.
import platform
import json
+ import multiprocessing
from numba import config
from numba import cuda as cu
from numba.cuda import cudadrv
@@ -26,15 +27,57 @@
from subprocess import check_output, CalledProcessError
try:
- fmt = "%-35s : %-s"
+ fmt = "%-45s : %-s"
print("-" * 80)
print("__Time Stamp__")
print(datetime.utcnow())
print("")
print("__Hardware Information__")
+ system_name = platform.system()
print(fmt % ("Machine", platform.machine()))
print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
+ if system_name == 'Linux':
+ strmatch = 'Cpus_allowed'
+ try:
+ loc = '/proc/self/status'
+ with open(loc, 'rt') as f:
+ proc_stat = f.read().splitlines()
+ for x in proc_stat:
+ if x.startswith(strmatch):
+ if x.startswith('%s:' % strmatch):
+ hexnum = '0x%s' % x.split(':')[1].strip()
+ acc_cpus = int(hexnum, 16)
+ _n = str(bin(acc_cpus).count('1'))
+ print(fmt % ("Number of accessible CPU cores",
+ _n))
+ elif x.startswith('%s_list:' % strmatch):
+ _a = x.split(':')[1].strip()
+ print(fmt % ("Listed accessible CPUs cores",
+ _a))
+ except BaseException:
+ print(fmt % ("CPU count", multiprocessing.cpu_count()))
+ # See if CFS is in place
+ # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
+ try:
+ def scrape_lines(loc):
+ with open(loc, 'rt') as f:
+ return f.read().splitlines()
+ loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us'
+ cfs_period = int(scrape_lines(loc)[0])
+ loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us'
+ cfs_quota = int(scrape_lines(loc)[0])
+ if cfs_quota == -1:
+ print(fmt % ("CFS restrictions", "None"))
+ else:
+ runtime_amount = float(cfs_quota)/float(cfs_period)
+ print(fmt % ("CFS restrictions (CPUs worth of runtime)",
+ runtime_amount))
+ except BaseException:
+ print(fmt % ("CFS restrictions", 'Information not available'))
+ else:
+ print(fmt % ("CPU count", multiprocessing.cpu_count()))
+
try:
featuremap = llvmbind.get_host_cpu_features()
except RuntimeError:
@@ -50,7 +93,6 @@
print("__OS Information__")
print(fmt % ("Platform", platform.platform(aliased=True)))
print(fmt % ("Release", platform.release()))
- system_name = platform.system()
print(fmt % ("System Name", system_name))
print(fmt % ("Version", platform.version()))
try:
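To make the arithmetic in this patch concrete, here is a small worked example with hypothetical values (not read from any real machine): `Cpus_allowed` in `/proc/self/status` is a hexadecimal bitmask whose set bits are the CPUs the process may run on, and the CFS quota divided by the period gives the CPUs' worth of runtime the cgroup permits.

```python
# Hypothetical values for illustration only.
cpus_allowed = int("0x%s" % "ff", 16)          # from a "Cpus_allowed: ff" line in /proc/self/status
print(bin(cpus_allowed).count("1"))            # -> 8 accessible CPU cores

cfs_period_us = 100000                         # /sys/fs/cgroup/cpuacct/cpu.cfs_period_us
cfs_quota_us = 200000                          # /sys/fs/cgroup/cpuacct/cpu.cfs_quota_us (-1 means no limit)
print(float(cfs_quota_us) / float(cfs_period_us))  # -> 2.0 CPUs worth of runtime
```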
|
{"golden_diff": "diff --git a/numba/numba_entry.py b/numba/numba_entry.py\n--- a/numba/numba_entry.py\n+++ b/numba/numba_entry.py\n@@ -11,6 +11,7 @@\n # function which then exits.\n import platform\n import json\n+ import multiprocessing\n from numba import config\n from numba import cuda as cu\n from numba.cuda import cudadrv\n@@ -26,15 +27,57 @@\n from subprocess import check_output, CalledProcessError\n \n try:\n- fmt = \"%-35s : %-s\"\n+ fmt = \"%-45s : %-s\"\n print(\"-\" * 80)\n print(\"__Time Stamp__\")\n print(datetime.utcnow())\n print(\"\")\n \n print(\"__Hardware Information__\")\n+ system_name = platform.system()\n print(fmt % (\"Machine\", platform.machine()))\n print(fmt % (\"CPU Name\", llvmbind.get_host_cpu_name()))\n+ if system_name == 'Linux':\n+ strmatch = 'Cpus_allowed'\n+ try:\n+ loc = '/proc/self/status'\n+ with open(loc, 'rt') as f:\n+ proc_stat = f.read().splitlines()\n+ for x in proc_stat:\n+ if x.startswith(strmatch):\n+ if x.startswith('%s:' % strmatch):\n+ hexnum = '0x%s' % x.split(':')[1].strip()\n+ acc_cpus = int(hexnum, 16)\n+ _n = str(bin(acc_cpus).count('1'))\n+ print(fmt % (\"Number of accessible CPU cores\",\n+ _n))\n+ elif x.startswith('%s_list:' % strmatch):\n+ _a = x.split(':')[1].strip()\n+ print(fmt % (\"Listed accessible CPUs cores\",\n+ _a))\n+ except BaseException:\n+ print(fmt % (\"CPU count\", multiprocessing.cpu_count()))\n+ # See if CFS is in place\n+ # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt\n+ try:\n+ def scrape_lines(loc):\n+ with open(loc, 'rt') as f:\n+ return f.read().splitlines()\n+ loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us'\n+ cfs_period = int(scrape_lines(loc)[0])\n+ loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us'\n+ cfs_quota = int(scrape_lines(loc)[0])\n+ if cfs_quota == -1:\n+ print(fmt % (\"CFS restrictions\", \"None\"))\n+ else:\n+ runtime_amount = float(cfs_quota)/float(cfs_period)\n+ print(fmt % (\"CFS restrictions (CPUs worth of runtime)\",\n+ runtime_amount))\n+ except BaseException:\n+ print(fmt % (\"CFS restrictions\", 'Information not available'))\n+ else:\n+ print(fmt % (\"CPU count\", multiprocessing.cpu_count()))\n+\n try:\n featuremap = llvmbind.get_host_cpu_features()\n except RuntimeError:\n@@ -50,7 +93,6 @@\n print(\"__OS Information__\")\n print(fmt % (\"Platform\", platform.platform(aliased=True)))\n print(fmt % (\"Release\", platform.release()))\n- system_name = platform.system()\n print(fmt % (\"System Name\", system_name))\n print(fmt % (\"Version\", platform.version()))\n try:\n", "issue": "`numba -s` should show total cores and available cores\nAs title.\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport sys\nimport argparse\nimport os\nimport subprocess\n\n\ndef get_sys_info():\n # delay these imports until now as they are only needed in this\n # function which then exits.\n import platform\n import json\n from numba import config\n from numba import cuda as cu\n from numba.cuda import cudadrv\n from numba.cuda.cudadrv.driver import driver as cudriver\n from numba import roc\n from numba.roc.hlc import hlc, libhlc\n import textwrap as tw\n import ctypes as ct\n import llvmlite.binding as llvmbind\n import locale\n from datetime import datetime\n from itertools import chain\n from subprocess import check_output, CalledProcessError\n\n try:\n fmt = \"%-35s : %-s\"\n print(\"-\" * 80)\n print(\"__Time Stamp__\")\n print(datetime.utcnow())\n print(\"\")\n\n print(\"__Hardware Information__\")\n print(fmt % (\"Machine\", 
platform.machine()))\n print(fmt % (\"CPU Name\", llvmbind.get_host_cpu_name()))\n try:\n featuremap = llvmbind.get_host_cpu_features()\n except RuntimeError:\n print(fmt % (\"CPU Features\", \"NA\"))\n else:\n features = sorted([key for key, value in featuremap.items()\n if value])\n cpu_feat = tw.fill(' '.join(features), 80)\n print(fmt % (\"CPU Features\", \"\"))\n print(cpu_feat)\n print(\"\")\n\n print(\"__OS Information__\")\n print(fmt % (\"Platform\", platform.platform(aliased=True)))\n print(fmt % (\"Release\", platform.release()))\n system_name = platform.system()\n print(fmt % (\"System Name\", system_name))\n print(fmt % (\"Version\", platform.version()))\n try:\n if system_name == 'Linux':\n info = platform.linux_distribution()\n elif system_name == 'Windows':\n info = platform.win32_ver()\n elif system_name == 'Darwin':\n info = platform.mac_ver()\n else:\n raise RuntimeError(\"Unknown system.\")\n buf = ''.join([x\n if x != '' else ' '\n for x in list(chain.from_iterable(info))])\n print(fmt % (\"OS specific info\", buf))\n\n if system_name == 'Linux':\n print(fmt % (\"glibc info\", ' '.join(platform.libc_ver())))\n except:\n print(\"Error: System name incorrectly identified or unknown.\")\n print(\"\")\n\n print(\"__Python Information__\")\n print(fmt % (\"Python Compiler\", platform.python_compiler()))\n print(\n fmt %\n (\"Python Implementation\",\n platform.python_implementation()))\n print(fmt % (\"Python Version\", platform.python_version()))\n print(\n fmt %\n (\"Python Locale \", ' '.join(\n [x for x in locale.getdefaultlocale() if x is not None])))\n\n print(\"\")\n print(\"__LLVM information__\")\n print(\n fmt %\n (\"LLVM version\", '.'.join(\n [str(k) for k in llvmbind.llvm_version_info])))\n\n print(\"\")\n print(\"__CUDA Information__\")\n # Look for GPUs\n try:\n cu.list_devices()[0] # will a device initialise?\n except BaseException as e:\n msg_not_found = \"CUDA driver library cannot be found\"\n msg_disabled_by_user = \"CUDA is disabled\"\n msg_end = \" or no CUDA enabled devices are present.\"\n msg_generic_problem = \"Error: CUDA device intialisation problem.\"\n msg = getattr(e, 'msg', None)\n if msg is not None:\n if msg_not_found in msg:\n err_msg = msg_not_found + msg_end\n elif msg_disabled_by_user in msg:\n err_msg = msg_disabled_by_user + msg_end\n else:\n err_msg = msg_generic_problem + \" Message:\" + msg\n else:\n err_msg = msg_generic_problem + \" \" + str(e)\n # Best effort error report\n print(\"%s\\nError class: %s\" % (err_msg, str(type(e))))\n else:\n try:\n cu.detect()\n dv = ct.c_int(0)\n cudriver.cuDriverGetVersion(ct.byref(dv))\n print(fmt % (\"CUDA driver version\", dv.value))\n print(\"CUDA libraries:\")\n cudadrv.libs.test(sys.platform, print_paths=False)\n except:\n print(\n \"Error: Probing CUDA failed (device and driver present, runtime problem?)\\n\")\n\n print(\"\")\n print(\"__ROC Information__\")\n roc_is_available = roc.is_available()\n print(fmt % (\"ROC available\", roc_is_available))\n\n toolchains = []\n try:\n libhlc.HLC()\n toolchains.append('librocmlite library')\n except:\n pass\n try:\n cmd = hlc.CmdLine().check_tooling()\n toolchains.append('ROC command line tools')\n except:\n pass\n\n # if no ROC try and report why\n if not roc_is_available:\n from numba.roc.hsadrv.driver import hsa\n try:\n hsa.is_available\n except BaseException as e:\n msg = str(e)\n else:\n msg = 'No ROC toolchains found.'\n print(fmt % (\"Error initialising ROC due to\", msg))\n\n if toolchains:\n print(fmt % (\"Available Toolchains\", ', 
'.join(toolchains)))\n\n try:\n # ROC might not be available due to lack of tool chain, but HSA\n # agents may be listed\n from numba.roc.hsadrv.driver import hsa, dgpu_count\n decode = lambda x: x.decode('utf-8') if isinstance(x, bytes) else x\n print(\"\\nFound %s HSA Agents:\" % len(hsa.agents))\n for i, agent in enumerate(hsa.agents):\n print('Agent id : %s' % i)\n print(' vendor: %s' % decode(agent.vendor_name))\n print(' name: %s' % decode(agent.name))\n print(' type: %s' % agent.device)\n print(\"\")\n\n _dgpus = []\n for a in hsa.agents:\n if a.is_component and a.device == 'GPU':\n _dgpus.append(decode(a.name))\n print(fmt % (\"Found %s discrete GPU(s)\" % dgpu_count(), \\\n ', '.join(_dgpus)))\n except Exception as e:\n print(\"No HSA Agents found, encountered exception when searching:\")\n print(e)\n\n\n print(\"\")\n print(\"__SVML Information__\")\n # replicate some SVML detection logic from numba.__init__ here.\n # if SVML load fails in numba.__init__ the splitting of the logic\n # here will help diagnosis of the underlying issue\n have_svml_library = True\n try:\n if sys.platform.startswith('linux'):\n llvmbind.load_library_permanently(\"libsvml.so\")\n elif sys.platform.startswith('darwin'):\n llvmbind.load_library_permanently(\"libsvml.dylib\")\n elif sys.platform.startswith('win'):\n llvmbind.load_library_permanently(\"svml_dispmd\")\n else:\n have_svml_library = False\n except:\n have_svml_library = False\n func = getattr(llvmbind.targets, \"has_svml\", None)\n llvm_svml_patched = func() if func is not None else False\n svml_operational = (config.USING_SVML and llvm_svml_patched \\\n and have_svml_library)\n print(fmt % (\"SVML state, config.USING_SVML\", config.USING_SVML))\n print(fmt % (\"SVML library found and loaded\", have_svml_library))\n print(fmt % (\"llvmlite using SVML patched LLVM\", llvm_svml_patched))\n print(fmt % (\"SVML operational:\", svml_operational))\n\n # Look for conda and conda information\n print(\"\")\n print(\"__Conda Information__\")\n cmd = [\"conda\", \"info\", \"--json\"]\n try:\n conda_out = check_output(cmd)\n except Exception as e:\n print(\n \"Conda not present/not working.\\nError was %s\\n\" % e)\n else:\n data = ''.join(conda_out.decode(\"utf-8\").splitlines())\n jsond = json.loads(data)\n keys = ['conda_build_version',\n 'conda_env_version',\n 'platform',\n 'python_version',\n 'root_writable']\n for k in keys:\n try:\n print(fmt % (k, jsond[k]))\n except KeyError:\n pass\n\n # get info about current environment\n cmd = [\"conda\", \"list\"]\n try:\n conda_out = check_output(cmd)\n except CalledProcessError as e:\n print(\"Error: Conda command failed. 
Error was %s\\n\" % e.output)\n else:\n print(\"\")\n print(\"__Current Conda Env__\")\n data = conda_out.decode(\"utf-8\").splitlines()\n for k in data:\n if k[0] != '#': # don't show where the env is, personal data\n print(k)\n\n print(\"-\" * 80)\n\n except Exception as e:\n print(\"Error: The system reporting tool has failed unexpectedly.\")\n print(\"Exception was:\")\n print(e)\n\n finally:\n print(\n \"%s\" %\n \"If requested, please copy and paste the information between\\n\"\n \"the dashed (----) lines, or from a given specific section as\\n\"\n \"appropriate.\\n\\n\"\n \"=============================================================\\n\"\n \"IMPORTANT: Please ensure that you are happy with sharing the\\n\"\n \"contents of the information present, any information that you\\n\"\n \"wish to keep private you should remove before sharing.\\n\"\n \"=============================================================\\n\")\n\n\ndef make_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--annotate', help='Annotate source',\n action='store_true')\n parser.add_argument('--dump-llvm', action=\"store_true\",\n help='Print generated llvm assembly')\n parser.add_argument('--dump-optimized', action='store_true',\n help='Dump the optimized llvm assembly')\n parser.add_argument('--dump-assembly', action='store_true',\n help='Dump the LLVM generated assembly')\n parser.add_argument('--dump-cfg', action=\"store_true\",\n help='[Deprecated] Dump the control flow graph')\n parser.add_argument('--dump-ast', action=\"store_true\",\n help='[Deprecated] Dump the AST')\n parser.add_argument('--annotate-html', nargs=1,\n help='Output source annotation as html')\n parser.add_argument('-s', '--sysinfo', action=\"store_true\",\n help='Output system information for bug reporting')\n parser.add_argument('filename', nargs='?', help='Python source filename')\n return parser\n\n\ndef main():\n parser = make_parser()\n args = parser.parse_args()\n\n if args.dump_cfg:\n print(\"CFG dump is removed.\")\n sys.exit(1)\n if args.dump_ast:\n print(\"AST dump is removed. 
Numba no longer depends on AST.\")\n sys.exit(1)\n\n if args.sysinfo:\n print(\"System info:\")\n get_sys_info()\n sys.exit(0)\n\n os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate))\n if args.annotate_html is not None:\n try:\n from jinja2 import Template\n except ImportError:\n raise ImportError(\"Please install the 'jinja2' package\")\n os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0])\n os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm))\n os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized))\n os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly))\n\n if args.filename:\n cmd = [sys.executable, args.filename]\n subprocess.call(cmd)\n else:\n print(\"numba: error: the following arguments are required: filename\")\n sys.exit(1)\n", "path": "numba/numba_entry.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport sys\nimport argparse\nimport os\nimport subprocess\n\n\ndef get_sys_info():\n # delay these imports until now as they are only needed in this\n # function which then exits.\n import platform\n import json\n import multiprocessing\n from numba import config\n from numba import cuda as cu\n from numba.cuda import cudadrv\n from numba.cuda.cudadrv.driver import driver as cudriver\n from numba import roc\n from numba.roc.hlc import hlc, libhlc\n import textwrap as tw\n import ctypes as ct\n import llvmlite.binding as llvmbind\n import locale\n from datetime import datetime\n from itertools import chain\n from subprocess import check_output, CalledProcessError\n\n try:\n fmt = \"%-45s : %-s\"\n print(\"-\" * 80)\n print(\"__Time Stamp__\")\n print(datetime.utcnow())\n print(\"\")\n\n print(\"__Hardware Information__\")\n system_name = platform.system()\n print(fmt % (\"Machine\", platform.machine()))\n print(fmt % (\"CPU Name\", llvmbind.get_host_cpu_name()))\n if system_name == 'Linux':\n strmatch = 'Cpus_allowed'\n try:\n loc = '/proc/self/status'\n with open(loc, 'rt') as f:\n proc_stat = f.read().splitlines()\n for x in proc_stat:\n if x.startswith(strmatch):\n if x.startswith('%s:' % strmatch):\n hexnum = '0x%s' % x.split(':')[1].strip()\n acc_cpus = int(hexnum, 16)\n _n = str(bin(acc_cpus).count('1'))\n print(fmt % (\"Number of accessible CPU cores\",\n _n))\n elif x.startswith('%s_list:' % strmatch):\n _a = x.split(':')[1].strip()\n print(fmt % (\"Listed accessible CPUs cores\",\n _a))\n except BaseException:\n print(fmt % (\"CPU count\", multiprocessing.cpu_count()))\n # See if CFS is in place\n # https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt\n try:\n def scrape_lines(loc):\n with open(loc, 'rt') as f:\n return f.read().splitlines()\n loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us'\n cfs_period = int(scrape_lines(loc)[0])\n loc = '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us'\n cfs_quota = int(scrape_lines(loc)[0])\n if cfs_quota == -1:\n print(fmt % (\"CFS restrictions\", \"None\"))\n else:\n runtime_amount = float(cfs_quota)/float(cfs_period)\n print(fmt % (\"CFS restrictions (CPUs worth of runtime)\",\n runtime_amount))\n except BaseException:\n print(fmt % (\"CFS restrictions\", 'Information not available'))\n else:\n print(fmt % (\"CPU count\", multiprocessing.cpu_count()))\n\n try:\n featuremap = llvmbind.get_host_cpu_features()\n except RuntimeError:\n print(fmt % (\"CPU Features\", \"NA\"))\n else:\n features = sorted([key for key, value in featuremap.items()\n if value])\n cpu_feat = tw.fill(' '.join(features), 80)\n print(fmt % (\"CPU Features\", 
\"\"))\n print(cpu_feat)\n print(\"\")\n\n print(\"__OS Information__\")\n print(fmt % (\"Platform\", platform.platform(aliased=True)))\n print(fmt % (\"Release\", platform.release()))\n print(fmt % (\"System Name\", system_name))\n print(fmt % (\"Version\", platform.version()))\n try:\n if system_name == 'Linux':\n info = platform.linux_distribution()\n elif system_name == 'Windows':\n info = platform.win32_ver()\n elif system_name == 'Darwin':\n info = platform.mac_ver()\n else:\n raise RuntimeError(\"Unknown system.\")\n buf = ''.join([x\n if x != '' else ' '\n for x in list(chain.from_iterable(info))])\n print(fmt % (\"OS specific info\", buf))\n\n if system_name == 'Linux':\n print(fmt % (\"glibc info\", ' '.join(platform.libc_ver())))\n except:\n print(\"Error: System name incorrectly identified or unknown.\")\n print(\"\")\n\n print(\"__Python Information__\")\n print(fmt % (\"Python Compiler\", platform.python_compiler()))\n print(\n fmt %\n (\"Python Implementation\",\n platform.python_implementation()))\n print(fmt % (\"Python Version\", platform.python_version()))\n print(\n fmt %\n (\"Python Locale \", ' '.join(\n [x for x in locale.getdefaultlocale() if x is not None])))\n\n print(\"\")\n print(\"__LLVM information__\")\n print(\n fmt %\n (\"LLVM version\", '.'.join(\n [str(k) for k in llvmbind.llvm_version_info])))\n\n print(\"\")\n print(\"__CUDA Information__\")\n # Look for GPUs\n try:\n cu.list_devices()[0] # will a device initialise?\n except BaseException as e:\n msg_not_found = \"CUDA driver library cannot be found\"\n msg_disabled_by_user = \"CUDA is disabled\"\n msg_end = \" or no CUDA enabled devices are present.\"\n msg_generic_problem = \"Error: CUDA device intialisation problem.\"\n msg = getattr(e, 'msg', None)\n if msg is not None:\n if msg_not_found in msg:\n err_msg = msg_not_found + msg_end\n elif msg_disabled_by_user in msg:\n err_msg = msg_disabled_by_user + msg_end\n else:\n err_msg = msg_generic_problem + \" Message:\" + msg\n else:\n err_msg = msg_generic_problem + \" \" + str(e)\n # Best effort error report\n print(\"%s\\nError class: %s\" % (err_msg, str(type(e))))\n else:\n try:\n cu.detect()\n dv = ct.c_int(0)\n cudriver.cuDriverGetVersion(ct.byref(dv))\n print(fmt % (\"CUDA driver version\", dv.value))\n print(\"CUDA libraries:\")\n cudadrv.libs.test(sys.platform, print_paths=False)\n except:\n print(\n \"Error: Probing CUDA failed (device and driver present, runtime problem?)\\n\")\n\n print(\"\")\n print(\"__ROC Information__\")\n roc_is_available = roc.is_available()\n print(fmt % (\"ROC available\", roc_is_available))\n\n toolchains = []\n try:\n libhlc.HLC()\n toolchains.append('librocmlite library')\n except:\n pass\n try:\n cmd = hlc.CmdLine().check_tooling()\n toolchains.append('ROC command line tools')\n except:\n pass\n\n # if no ROC try and report why\n if not roc_is_available:\n from numba.roc.hsadrv.driver import hsa\n try:\n hsa.is_available\n except BaseException as e:\n msg = str(e)\n else:\n msg = 'No ROC toolchains found.'\n print(fmt % (\"Error initialising ROC due to\", msg))\n\n if toolchains:\n print(fmt % (\"Available Toolchains\", ', '.join(toolchains)))\n\n try:\n # ROC might not be available due to lack of tool chain, but HSA\n # agents may be listed\n from numba.roc.hsadrv.driver import hsa, dgpu_count\n decode = lambda x: x.decode('utf-8') if isinstance(x, bytes) else x\n print(\"\\nFound %s HSA Agents:\" % len(hsa.agents))\n for i, agent in enumerate(hsa.agents):\n print('Agent id : %s' % i)\n print(' vendor: %s' % 
decode(agent.vendor_name))\n print(' name: %s' % decode(agent.name))\n print(' type: %s' % agent.device)\n print(\"\")\n\n _dgpus = []\n for a in hsa.agents:\n if a.is_component and a.device == 'GPU':\n _dgpus.append(decode(a.name))\n print(fmt % (\"Found %s discrete GPU(s)\" % dgpu_count(), \\\n ', '.join(_dgpus)))\n except Exception as e:\n print(\"No HSA Agents found, encountered exception when searching:\")\n print(e)\n\n\n print(\"\")\n print(\"__SVML Information__\")\n # replicate some SVML detection logic from numba.__init__ here.\n # if SVML load fails in numba.__init__ the splitting of the logic\n # here will help diagnosis of the underlying issue\n have_svml_library = True\n try:\n if sys.platform.startswith('linux'):\n llvmbind.load_library_permanently(\"libsvml.so\")\n elif sys.platform.startswith('darwin'):\n llvmbind.load_library_permanently(\"libsvml.dylib\")\n elif sys.platform.startswith('win'):\n llvmbind.load_library_permanently(\"svml_dispmd\")\n else:\n have_svml_library = False\n except:\n have_svml_library = False\n func = getattr(llvmbind.targets, \"has_svml\", None)\n llvm_svml_patched = func() if func is not None else False\n svml_operational = (config.USING_SVML and llvm_svml_patched \\\n and have_svml_library)\n print(fmt % (\"SVML state, config.USING_SVML\", config.USING_SVML))\n print(fmt % (\"SVML library found and loaded\", have_svml_library))\n print(fmt % (\"llvmlite using SVML patched LLVM\", llvm_svml_patched))\n print(fmt % (\"SVML operational:\", svml_operational))\n\n # Look for conda and conda information\n print(\"\")\n print(\"__Conda Information__\")\n cmd = [\"conda\", \"info\", \"--json\"]\n try:\n conda_out = check_output(cmd)\n except Exception as e:\n print(\n \"Conda not present/not working.\\nError was %s\\n\" % e)\n else:\n data = ''.join(conda_out.decode(\"utf-8\").splitlines())\n jsond = json.loads(data)\n keys = ['conda_build_version',\n 'conda_env_version',\n 'platform',\n 'python_version',\n 'root_writable']\n for k in keys:\n try:\n print(fmt % (k, jsond[k]))\n except KeyError:\n pass\n\n # get info about current environment\n cmd = [\"conda\", \"list\"]\n try:\n conda_out = check_output(cmd)\n except CalledProcessError as e:\n print(\"Error: Conda command failed. 
Error was %s\\n\" % e.output)\n else:\n print(\"\")\n print(\"__Current Conda Env__\")\n data = conda_out.decode(\"utf-8\").splitlines()\n for k in data:\n if k[0] != '#': # don't show where the env is, personal data\n print(k)\n\n print(\"-\" * 80)\n\n except Exception as e:\n print(\"Error: The system reporting tool has failed unexpectedly.\")\n print(\"Exception was:\")\n print(e)\n\n finally:\n print(\n \"%s\" %\n \"If requested, please copy and paste the information between\\n\"\n \"the dashed (----) lines, or from a given specific section as\\n\"\n \"appropriate.\\n\\n\"\n \"=============================================================\\n\"\n \"IMPORTANT: Please ensure that you are happy with sharing the\\n\"\n \"contents of the information present, any information that you\\n\"\n \"wish to keep private you should remove before sharing.\\n\"\n \"=============================================================\\n\")\n\n\ndef make_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--annotate', help='Annotate source',\n action='store_true')\n parser.add_argument('--dump-llvm', action=\"store_true\",\n help='Print generated llvm assembly')\n parser.add_argument('--dump-optimized', action='store_true',\n help='Dump the optimized llvm assembly')\n parser.add_argument('--dump-assembly', action='store_true',\n help='Dump the LLVM generated assembly')\n parser.add_argument('--dump-cfg', action=\"store_true\",\n help='[Deprecated] Dump the control flow graph')\n parser.add_argument('--dump-ast', action=\"store_true\",\n help='[Deprecated] Dump the AST')\n parser.add_argument('--annotate-html', nargs=1,\n help='Output source annotation as html')\n parser.add_argument('-s', '--sysinfo', action=\"store_true\",\n help='Output system information for bug reporting')\n parser.add_argument('filename', nargs='?', help='Python source filename')\n return parser\n\n\ndef main():\n parser = make_parser()\n args = parser.parse_args()\n\n if args.dump_cfg:\n print(\"CFG dump is removed.\")\n sys.exit(1)\n if args.dump_ast:\n print(\"AST dump is removed. Numba no longer depends on AST.\")\n sys.exit(1)\n\n if args.sysinfo:\n print(\"System info:\")\n get_sys_info()\n sys.exit(0)\n\n os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate))\n if args.annotate_html is not None:\n try:\n from jinja2 import Template\n except ImportError:\n raise ImportError(\"Please install the 'jinja2' package\")\n os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0])\n os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm))\n os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized))\n os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly))\n\n if args.filename:\n cmd = [sys.executable, args.filename]\n subprocess.call(cmd)\n else:\n print(\"numba: error: the following arguments are required: filename\")\n sys.exit(1)\n", "path": "numba/numba_entry.py"}]}
| 3,761 | 750 |
gh_patches_debug_28794
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-970
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rotating TextItem doesn't work
I want to dynamically rotate a TextItem but cannot get it to work. Changing the position or anchor with setPos and setAnchor updates the item, but changing the angle with setAngle doesn't update the text. The strangest thing is that it does update once I drag the canvas. I am wondering what signal or function to call so that the item updates immediately.
Below is some code that should move and rotate a TextItem.
```python
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
app = QtGui.QApplication([])
w = pg.GraphicsView()
w.show()
w.resize(800,800)
view = pg.ViewBox()
w.setCentralItem(view)
view.setAspectLocked(True)
view.setRange(QtCore.QRectF(0, 0, 200, 200))
anchor = pg.TextItem()
anchor.setText("hey")
anchor.setColor(QtGui.QColor(255, 255, 255))
view.addItem(anchor)
def rotate():
x, y = anchor.pos()
anchor.setPos(x + 1, y + 1)
anchor.setAngle(anchor.angle + 10)
timer = QtCore.QTimer()
timer.timeout.connect(rotate)
timer.start(1000)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsItems/TextItem.py`
Content:
```
1 import numpy as np
2 from ..Qt import QtCore, QtGui
3 from ..Point import Point
4 from .. import functions as fn
5 from .GraphicsObject import GraphicsObject
6
7
8 class TextItem(GraphicsObject):
9 """
10 GraphicsItem displaying unscaled text (the text will always appear normal even inside a scaled ViewBox).
11 """
12 def __init__(self, text='', color=(200,200,200), html=None, anchor=(0,0),
13 border=None, fill=None, angle=0, rotateAxis=None):
14 """
15 ============== =================================================================================
16 **Arguments:**
17 *text* The text to display
18 *color* The color of the text (any format accepted by pg.mkColor)
19 *html* If specified, this overrides both *text* and *color*
20 *anchor* A QPointF or (x,y) sequence indicating what region of the text box will
21 be anchored to the item's position. A value of (0,0) sets the upper-left corner
22 of the text box to be at the position specified by setPos(), while a value of (1,1)
23 sets the lower-right corner.
24 *border* A pen to use when drawing the border
25 *fill* A brush to use when filling within the border
26 *angle* Angle in degrees to rotate text. Default is 0; text will be displayed upright.
27 *rotateAxis* If None, then a text angle of 0 always points along the +x axis of the scene.
28 If a QPointF or (x,y) sequence is given, then it represents a vector direction
29 in the parent's coordinate system that the 0-degree line will be aligned to. This
30 Allows text to follow both the position and orientation of its parent while still
31 discarding any scale and shear factors.
32 ============== =================================================================================
33
34
35 The effects of the `rotateAxis` and `angle` arguments are added independently. So for example:
36
37 * rotateAxis=None, angle=0 -> normal horizontal text
38 * rotateAxis=None, angle=90 -> normal vertical text
39 * rotateAxis=(1, 0), angle=0 -> text aligned with x axis of its parent
40 * rotateAxis=(0, 1), angle=0 -> text aligned with y axis of its parent
41 * rotateAxis=(1, 0), angle=90 -> text orthogonal to x axis of its parent
42 """
43
44 self.anchor = Point(anchor)
45 self.rotateAxis = None if rotateAxis is None else Point(rotateAxis)
46 #self.angle = 0
47 GraphicsObject.__init__(self)
48 self.textItem = QtGui.QGraphicsTextItem()
49 self.textItem.setParentItem(self)
50 self._lastTransform = None
51 self._lastScene = None
52 self._bounds = QtCore.QRectF()
53 if html is None:
54 self.setColor(color)
55 self.setText(text)
56 else:
57 self.setHtml(html)
58 self.fill = fn.mkBrush(fill)
59 self.border = fn.mkPen(border)
60 self.setAngle(angle)
61
62 def setText(self, text, color=None):
63 """
64 Set the text of this item.
65
66 This method sets the plain text of the item; see also setHtml().
67 """
68 if color is not None:
69 self.setColor(color)
70 self.textItem.setPlainText(text)
71 self.updateTextPos()
72
73 def setPlainText(self, *args):
74 """
75 Set the plain text to be rendered by this item.
76
77 See QtGui.QGraphicsTextItem.setPlainText().
78 """
79 self.textItem.setPlainText(*args)
80 self.updateTextPos()
81
82 def setHtml(self, *args):
83 """
84 Set the HTML code to be rendered by this item.
85
86 See QtGui.QGraphicsTextItem.setHtml().
87 """
88 self.textItem.setHtml(*args)
89 self.updateTextPos()
90
91 def setTextWidth(self, *args):
92 """
93 Set the width of the text.
94
95 If the text requires more space than the width limit, then it will be
96 wrapped into multiple lines.
97
98 See QtGui.QGraphicsTextItem.setTextWidth().
99 """
100 self.textItem.setTextWidth(*args)
101 self.updateTextPos()
102
103 def setFont(self, *args):
104 """
105 Set the font for this text.
106
107 See QtGui.QGraphicsTextItem.setFont().
108 """
109 self.textItem.setFont(*args)
110 self.updateTextPos()
111
112 def setAngle(self, angle):
113 self.angle = angle
114 self.updateTransform()
115
116 def setAnchor(self, anchor):
117 self.anchor = Point(anchor)
118 self.updateTextPos()
119
120 def setColor(self, color):
121 """
122 Set the color for this text.
123
124 See QtGui.QGraphicsItem.setDefaultTextColor().
125 """
126 self.color = fn.mkColor(color)
127 self.textItem.setDefaultTextColor(self.color)
128
129 def updateTextPos(self):
130 # update text position to obey anchor
131 r = self.textItem.boundingRect()
132 tl = self.textItem.mapToParent(r.topLeft())
133 br = self.textItem.mapToParent(r.bottomRight())
134 offset = (br - tl) * self.anchor
135 self.textItem.setPos(-offset)
136
137 ### Needed to maintain font size when rendering to image with increased resolution
138 #self.textItem.resetTransform()
139 ##self.textItem.rotate(self.angle)
140 #if self._exportOpts is not False and 'resolutionScale' in self._exportOpts:
141 #s = self._exportOpts['resolutionScale']
142 #self.textItem.scale(s, s)
143
144 def boundingRect(self):
145 return self.textItem.mapToParent(self.textItem.boundingRect()).boundingRect()
146
147 def viewTransformChanged(self):
148 # called whenever view transform has changed.
149 # Do this here to avoid double-updates when view changes.
150 self.updateTransform()
151
152 def paint(self, p, *args):
153 # this is not ideal because it requires the transform to be updated at every draw.
154 # ideally, we would have a sceneTransformChanged event to react to..
155 s = self.scene()
156 ls = self._lastScene
157 if s is not ls:
158 if ls is not None:
159 ls.sigPrepareForPaint.disconnect(self.updateTransform)
160 self._lastScene = s
161 if s is not None:
162 s.sigPrepareForPaint.connect(self.updateTransform)
163 self.updateTransform()
164 p.setTransform(self.sceneTransform())
165
166 if self.border.style() != QtCore.Qt.NoPen or self.fill.style() != QtCore.Qt.NoBrush:
167 p.setPen(self.border)
168 p.setBrush(self.fill)
169 p.setRenderHint(p.Antialiasing, True)
170 p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))
171
172 def updateTransform(self):
173 # update transform such that this item has the correct orientation
174 # and scaling relative to the scene, but inherits its position from its
175 # parent.
176 # This is similar to setting ItemIgnoresTransformations = True, but
177 # does not break mouse interaction and collision detection.
178 p = self.parentItem()
179 if p is None:
180 pt = QtGui.QTransform()
181 else:
182 pt = p.sceneTransform()
183
184 if pt == self._lastTransform:
185 return
186
187 t = pt.inverted()[0]
188 # reset translation
189 t.setMatrix(t.m11(), t.m12(), t.m13(), t.m21(), t.m22(), t.m23(), 0, 0, t.m33())
190
191 # apply rotation
192 angle = -self.angle
193 if self.rotateAxis is not None:
194 d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))
195 a = np.arctan2(d.y(), d.x()) * 180 / np.pi
196 angle += a
197 t.rotate(angle)
198
199 self.setTransform(t)
200
201 self._lastTransform = pt
202
203 self.updateTextPos()
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/graphicsItems/TextItem.py b/pyqtgraph/graphicsItems/TextItem.py
--- a/pyqtgraph/graphicsItems/TextItem.py
+++ b/pyqtgraph/graphicsItems/TextItem.py
@@ -110,9 +110,16 @@
self.updateTextPos()
def setAngle(self, angle):
+ """
+ Set the angle of the text in degrees.
+
+ This sets the rotation angle of the text as a whole, measured
+ counter-clockwise from the x axis of the parent. Note that this rotation
+ angle does not depend on horizontal/vertical scaling of the parent.
+ """
self.angle = angle
- self.updateTransform()
-
+ self.updateTransform(force=True)
+
def setAnchor(self, anchor):
self.anchor = Point(anchor)
self.updateTextPos()
@@ -169,7 +176,7 @@
p.setRenderHint(p.Antialiasing, True)
p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))
- def updateTransform(self):
+ def updateTransform(self, force=False):
# update transform such that this item has the correct orientation
# and scaling relative to the scene, but inherits its position from its
# parent.
@@ -181,7 +188,7 @@
else:
pt = p.sceneTransform()
- if pt == self._lastTransform:
+ if not force and pt == self._lastTransform:
return
t = pt.inverted()[0]
|
{"golden_diff": "diff --git a/pyqtgraph/graphicsItems/TextItem.py b/pyqtgraph/graphicsItems/TextItem.py\n--- a/pyqtgraph/graphicsItems/TextItem.py\n+++ b/pyqtgraph/graphicsItems/TextItem.py\n@@ -110,9 +110,16 @@\n self.updateTextPos()\n \n def setAngle(self, angle):\n+ \"\"\"\n+ Set the angle of the text in degrees.\n+\n+ This sets the rotation angle of the text as a whole, measured\n+ counter-clockwise from the x axis of the parent. Note that this rotation\n+ angle does not depend on horizontal/vertical scaling of the parent.\n+ \"\"\"\n self.angle = angle\n- self.updateTransform()\n- \n+ self.updateTransform(force=True)\n+\n def setAnchor(self, anchor):\n self.anchor = Point(anchor)\n self.updateTextPos()\n@@ -169,7 +176,7 @@\n p.setRenderHint(p.Antialiasing, True)\n p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))\n \n- def updateTransform(self):\n+ def updateTransform(self, force=False):\n # update transform such that this item has the correct orientation\n # and scaling relative to the scene, but inherits its position from its\n # parent.\n@@ -181,7 +188,7 @@\n else:\n pt = p.sceneTransform()\n \n- if pt == self._lastTransform:\n+ if not force and pt == self._lastTransform:\n return\n \n t = pt.inverted()[0]\n", "issue": "Rotating TextItem dosen't work\nI want to dynamically rotate TextItem but cannot get it to work. Changing position or anchor with setPos and setAnchor updates the item, but wanting to change angel with setAngle doesn't update the text. The strangest thing is that it does update once I drag the canvas. I am wondering what signal or function to call so that the item updates immediately. \r\nBelow is some code that should move and rotate a TextItem.\r\n```python\r\nfrom pyqtgraph.Qt import QtCore, QtGui\r\nimport pyqtgraph as pg\r\n\r\napp = QtGui.QApplication([])\r\n\r\nw = pg.GraphicsView()\r\nw.show()\r\nw.resize(800,800)\r\n\r\nview = pg.ViewBox()\r\nw.setCentralItem(view)\r\nview.setAspectLocked(True)\r\nview.setRange(QtCore.QRectF(0, 0, 200, 200))\r\n\r\nanchor = pg.TextItem()\r\nanchor.setText(\"hey\")\r\nanchor.setColor(QtGui.QColor(255, 255, 255))\r\n\r\nview.addItem(anchor)\r\n\r\n\r\ndef rotate():\r\n x, y = anchor.pos()\r\n anchor.setPos(x + 1, y + 1)\r\n anchor.setAngle(anchor.angle + 10)\r\n\r\n\r\ntimer = QtCore.QTimer()\r\ntimer.timeout.connect(rotate)\r\ntimer.start(1000)\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n```\n", "before_files": [{"content": "import numpy as np\nfrom ..Qt import QtCore, QtGui\nfrom ..Point import Point\nfrom .. import functions as fn\nfrom .GraphicsObject import GraphicsObject\n\n\nclass TextItem(GraphicsObject):\n \"\"\"\n GraphicsItem displaying unscaled text (the text will always appear normal even inside a scaled ViewBox). \n \"\"\"\n def __init__(self, text='', color=(200,200,200), html=None, anchor=(0,0),\n border=None, fill=None, angle=0, rotateAxis=None):\n \"\"\"\n ============== =================================================================================\n **Arguments:**\n *text* The text to display\n *color* The color of the text (any format accepted by pg.mkColor)\n *html* If specified, this overrides both *text* and *color*\n *anchor* A QPointF or (x,y) sequence indicating what region of the text box will\n be anchored to the item's position. 
A value of (0,0) sets the upper-left corner\n of the text box to be at the position specified by setPos(), while a value of (1,1)\n sets the lower-right corner.\n *border* A pen to use when drawing the border\n *fill* A brush to use when filling within the border\n *angle* Angle in degrees to rotate text. Default is 0; text will be displayed upright.\n *rotateAxis* If None, then a text angle of 0 always points along the +x axis of the scene.\n If a QPointF or (x,y) sequence is given, then it represents a vector direction\n in the parent's coordinate system that the 0-degree line will be aligned to. This\n Allows text to follow both the position and orientation of its parent while still\n discarding any scale and shear factors.\n ============== =================================================================================\n\n\n The effects of the `rotateAxis` and `angle` arguments are added independently. So for example:\n\n * rotateAxis=None, angle=0 -> normal horizontal text\n * rotateAxis=None, angle=90 -> normal vertical text\n * rotateAxis=(1, 0), angle=0 -> text aligned with x axis of its parent\n * rotateAxis=(0, 1), angle=0 -> text aligned with y axis of its parent\n * rotateAxis=(1, 0), angle=90 -> text orthogonal to x axis of its parent \n \"\"\"\n \n self.anchor = Point(anchor)\n self.rotateAxis = None if rotateAxis is None else Point(rotateAxis)\n #self.angle = 0\n GraphicsObject.__init__(self)\n self.textItem = QtGui.QGraphicsTextItem()\n self.textItem.setParentItem(self)\n self._lastTransform = None\n self._lastScene = None\n self._bounds = QtCore.QRectF()\n if html is None:\n self.setColor(color)\n self.setText(text)\n else:\n self.setHtml(html)\n self.fill = fn.mkBrush(fill)\n self.border = fn.mkPen(border)\n self.setAngle(angle)\n\n def setText(self, text, color=None):\n \"\"\"\n Set the text of this item. \n \n This method sets the plain text of the item; see also setHtml().\n \"\"\"\n if color is not None:\n self.setColor(color)\n self.textItem.setPlainText(text)\n self.updateTextPos()\n \n def setPlainText(self, *args):\n \"\"\"\n Set the plain text to be rendered by this item. \n \n See QtGui.QGraphicsTextItem.setPlainText().\n \"\"\"\n self.textItem.setPlainText(*args)\n self.updateTextPos()\n \n def setHtml(self, *args):\n \"\"\"\n Set the HTML code to be rendered by this item. \n \n See QtGui.QGraphicsTextItem.setHtml().\n \"\"\"\n self.textItem.setHtml(*args)\n self.updateTextPos()\n \n def setTextWidth(self, *args):\n \"\"\"\n Set the width of the text.\n \n If the text requires more space than the width limit, then it will be\n wrapped into multiple lines.\n \n See QtGui.QGraphicsTextItem.setTextWidth().\n \"\"\"\n self.textItem.setTextWidth(*args)\n self.updateTextPos()\n \n def setFont(self, *args):\n \"\"\"\n Set the font for this text. 
\n \n See QtGui.QGraphicsTextItem.setFont().\n \"\"\"\n self.textItem.setFont(*args)\n self.updateTextPos()\n \n def setAngle(self, angle):\n self.angle = angle\n self.updateTransform()\n \n def setAnchor(self, anchor):\n self.anchor = Point(anchor)\n self.updateTextPos()\n\n def setColor(self, color):\n \"\"\"\n Set the color for this text.\n \n See QtGui.QGraphicsItem.setDefaultTextColor().\n \"\"\"\n self.color = fn.mkColor(color)\n self.textItem.setDefaultTextColor(self.color)\n \n def updateTextPos(self):\n # update text position to obey anchor\n r = self.textItem.boundingRect()\n tl = self.textItem.mapToParent(r.topLeft())\n br = self.textItem.mapToParent(r.bottomRight())\n offset = (br - tl) * self.anchor\n self.textItem.setPos(-offset)\n \n ### Needed to maintain font size when rendering to image with increased resolution\n #self.textItem.resetTransform()\n ##self.textItem.rotate(self.angle)\n #if self._exportOpts is not False and 'resolutionScale' in self._exportOpts:\n #s = self._exportOpts['resolutionScale']\n #self.textItem.scale(s, s)\n \n def boundingRect(self):\n return self.textItem.mapToParent(self.textItem.boundingRect()).boundingRect()\n\n def viewTransformChanged(self):\n # called whenever view transform has changed.\n # Do this here to avoid double-updates when view changes.\n self.updateTransform()\n \n def paint(self, p, *args):\n # this is not ideal because it requires the transform to be updated at every draw.\n # ideally, we would have a sceneTransformChanged event to react to..\n s = self.scene()\n ls = self._lastScene\n if s is not ls:\n if ls is not None:\n ls.sigPrepareForPaint.disconnect(self.updateTransform)\n self._lastScene = s\n if s is not None:\n s.sigPrepareForPaint.connect(self.updateTransform)\n self.updateTransform()\n p.setTransform(self.sceneTransform())\n \n if self.border.style() != QtCore.Qt.NoPen or self.fill.style() != QtCore.Qt.NoBrush:\n p.setPen(self.border)\n p.setBrush(self.fill)\n p.setRenderHint(p.Antialiasing, True)\n p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))\n \n def updateTransform(self):\n # update transform such that this item has the correct orientation\n # and scaling relative to the scene, but inherits its position from its\n # parent.\n # This is similar to setting ItemIgnoresTransformations = True, but \n # does not break mouse interaction and collision detection.\n p = self.parentItem()\n if p is None:\n pt = QtGui.QTransform()\n else:\n pt = p.sceneTransform()\n \n if pt == self._lastTransform:\n return\n\n t = pt.inverted()[0]\n # reset translation\n t.setMatrix(t.m11(), t.m12(), t.m13(), t.m21(), t.m22(), t.m23(), 0, 0, t.m33())\n \n # apply rotation\n angle = -self.angle\n if self.rotateAxis is not None:\n d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))\n a = np.arctan2(d.y(), d.x()) * 180 / np.pi\n angle += a\n t.rotate(angle)\n \n self.setTransform(t)\n \n self._lastTransform = pt\n \n self.updateTextPos()\n", "path": "pyqtgraph/graphicsItems/TextItem.py"}], "after_files": [{"content": "import numpy as np\nfrom ..Qt import QtCore, QtGui\nfrom ..Point import Point\nfrom .. import functions as fn\nfrom .GraphicsObject import GraphicsObject\n\n\nclass TextItem(GraphicsObject):\n \"\"\"\n GraphicsItem displaying unscaled text (the text will always appear normal even inside a scaled ViewBox). 
\n \"\"\"\n def __init__(self, text='', color=(200,200,200), html=None, anchor=(0,0),\n border=None, fill=None, angle=0, rotateAxis=None):\n \"\"\"\n ============== =================================================================================\n **Arguments:**\n *text* The text to display\n *color* The color of the text (any format accepted by pg.mkColor)\n *html* If specified, this overrides both *text* and *color*\n *anchor* A QPointF or (x,y) sequence indicating what region of the text box will\n be anchored to the item's position. A value of (0,0) sets the upper-left corner\n of the text box to be at the position specified by setPos(), while a value of (1,1)\n sets the lower-right corner.\n *border* A pen to use when drawing the border\n *fill* A brush to use when filling within the border\n *angle* Angle in degrees to rotate text. Default is 0; text will be displayed upright.\n *rotateAxis* If None, then a text angle of 0 always points along the +x axis of the scene.\n If a QPointF or (x,y) sequence is given, then it represents a vector direction\n in the parent's coordinate system that the 0-degree line will be aligned to. This\n Allows text to follow both the position and orientation of its parent while still\n discarding any scale and shear factors.\n ============== =================================================================================\n\n\n The effects of the `rotateAxis` and `angle` arguments are added independently. So for example:\n\n * rotateAxis=None, angle=0 -> normal horizontal text\n * rotateAxis=None, angle=90 -> normal vertical text\n * rotateAxis=(1, 0), angle=0 -> text aligned with x axis of its parent\n * rotateAxis=(0, 1), angle=0 -> text aligned with y axis of its parent\n * rotateAxis=(1, 0), angle=90 -> text orthogonal to x axis of its parent \n \"\"\"\n \n self.anchor = Point(anchor)\n self.rotateAxis = None if rotateAxis is None else Point(rotateAxis)\n #self.angle = 0\n GraphicsObject.__init__(self)\n self.textItem = QtGui.QGraphicsTextItem()\n self.textItem.setParentItem(self)\n self._lastTransform = None\n self._lastScene = None\n self._bounds = QtCore.QRectF()\n if html is None:\n self.setColor(color)\n self.setText(text)\n else:\n self.setHtml(html)\n self.fill = fn.mkBrush(fill)\n self.border = fn.mkPen(border)\n self.setAngle(angle)\n\n def setText(self, text, color=None):\n \"\"\"\n Set the text of this item. \n \n This method sets the plain text of the item; see also setHtml().\n \"\"\"\n if color is not None:\n self.setColor(color)\n self.textItem.setPlainText(text)\n self.updateTextPos()\n \n def setPlainText(self, *args):\n \"\"\"\n Set the plain text to be rendered by this item. \n \n See QtGui.QGraphicsTextItem.setPlainText().\n \"\"\"\n self.textItem.setPlainText(*args)\n self.updateTextPos()\n \n def setHtml(self, *args):\n \"\"\"\n Set the HTML code to be rendered by this item. \n \n See QtGui.QGraphicsTextItem.setHtml().\n \"\"\"\n self.textItem.setHtml(*args)\n self.updateTextPos()\n \n def setTextWidth(self, *args):\n \"\"\"\n Set the width of the text.\n \n If the text requires more space than the width limit, then it will be\n wrapped into multiple lines.\n \n See QtGui.QGraphicsTextItem.setTextWidth().\n \"\"\"\n self.textItem.setTextWidth(*args)\n self.updateTextPos()\n \n def setFont(self, *args):\n \"\"\"\n Set the font for this text. 
\n \n See QtGui.QGraphicsTextItem.setFont().\n \"\"\"\n self.textItem.setFont(*args)\n self.updateTextPos()\n \n def setAngle(self, angle):\n \"\"\"\n Set the angle of the text in degrees.\n\n This sets the rotation angle of the text as a whole, measured\n counter-clockwise from the x axis of the parent. Note that this rotation\n angle does not depend on horizontal/vertical scaling of the parent.\n \"\"\"\n self.angle = angle\n self.updateTransform(force=True)\n\n def setAnchor(self, anchor):\n self.anchor = Point(anchor)\n self.updateTextPos()\n\n def setColor(self, color):\n \"\"\"\n Set the color for this text.\n \n See QtGui.QGraphicsItem.setDefaultTextColor().\n \"\"\"\n self.color = fn.mkColor(color)\n self.textItem.setDefaultTextColor(self.color)\n \n def updateTextPos(self):\n # update text position to obey anchor\n r = self.textItem.boundingRect()\n tl = self.textItem.mapToParent(r.topLeft())\n br = self.textItem.mapToParent(r.bottomRight())\n offset = (br - tl) * self.anchor\n self.textItem.setPos(-offset)\n \n ### Needed to maintain font size when rendering to image with increased resolution\n #self.textItem.resetTransform()\n ##self.textItem.rotate(self.angle)\n #if self._exportOpts is not False and 'resolutionScale' in self._exportOpts:\n #s = self._exportOpts['resolutionScale']\n #self.textItem.scale(s, s)\n \n def boundingRect(self):\n return self.textItem.mapToParent(self.textItem.boundingRect()).boundingRect()\n\n def viewTransformChanged(self):\n # called whenever view transform has changed.\n # Do this here to avoid double-updates when view changes.\n self.updateTransform()\n \n def paint(self, p, *args):\n # this is not ideal because it requires the transform to be updated at every draw.\n # ideally, we would have a sceneTransformChanged event to react to..\n s = self.scene()\n ls = self._lastScene\n if s is not ls:\n if ls is not None:\n ls.sigPrepareForPaint.disconnect(self.updateTransform)\n self._lastScene = s\n if s is not None:\n s.sigPrepareForPaint.connect(self.updateTransform)\n self.updateTransform()\n p.setTransform(self.sceneTransform())\n \n if self.border.style() != QtCore.Qt.NoPen or self.fill.style() != QtCore.Qt.NoBrush:\n p.setPen(self.border)\n p.setBrush(self.fill)\n p.setRenderHint(p.Antialiasing, True)\n p.drawPolygon(self.textItem.mapToParent(self.textItem.boundingRect()))\n \n def updateTransform(self, force=False):\n # update transform such that this item has the correct orientation\n # and scaling relative to the scene, but inherits its position from its\n # parent.\n # This is similar to setting ItemIgnoresTransformations = True, but \n # does not break mouse interaction and collision detection.\n p = self.parentItem()\n if p is None:\n pt = QtGui.QTransform()\n else:\n pt = p.sceneTransform()\n \n if not force and pt == self._lastTransform:\n return\n\n t = pt.inverted()[0]\n # reset translation\n t.setMatrix(t.m11(), t.m12(), t.m13(), t.m21(), t.m22(), t.m23(), 0, 0, t.m33())\n \n # apply rotation\n angle = -self.angle\n if self.rotateAxis is not None:\n d = pt.map(self.rotateAxis) - pt.map(Point(0, 0))\n a = np.arctan2(d.y(), d.x()) * 180 / np.pi\n angle += a\n t.rotate(angle)\n \n self.setTransform(t)\n \n self._lastTransform = pt\n \n self.updateTextPos()\n", "path": "pyqtgraph/graphicsItems/TextItem.py"}]}
| 2,800 | 348 |
gh_patches_debug_16627
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-3551
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support .json and .ubj model format for XGBoost server image
/kind feature
**Description**
In the XGBoost image, the only supported model format is .bst: https://github.com/kserve/kserve/blob/56b8fe0d189fc0d557e9a8af07eab0c12852d5fd/python/xgbserver/xgbserver/model.py#L28
This format has been deprecated for a while and is not backwards compatible between xgboost framework versions. The recommended model format is .json or .ubj: https://xgboost.readthedocs.io/en/stable/tutorials/saving_model.html
Users who want to use the recommended model formats for their XGBoost models are currently not able to do so.
**Proposed solution**
Support the recommended file formats, while also keeping support for the old .bst format.
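For illustration, a minimal sketch of how the recommended formats are written and read with the standard `xgboost` Python API (the file names and toy data here are hypothetical, not part of the server code):
```python
import numpy as np
import xgboost as xgb

# Train a tiny throwaway booster just to have something to save.
X = np.random.rand(20, 4)
y = np.random.randint(2, size=20)
booster = xgb.train({"objective": "binary:logistic"}, xgb.DMatrix(X, label=y), num_boost_round=2)

# Recommended, version-stable formats (chosen by file extension):
booster.save_model("model.json")  # JSON
booster.save_model("model.ubj")   # Universal Binary JSON

# Legacy binary format that the server currently expects:
booster.save_model("model.bst")

# Loading mirrors what xgbserver does today, just with a different extension:
loaded = xgb.Booster(params={"nthread": 1}, model_file="model.json")
```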
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/xgbserver/xgbserver/model.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import os
17 from typing import Dict, Union
18
19 import xgboost as xgb
20 from kserve.errors import InferenceError, ModelMissingError
21 from kserve.protocol.infer_type import InferRequest, InferResponse
22 from kserve.utils.utils import get_predict_input, get_predict_response
23 from xgboost import XGBModel
24
25 from kserve import Model
26 from kserve.storage import Storage
27
28 BOOSTER_FILE_EXTENSION = ".bst"
29
30
31 class XGBoostModel(Model):
32 def __init__(
33 self, name: str, model_dir: str, nthread: int, booster: XGBModel = None
34 ):
35 super().__init__(name)
36 self.name = name
37 self.model_dir = model_dir
38 self.nthread = nthread
39 if booster is not None:
40 self._booster = booster
41 self.ready = True
42
43 def load(self) -> bool:
44 model_path = Storage.download(self.model_dir)
45 model_files = []
46 for file in os.listdir(model_path):
47 file_path = os.path.join(model_path, file)
48 if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSION):
49 model_files.append(file_path)
50 if len(model_files) == 0:
51 raise ModelMissingError(model_path)
52 elif len(model_files) > 1:
53 raise RuntimeError(
54 "More than one model file is detected, "
55 f"Only one is allowed within model_dir: {model_files}"
56 )
57
58 self._booster = xgb.Booster(
59 params={"nthread": self.nthread}, model_file=model_files[0]
60 )
61 self.ready = True
62 return self.ready
63
64 def predict(
65 self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None
66 ) -> Union[Dict, InferResponse]:
67 try:
68 # Use of list as input is deprecated see https://github.com/dmlc/xgboost/pull/3970
69 instances = get_predict_input(payload)
70 dmatrix = xgb.DMatrix(instances, nthread=self.nthread)
71 result = self._booster.predict(dmatrix)
72 return get_predict_response(payload, result, self.name)
73 except Exception as e:
74 raise InferenceError(str(e))
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/xgbserver/xgbserver/model.py b/python/xgbserver/xgbserver/model.py
--- a/python/xgbserver/xgbserver/model.py
+++ b/python/xgbserver/xgbserver/model.py
@@ -25,7 +25,7 @@
from kserve import Model
from kserve.storage import Storage
-BOOSTER_FILE_EXTENSION = ".bst"
+BOOSTER_FILE_EXTENSIONS = (".bst", ".json", ".ubj")
class XGBoostModel(Model):
@@ -45,7 +45,7 @@
model_files = []
for file in os.listdir(model_path):
file_path = os.path.join(model_path, file)
- if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSION):
+ if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSIONS):
model_files.append(file_path)
if len(model_files) == 0:
raise ModelMissingError(model_path)
|
{"golden_diff": "diff --git a/python/xgbserver/xgbserver/model.py b/python/xgbserver/xgbserver/model.py\n--- a/python/xgbserver/xgbserver/model.py\n+++ b/python/xgbserver/xgbserver/model.py\n@@ -25,7 +25,7 @@\n from kserve import Model\n from kserve.storage import Storage\n \n-BOOSTER_FILE_EXTENSION = \".bst\"\n+BOOSTER_FILE_EXTENSIONS = (\".bst\", \".json\", \".ubj\")\n \n \n class XGBoostModel(Model):\n@@ -45,7 +45,7 @@\n model_files = []\n for file in os.listdir(model_path):\n file_path = os.path.join(model_path, file)\n- if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSION):\n+ if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSIONS):\n model_files.append(file_path)\n if len(model_files) == 0:\n raise ModelMissingError(model_path)\n", "issue": "Support .json and .ubj model format for XGBoost server image\n/kind feature\r\n\r\n\r\n**Description**\r\nIn the XGBoost image, the only supported model format is .bst: https://github.com/kserve/kserve/blob/56b8fe0d189fc0d557e9a8af07eab0c12852d5fd/python/xgbserver/xgbserver/model.py#L28\r\n\r\nThis format has been deprecated for a while and is not backwards compatible between xgboost framework versions. The recommended model format is .json or .ubj: https://xgboost.readthedocs.io/en/stable/tutorials/saving_model.html\r\n\r\nUsers that want to use the recommended model format for XGBoost models, are currently not able to do so.\r\n\r\n\r\n**Proposed solution**\r\nSupport the recommended file formats, while also keeping support for the old .bst format. \r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom typing import Dict, Union\n\nimport xgboost as xgb\nfrom kserve.errors import InferenceError, ModelMissingError\nfrom kserve.protocol.infer_type import InferRequest, InferResponse\nfrom kserve.utils.utils import get_predict_input, get_predict_response\nfrom xgboost import XGBModel\n\nfrom kserve import Model\nfrom kserve.storage import Storage\n\nBOOSTER_FILE_EXTENSION = \".bst\"\n\n\nclass XGBoostModel(Model):\n def __init__(\n self, name: str, model_dir: str, nthread: int, booster: XGBModel = None\n ):\n super().__init__(name)\n self.name = name\n self.model_dir = model_dir\n self.nthread = nthread\n if booster is not None:\n self._booster = booster\n self.ready = True\n\n def load(self) -> bool:\n model_path = Storage.download(self.model_dir)\n model_files = []\n for file in os.listdir(model_path):\n file_path = os.path.join(model_path, file)\n if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSION):\n model_files.append(file_path)\n if len(model_files) == 0:\n raise ModelMissingError(model_path)\n elif len(model_files) > 1:\n raise RuntimeError(\n \"More than one model file is detected, \"\n f\"Only one is allowed within model_dir: {model_files}\"\n )\n\n self._booster = xgb.Booster(\n params={\"nthread\": self.nthread}, model_file=model_files[0]\n )\n self.ready = True\n return 
self.ready\n\n def predict(\n self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None\n ) -> Union[Dict, InferResponse]:\n try:\n # Use of list as input is deprecated see https://github.com/dmlc/xgboost/pull/3970\n instances = get_predict_input(payload)\n dmatrix = xgb.DMatrix(instances, nthread=self.nthread)\n result = self._booster.predict(dmatrix)\n return get_predict_response(payload, result, self.name)\n except Exception as e:\n raise InferenceError(str(e))\n", "path": "python/xgbserver/xgbserver/model.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom typing import Dict, Union\n\nimport xgboost as xgb\nfrom kserve.errors import InferenceError, ModelMissingError\nfrom kserve.protocol.infer_type import InferRequest, InferResponse\nfrom kserve.utils.utils import get_predict_input, get_predict_response\nfrom xgboost import XGBModel\n\nfrom kserve import Model\nfrom kserve.storage import Storage\n\nBOOSTER_FILE_EXTENSIONS = (\".bst\", \".json\", \".ubj\")\n\n\nclass XGBoostModel(Model):\n def __init__(\n self, name: str, model_dir: str, nthread: int, booster: XGBModel = None\n ):\n super().__init__(name)\n self.name = name\n self.model_dir = model_dir\n self.nthread = nthread\n if booster is not None:\n self._booster = booster\n self.ready = True\n\n def load(self) -> bool:\n model_path = Storage.download(self.model_dir)\n model_files = []\n for file in os.listdir(model_path):\n file_path = os.path.join(model_path, file)\n if os.path.isfile(file_path) and file.endswith(BOOSTER_FILE_EXTENSIONS):\n model_files.append(file_path)\n if len(model_files) == 0:\n raise ModelMissingError(model_path)\n elif len(model_files) > 1:\n raise RuntimeError(\n \"More than one model file is detected, \"\n f\"Only one is allowed within model_dir: {model_files}\"\n )\n\n self._booster = xgb.Booster(\n params={\"nthread\": self.nthread}, model_file=model_files[0]\n )\n self.ready = True\n return self.ready\n\n def predict(\n self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None\n ) -> Union[Dict, InferResponse]:\n try:\n # Use of list as input is deprecated see https://github.com/dmlc/xgboost/pull/3970\n instances = get_predict_input(payload)\n dmatrix = xgb.DMatrix(instances, nthread=self.nthread)\n result = self._booster.predict(dmatrix)\n return get_predict_response(payload, result, self.name)\n except Exception as e:\n raise InferenceError(str(e))\n", "path": "python/xgbserver/xgbserver/model.py"}]}
| 1,222 | 206 |
gh_patches_debug_47979
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-10664
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, a file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
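One way to verify your doctests locally before opening the PR is to load the file and run `doctest` on it yourself (a sketch; the path below is only an example):
```python
import doctest
import importlib.util

# Point this at the file you added doctests to.
path = "maths/power_using_recursion.py"

spec = importlib.util.spec_from_file_location("module_under_test", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

# verbose=True prints every example as it runs; the summary reports any failures.
print(doctest.testmod(module, verbose=True))
```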
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/power_using_recursion.py`
Content:
```
1 """
2 == Raise base to the power of exponent using recursion ==
3 Input -->
4 Enter the base: 3
5 Enter the exponent: 4
6 Output -->
7 3 to the power of 4 is 81
8 Input -->
9 Enter the base: 2
10 Enter the exponent: 0
11 Output -->
12 2 to the power of 0 is 1
13 """
14
15
16 def power(base: int, exponent: int) -> float:
17 """
18 >>> power(3, 4)
19 81
20 >>> power(2, 0)
21 1
22 >>> all(power(base, exponent) == pow(base, exponent)
23 ... for base in range(-10, 10) for exponent in range(10))
24 True
25 >>> power('a', 1)
26 'a'
27 >>> power('a', 2)
28 Traceback (most recent call last):
29 ...
30 TypeError: can't multiply sequence by non-int of type 'str'
31 >>> power('a', 'b')
32 Traceback (most recent call last):
33 ...
34 TypeError: unsupported operand type(s) for -: 'str' and 'int'
35 >>> power(2, -1)
36 Traceback (most recent call last):
37 ...
38 RecursionError: maximum recursion depth exceeded
39 """
40 return base * power(base, (exponent - 1)) if exponent else 1
41
42
43 if __name__ == "__main__":
44 from doctests import testmod
45
46 testmod()
47 print("Raise base to the power of exponent using recursion...")
48 base = int(input("Enter the base: ").strip())
49 exponent = int(input("Enter the exponent: ").strip())
50 result = power(base, abs(exponent))
51 if exponent < 0: # power() does not properly deal w/ negative exponents
52 result = 1 / result
53 print(f"{base} to the power of {exponent} is {result}")
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py
--- a/maths/power_using_recursion.py
+++ b/maths/power_using_recursion.py
@@ -15,6 +15,8 @@
def power(base: int, exponent: int) -> float:
"""
+ Calculate the power of a base raised to an exponent.
+
>>> power(3, 4)
81
>>> power(2, 0)
|
{"golden_diff": "diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py\n--- a/maths/power_using_recursion.py\n+++ b/maths/power_using_recursion.py\n@@ -15,6 +15,8 @@\n \n def power(base: int, exponent: int) -> float:\n \"\"\"\n+ Calculate the power of a base raised to an exponent.\n+\n >>> power(3, 4)\n 81\n >>> power(2, 0)\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. 
This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\n== Raise base to the power of exponent using recursion ==\n Input -->\n Enter the base: 3\n Enter the exponent: 4\n Output -->\n 3 to the power of 4 is 81\n Input -->\n Enter the base: 2\n Enter the exponent: 0\n Output -->\n 2 to the power of 0 is 1\n\"\"\"\n\n\ndef power(base: int, exponent: int) -> float:\n \"\"\"\n >>> power(3, 4)\n 81\n >>> power(2, 0)\n 1\n >>> all(power(base, exponent) == pow(base, exponent)\n ... for base in range(-10, 10) for exponent in range(10))\n True\n >>> power('a', 1)\n 'a'\n >>> power('a', 2)\n Traceback (most recent call last):\n ...\n TypeError: can't multiply sequence by non-int of type 'str'\n >>> power('a', 'b')\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'str' and 'int'\n >>> power(2, -1)\n Traceback (most recent call last):\n ...\n RecursionError: maximum recursion depth exceeded\n \"\"\"\n return base * power(base, (exponent - 1)) if exponent else 1\n\n\nif __name__ == \"__main__\":\n from doctests import testmod\n\n testmod()\n print(\"Raise base to the power of exponent using recursion...\")\n base = int(input(\"Enter the base: \").strip())\n exponent = int(input(\"Enter the exponent: \").strip())\n result = power(base, abs(exponent))\n if exponent < 0: # power() does not properly deal w/ negative exponents\n result = 1 / result\n print(f\"{base} to the power of {exponent} is {result}\")\n", "path": "maths/power_using_recursion.py"}], "after_files": [{"content": "\"\"\"\n== Raise base to the power of exponent using recursion ==\n Input -->\n Enter the base: 3\n Enter the exponent: 4\n Output -->\n 3 to the power of 4 is 81\n Input -->\n Enter the base: 2\n Enter the exponent: 0\n Output -->\n 2 to the power of 0 is 1\n\"\"\"\n\n\ndef power(base: int, exponent: int) -> float:\n \"\"\"\n Calculate the power of a base raised to an exponent.\n\n >>> power(3, 4)\n 81\n >>> power(2, 0)\n 1\n >>> all(power(base, exponent) == pow(base, exponent)\n ... for base in range(-10, 10) for exponent in range(10))\n True\n >>> power('a', 1)\n 'a'\n >>> power('a', 2)\n Traceback (most recent call last):\n ...\n TypeError: can't multiply sequence by non-int of type 'str'\n >>> power('a', 'b')\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'str' and 'int'\n >>> power(2, -1)\n Traceback (most recent call last):\n ...\n RecursionError: maximum recursion depth exceeded\n \"\"\"\n return base * power(base, (exponent - 1)) if exponent else 1\n\n\nif __name__ == \"__main__\":\n from doctests import testmod\n\n testmod()\n print(\"Raise base to the power of exponent using recursion...\")\n base = int(input(\"Enter the base: \").strip())\n exponent = int(input(\"Enter the exponent: \").strip())\n result = power(base, abs(exponent))\n if exponent < 0: # power() does not properly deal w/ negative exponents\n result = 1 / result\n print(f\"{base} to the power of {exponent} is {result}\")\n", "path": "maths/power_using_recursion.py"}]}
| 1,627 | 105 |
gh_patches_debug_40632
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-3296
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add fill parameter to utils.draw_bounding_boxes.
## 🚀 Feature
The fill parameter allows creating a semi-transparent box. This is particularly useful for the Mask RCNN model.
This would complete the utils for object detection and instance segmentation (at least with rectangular boxes).
## Motivation
In instance segmentation models, we also care about masks, not just the bounding box. The fill parameter allows us to fill boxes in a semi-transparent way. Also, this parameter is optional, so it does not affect performance when it is not used.
## Pitch
Add a param fill as follows
```
fill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
```
Here is complete running code with a few edits
```
@torch.no_grad()
def draw_bounding_boxes(
image: torch.Tensor,
boxes: torch.Tensor,
labels: Optional[List[str]] = None,
colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
fill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
width: int = 1,
font: Optional[str] = None,
font_size: int = 10
) -> torch.Tensor:
"""
Draws bounding boxes on given image.
The values of the input image should be uint8 between 0 and 255.
Args:
image (Tensor): Tensor of shape (C x H x W)
bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
`0 <= ymin < ymax < H`.
labels (List[str]): List containing the labels of bounding boxes.
colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
be represented as `str` or `Tuple[int, int, int]`.
fill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
width (int): Width of bounding box.
font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
`/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
font_size (int): The requested font size in points.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Tensor expected, got {type(image)}")
elif image.dtype != torch.uint8:
raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
elif image.dim() != 3:
raise ValueError("Pass individual images, not batches")
ndarr = image.permute(1, 2, 0).numpy()
img_to_draw = Image.fromarray(ndarr)
img_boxes = boxes.to(torch.int64).tolist()
draw = ImageDraw.Draw(img_to_draw, "RGBA")
txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)
for i, bbox in enumerate(img_boxes):
color = None if colors is None else colors[i]
draw.rectangle(bbox, width=width, outline=color, fill=fill)
if labels is not None:
draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)
```
This makes Mask R-CNN output clearer, and people can play with the fill parameter, e.g. confidence-based fill or different fill colors per class.
## Additional context
I can send PR for this :sweat_smile: I'm attaching outputs of above code.

(Sorry PyTorch logo :pray: )

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/utils.py`
Content:
```
1 from typing import Union, Optional, List, Tuple, Text, BinaryIO
2 import pathlib
3 import torch
4 import math
5 import warnings
6 import numpy as np
7 from PIL import Image, ImageDraw
8 from PIL import ImageFont
9
10 __all__ = ["make_grid", "save_image", "draw_bounding_boxes"]
11
12
13 @torch.no_grad()
14 def make_grid(
15 tensor: Union[torch.Tensor, List[torch.Tensor]],
16 nrow: int = 8,
17 padding: int = 2,
18 normalize: bool = False,
19 value_range: Optional[Tuple[int, int]] = None,
20 scale_each: bool = False,
21 pad_value: int = 0,
22 **kwargs
23 ) -> torch.Tensor:
24 """Make a grid of images.
25
26 Args:
27 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
28 or a list of images all of the same size.
29 nrow (int, optional): Number of images displayed in each row of the grid.
30 The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
31 padding (int, optional): amount of padding. Default: ``2``.
32 normalize (bool, optional): If True, shift the image to the range (0, 1),
33 by the min and max values specified by :attr:`range`. Default: ``False``.
34 value_range (tuple, optional): tuple (min, max) where min and max are numbers,
35 then these numbers are used to normalize the image. By default, min and max
36 are computed from the tensor.
37 scale_each (bool, optional): If ``True``, scale each image in the batch of
38 images separately rather than the (min, max) over all images. Default: ``False``.
39 pad_value (float, optional): Value for the padded pixels. Default: ``0``.
40
41 Example:
42 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
43
44 """
45 if not (torch.is_tensor(tensor) or
46 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
47 raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
48
49 if "range" in kwargs.keys():
50 warning = "range will be deprecated, please use value_range instead."
51 warnings.warn(warning)
52 value_range = kwargs["range"]
53
54 # if list of tensors, convert to a 4D mini-batch Tensor
55 if isinstance(tensor, list):
56 tensor = torch.stack(tensor, dim=0)
57
58 if tensor.dim() == 2: # single image H x W
59 tensor = tensor.unsqueeze(0)
60 if tensor.dim() == 3: # single image
61 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
62 tensor = torch.cat((tensor, tensor, tensor), 0)
63 tensor = tensor.unsqueeze(0)
64
65 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
66 tensor = torch.cat((tensor, tensor, tensor), 1)
67
68 if normalize is True:
69 tensor = tensor.clone() # avoid modifying tensor in-place
70 if value_range is not None:
71 assert isinstance(value_range, tuple), \
72 "value_range has to be a tuple (min, max) if specified. min and max are numbers"
73
74 def norm_ip(img, low, high):
75 img.clamp_(min=low, max=high)
76 img.sub_(low).div_(max(high - low, 1e-5))
77
78 def norm_range(t, value_range):
79 if value_range is not None:
80 norm_ip(t, value_range[0], value_range[1])
81 else:
82 norm_ip(t, float(t.min()), float(t.max()))
83
84 if scale_each is True:
85 for t in tensor: # loop over mini-batch dimension
86 norm_range(t, value_range)
87 else:
88 norm_range(tensor, value_range)
89
90 if tensor.size(0) == 1:
91 return tensor.squeeze(0)
92
93 # make the mini-batch of images into a grid
94 nmaps = tensor.size(0)
95 xmaps = min(nrow, nmaps)
96 ymaps = int(math.ceil(float(nmaps) / xmaps))
97 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
98 num_channels = tensor.size(1)
99 grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
100 k = 0
101 for y in range(ymaps):
102 for x in range(xmaps):
103 if k >= nmaps:
104 break
105 # Tensor.copy_() is a valid method but seems to be missing from the stubs
106 # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_
107 grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]
108 2, x * width + padding, width - padding
109 ).copy_(tensor[k])
110 k = k + 1
111 return grid
112
113
114 @torch.no_grad()
115 def save_image(
116 tensor: Union[torch.Tensor, List[torch.Tensor]],
117 fp: Union[Text, pathlib.Path, BinaryIO],
118 format: Optional[str] = None,
119 **kwargs
120 ) -> None:
121 """Save a given Tensor into an image file.
122
123 Args:
124 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
125 saves the tensor as a grid of images by calling ``make_grid``.
126 fp (string or file object): A filename or a file object
127 format(Optional): If omitted, the format to use is determined from the filename extension.
128 If a file object was used instead of a filename, this parameter should always be used.
129 **kwargs: Other arguments are documented in ``make_grid``.
130 """
131
132 grid = make_grid(tensor, **kwargs)
133 # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
134 ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
135 im = Image.fromarray(ndarr)
136 im.save(fp, format=format)
137
138
139 @torch.no_grad()
140 def draw_bounding_boxes(
141 image: torch.Tensor,
142 boxes: torch.Tensor,
143 labels: Optional[List[str]] = None,
144 colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
145 width: int = 1,
146 font: Optional[str] = None,
147 font_size: int = 10
148 ) -> torch.Tensor:
149
150 """
151 Draws bounding boxes on given image.
152 The values of the input image should be uint8 between 0 and 255.
153
154 Args:
155 image (Tensor): Tensor of shape (C x H x W)
156 boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
157 the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
158 `0 <= ymin < ymax < H`.
159 labels (List[str]): List containing the labels of bounding boxes.
160 colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
161 be represented as `str` or `Tuple[int, int, int]`.
162 width (int): Width of bounding box.
163 font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
164 also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
165 `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
166 font_size (int): The requested font size in points.
167 """
168
169 if not isinstance(image, torch.Tensor):
170 raise TypeError(f"Tensor expected, got {type(image)}")
171 elif image.dtype != torch.uint8:
172 raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
173 elif image.dim() != 3:
174 raise ValueError("Pass individual images, not batches")
175
176 ndarr = image.permute(1, 2, 0).numpy()
177 img_to_draw = Image.fromarray(ndarr)
178
179 img_boxes = boxes.to(torch.int64).tolist()
180
181 draw = ImageDraw.Draw(img_to_draw)
182 txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)
183
184 for i, bbox in enumerate(img_boxes):
185 color = None if colors is None else colors[i]
186 draw.rectangle(bbox, width=width, outline=color)
187
188 if labels is not None:
189 draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
190
191 return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -4,8 +4,7 @@
import math
import warnings
import numpy as np
-from PIL import Image, ImageDraw
-from PIL import ImageFont
+from PIL import Image, ImageDraw, ImageFont, ImageColor
__all__ = ["make_grid", "save_image", "draw_bounding_boxes"]
@@ -142,6 +141,7 @@
boxes: torch.Tensor,
labels: Optional[List[str]] = None,
colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
+ fill: Optional[bool] = False,
width: int = 1,
font: Optional[str] = None,
font_size: int = 10
@@ -150,6 +150,7 @@
"""
Draws bounding boxes on given image.
The values of the input image should be uint8 between 0 and 255.
+ If filled, Resulting Tensor should be saved as PNG image.
Args:
image (Tensor): Tensor of shape (C x H x W)
@@ -159,6 +160,7 @@
labels (List[str]): List containing the labels of bounding boxes.
colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
be represented as `str` or `Tuple[int, int, int]`.
+ fill (bool): If `True` fills the bounding box with specified color.
width (int): Width of bounding box.
font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
@@ -178,12 +180,31 @@
img_boxes = boxes.to(torch.int64).tolist()
- draw = ImageDraw.Draw(img_to_draw)
+ if fill:
+ draw = ImageDraw.Draw(img_to_draw, "RGBA")
+
+ else:
+ draw = ImageDraw.Draw(img_to_draw)
+
txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)
for i, bbox in enumerate(img_boxes):
- color = None if colors is None else colors[i]
- draw.rectangle(bbox, width=width, outline=color)
+ if colors is None:
+ color = None
+ else:
+ color = colors[i]
+
+ if fill:
+ if color is None:
+ fill_color = (255, 255, 255, 100)
+ elif isinstance(color, str):
+ # This will automatically raise Error if rgb cannot be parsed.
+ fill_color = ImageColor.getrgb(color) + (100,)
+ elif isinstance(color, tuple):
+ fill_color = color + (100,)
+ draw.rectangle(bbox, width=width, outline=color, fill=fill_color)
+ else:
+ draw.rectangle(bbox, width=width, outline=color)
if labels is not None:
draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)
|
{"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -4,8 +4,7 @@\n import math\n import warnings\n import numpy as np\n-from PIL import Image, ImageDraw\n-from PIL import ImageFont\n+from PIL import Image, ImageDraw, ImageFont, ImageColor\n \n __all__ = [\"make_grid\", \"save_image\", \"draw_bounding_boxes\"]\n \n@@ -142,6 +141,7 @@\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\n+ fill: Optional[bool] = False,\n width: int = 1,\n font: Optional[str] = None,\n font_size: int = 10\n@@ -150,6 +150,7 @@\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n+ If filled, Resulting Tensor should be saved as PNG image.\n \n Args:\n image (Tensor): Tensor of shape (C x H x W)\n@@ -159,6 +160,7 @@\n labels (List[str]): List containing the labels of bounding boxes.\n colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\n be represented as `str` or `Tuple[int, int, int]`.\n+ fill (bool): If `True` fills the bounding box with specified color.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n@@ -178,12 +180,31 @@\n \n img_boxes = boxes.to(torch.int64).tolist()\n \n- draw = ImageDraw.Draw(img_to_draw)\n+ if fill:\n+ draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\n+\n+ else:\n+ draw = ImageDraw.Draw(img_to_draw)\n+\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\n \n for i, bbox in enumerate(img_boxes):\n- color = None if colors is None else colors[i]\n- draw.rectangle(bbox, width=width, outline=color)\n+ if colors is None:\n+ color = None\n+ else:\n+ color = colors[i]\n+\n+ if fill:\n+ if color is None:\n+ fill_color = (255, 255, 255, 100)\n+ elif isinstance(color, str):\n+ # This will automatically raise Error if rgb cannot be parsed.\n+ fill_color = ImageColor.getrgb(color) + (100,)\n+ elif isinstance(color, tuple):\n+ fill_color = color + (100,)\n+ draw.rectangle(bbox, width=width, outline=color, fill=fill_color)\n+ else:\n+ draw.rectangle(bbox, width=width, outline=color)\n \n if labels is not None:\n draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\n", "issue": "Add fill parameter to utils.draw_bounding_boxes.\n## \ud83d\ude80 Feature\r\n\r\nFill parameter allows creating a semi-transparent box. This is particularly useful for Mask RCNN Model.\r\nThis would complete utils for Object detection and Instance Segmentation (least with rectangular boxes)\r\n\r\n## Motivation\r\n\r\nIn Instance segmentation models, we also care about masks, not just the bounding box. Fill parameter allows us to fill in a semi-transparent way. 
Also, this parameter is optional hence it does not affect performance.\r\n\r\n## Pitch\r\n\r\nAdd a param fill as follows\r\n```\r\nfill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\r\n\r\n```\r\n\r\nHere is complete running code with a few edits\r\n\r\n```\r\[email protected]_grad()\r\ndef draw_bounding_boxes(\r\n image: torch.Tensor,\r\n boxes: torch.Tensor,\r\n labels: Optional[List[str]] = None,\r\n colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\r\n fill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\r\n width: int = 1,\r\n font: Optional[str] = None,\r\n font_size: int = 10\r\n) -> torch.Tensor:\r\n\r\n \"\"\"\r\n Draws bounding boxes on given image.\r\n The values of the input image should be uint8 between 0 and 255.\r\n Args:\r\n image (Tensor): Tensor of shape (C x H x W)\r\n bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\r\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\r\n `0 <= ymin < ymax < H`.\r\n labels (List[str]): List containing the labels of bounding boxes.\r\n colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\r\n be represented as `str` or `Tuple[int, int, int]`.\r\n fill: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\r\n width (int): Width of bounding box.\r\n font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\r\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\r\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\r\n font_size (int): The requested font size in points.\r\n \"\"\"\r\n\r\n if not isinstance(image, torch.Tensor):\r\n raise TypeError(f\"Tensor expected, got {type(image)}\")\r\n elif image.dtype != torch.uint8:\r\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\r\n elif image.dim() != 3:\r\n raise ValueError(\"Pass individual images, not batches\")\r\n\r\n ndarr = image.permute(1, 2, 0).numpy()\r\n img_to_draw = Image.fromarray(ndarr)\r\n\r\n img_boxes = boxes.to(torch.int64).tolist()\r\n\r\n draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\r\n\r\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\r\n\r\n for i, bbox in enumerate(img_boxes):\r\n color = None if colors is None else colors[i]\r\n draw.rectangle(bbox, width=width, outline=color, fill=fill)\r\n\r\n if labels is not None:\r\n draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\r\n\r\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)\r\n```\r\n\r\nThis makes mask RCNN output more clear, and people can play with fill parameter such as confidence based fill, fill with colors different per class, etc.\r\n\r\n## Additional context\r\n\r\nI can send PR for this :sweat_smile: I'm attaching outputs of above code.\r\n\r\n\r\n\r\n(Sorry PyTorch logo :pray: )\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Optional, List, Tuple, Text, BinaryIO\nimport pathlib\nimport torch\nimport math\nimport warnings\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom PIL import ImageFont\n\n__all__ = [\"make_grid\", \"save_image\", \"draw_bounding_boxes\"]\n\n\[email protected]_grad()\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n value_range: 
Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n **kwargs\n) -> torch.Tensor:\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. Default: ``False``.\n value_range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if \"range\" in kwargs.keys():\n warning = \"range will be deprecated, please use value_range instead.\"\n warnings.warn(warning)\n value_range = kwargs[\"range\"]\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if value_range is not None:\n assert isinstance(value_range, tuple), \\\n \"value_range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, value_range):\n if value_range is not None:\n norm_ip(t, value_range[0], value_range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, value_range)\n else:\n norm_range(tensor, value_range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\[email protected]_grad()\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[Text, pathlib.Path, BinaryIO],\n format: Optional[str] = None,\n **kwargs\n) -> None:\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n\n grid = make_grid(tensor, **kwargs)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n\n\[email protected]_grad()\ndef draw_bounding_boxes(\n image: torch.Tensor,\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\n width: int = 1,\n font: Optional[str] = None,\n font_size: int = 10\n) -> torch.Tensor:\n\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n\n Args:\n image (Tensor): Tensor of shape (C x H x W)\n boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n `0 <= ymin < ymax < H`.\n labels (List[str]): List containing the labels of bounding boxes.\n colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\n be represented as `str` or `Tuple[int, int, int]`.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. 
If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n font_size (int): The requested font size in points.\n \"\"\"\n\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Tensor expected, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n\n ndarr = image.permute(1, 2, 0).numpy()\n img_to_draw = Image.fromarray(ndarr)\n\n img_boxes = boxes.to(torch.int64).tolist()\n\n draw = ImageDraw.Draw(img_to_draw)\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\n\n for i, bbox in enumerate(img_boxes):\n color = None if colors is None else colors[i]\n draw.rectangle(bbox, width=width, outline=color)\n\n if labels is not None:\n draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)\n", "path": "torchvision/utils.py"}], "after_files": [{"content": "from typing import Union, Optional, List, Tuple, Text, BinaryIO\nimport pathlib\nimport torch\nimport math\nimport warnings\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont, ImageColor\n\n__all__ = [\"make_grid\", \"save_image\", \"draw_bounding_boxes\"]\n\n\[email protected]_grad()\ndef make_grid(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n nrow: int = 8,\n padding: int = 2,\n normalize: bool = False,\n value_range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n **kwargs\n) -> torch.Tensor:\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. Default: ``False``.\n value_range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. 
Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if \"range\" in kwargs.keys():\n warning = \"range will be deprecated, please use value_range instead.\"\n warnings.warn(warning)\n value_range = kwargs[\"range\"]\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if value_range is not None:\n assert isinstance(value_range, tuple), \\\n \"value_range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, low, high):\n img.clamp_(min=low, max=high)\n img.sub_(low).div_(max(high - low, 1e-5))\n\n def norm_range(t, value_range):\n if value_range is not None:\n norm_ip(t, value_range[0], value_range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, value_range)\n else:\n norm_range(tensor, value_range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n num_channels = tensor.size(1)\n grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n # Tensor.copy_() is a valid method but seems to be missing from the stubs\n # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_\n grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined]\n 2, x * width + padding, width - padding\n ).copy_(tensor[k])\n k = k + 1\n return grid\n\n\[email protected]_grad()\ndef save_image(\n tensor: Union[torch.Tensor, List[torch.Tensor]],\n fp: Union[Text, pathlib.Path, BinaryIO],\n format: Optional[str] = None,\n **kwargs\n) -> None:\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. 
If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp (string or file object): A filename or a file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n\n grid = make_grid(tensor, **kwargs)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n\n\[email protected]_grad()\ndef draw_bounding_boxes(\n image: torch.Tensor,\n boxes: torch.Tensor,\n labels: Optional[List[str]] = None,\n colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,\n fill: Optional[bool] = False,\n width: int = 1,\n font: Optional[str] = None,\n font_size: int = 10\n) -> torch.Tensor:\n\n \"\"\"\n Draws bounding boxes on given image.\n The values of the input image should be uint8 between 0 and 255.\n If filled, Resulting Tensor should be saved as PNG image.\n\n Args:\n image (Tensor): Tensor of shape (C x H x W)\n boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that\n the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and\n `0 <= ymin < ymax < H`.\n labels (List[str]): List containing the labels of bounding boxes.\n colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can\n be represented as `str` or `Tuple[int, int, int]`.\n fill (bool): If `True` fills the bounding box with specified color.\n width (int): Width of bounding box.\n font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may\n also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,\n `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.\n font_size (int): The requested font size in points.\n \"\"\"\n\n if not isinstance(image, torch.Tensor):\n raise TypeError(f\"Tensor expected, got {type(image)}\")\n elif image.dtype != torch.uint8:\n raise ValueError(f\"Tensor uint8 expected, got {image.dtype}\")\n elif image.dim() != 3:\n raise ValueError(\"Pass individual images, not batches\")\n\n ndarr = image.permute(1, 2, 0).numpy()\n img_to_draw = Image.fromarray(ndarr)\n\n img_boxes = boxes.to(torch.int64).tolist()\n\n if fill:\n draw = ImageDraw.Draw(img_to_draw, \"RGBA\")\n\n else:\n draw = ImageDraw.Draw(img_to_draw)\n\n txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size)\n\n for i, bbox in enumerate(img_boxes):\n if colors is None:\n color = None\n else:\n color = colors[i]\n\n if fill:\n if color is None:\n fill_color = (255, 255, 255, 100)\n elif isinstance(color, str):\n # This will automatically raise Error if rgb cannot be parsed.\n fill_color = ImageColor.getrgb(color) + (100,)\n elif isinstance(color, tuple):\n fill_color = color + (100,)\n draw.rectangle(bbox, width=width, outline=color, fill=fill_color)\n else:\n draw.rectangle(bbox, width=width, outline=color)\n\n if labels is not None:\n draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)\n\n return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1)\n", "path": "torchvision/utils.py"}]}
| 3,751 | 748 |
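A minimal usage sketch of the patched signature (Python; it assumes a torchvision build that already carries the `fill` flag from the diff above, and the file name, labels and dummy data are purely illustrative):

```python
import torch
from torchvision.utils import draw_bounding_boxes, save_image

# Dummy uint8 image (C x H x W) and two boxes in (xmin, ymin, xmax, ymax) format.
image = torch.zeros((3, 100, 100), dtype=torch.uint8)
boxes = torch.tensor([[10, 10, 50, 50], [60, 20, 90, 80]], dtype=torch.float)

# fill=True draws each box with a semi-transparent interior (alpha 100 in the patch),
# so the result is best saved as a PNG to keep the blended pixels.
result = draw_bounding_boxes(
    image,
    boxes,
    labels=["cat", "dog"],
    colors=["red", (0, 255, 0)],
    fill=True,
    width=2,
)
save_image(result.float() / 255, "boxes.png")
```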
gh_patches_debug_1403
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-402
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sort list of users when adding marks
When adding a mark, the list of users which the mark should relate to is not sorted. It should be. (It is probably sorted on realname instead of username.)
- Change the list to display realname instead of username.
- Make sure it's sorted.
(Bonus would be to have a select2js-ish search on it as well, but don't use time on it.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 from pytz import timezone
5
6 from django.conf import settings
7 from django.contrib.auth.models import AbstractUser
8 from django.db import models
9 from django.utils.translation import ugettext as _
10 from django.utils import timezone
11
12
13 # If this list is changed, remember to check that the year property on
14 # OnlineUser is still correct!
15 FIELD_OF_STUDY_CHOICES = [
16 (0, _(u'Gjest')),
17 (1, _(u'Bachelor i Informatikk (BIT)')),
18 # master degrees take up the interval [10,30>
19 (10, _(u'Software (SW)')),
20 (11, _(u'Informasjonsforvaltning (DIF)')),
21 (12, _(u'Komplekse Datasystemer (KDS)')),
22 (13, _(u'Spillteknologi (SPT)')),
23 (14, _(u'Intelligente Systemer (IRS)')),
24 (15, _(u'Helseinformatikk (MSMEDTEK)')),
25 (30, _(u'Annen mastergrad')),
26 (80, _(u'PhD')),
27 (90, _(u'International')),
28 (100, _(u'Annet Onlinemedlem')),
29 ]
30
31 class OnlineUser(AbstractUser):
32
33 IMAGE_FOLDER = "images/profiles"
34 IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']
35
36 # Online related fields
37 field_of_study = models.SmallIntegerField(_(u"studieretning"), choices=FIELD_OF_STUDY_CHOICES, default=0)
38 started_date = models.DateField(_(u"startet studie"), default=timezone.now().date())
39 compiled = models.BooleanField(_(u"kompilert"), default=False)
40
41 # Email
42 infomail = models.BooleanField(_(u"vil ha infomail"), default=True)
43
44 # Address
45 phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True)
46 address = models.CharField(_(u"adresse"), max_length=30, blank=True, null=True)
47 zip_code = models.CharField(_(u"postnummer"), max_length=4, blank=True, null=True)
48
49 # Other
50 allergies = models.TextField(_(u"allergier"), blank=True, null=True)
51 mark_rules = models.BooleanField(_(u"godtatt prikkeregler"), default=False)
52 rfid = models.CharField(_(u"RFID"), max_length=50, blank=True, null=True)
53 nickname = models.CharField(_(u"nickname"), max_length=50, blank=True, null=True)
54 website = models.URLField(_(u"hjemmeside"), blank=True, null=True)
55
56
57 image = models.ImageField(_(u"bilde"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,
58 default=settings.DEFAULT_PROFILE_PICTURE_URL)
59
60 # NTNU credentials
61 ntnu_username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, blank=True, null=True)
62
63 # TODO profile pictures
64 # TODO checkbox for forwarding of @online.ntnu.no mail
65
66 @property
67 def is_member(self):
68 """
69 Returns true if the User object is associated with Online.
70 """
71 if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=timezone.now()).count() > 0:
72 return True
73 return False
74
75 def get_full_name(self):
76 """
77 Returns the first_name plus the last_name, with a space in between.
78 """
79 full_name = u'%s %s' % (self.first_name, self.last_name)
80 return full_name.strip()
81
82 def get_email(self):
83 return self.get_emails().filter(primary = True)[0]
84
85 def get_emails(self):
86 return Email.objects.all().filter(user = self)
87
88 @property
89 def year(self):
90 today = timezone.now().date()
91 started = self.started_date
92
93 # We say that a year is 360 days incase we are a bit slower to
94 # add users one year.
95 year = ((today - started).days / 360) + 1
96
97 if self.field_of_study == 0 or self.field_of_study == 100: # others
98 return 0
99 # dont return a bachelor student as 4th or 5th grade
100 elif self.field_of_study == 1: # bachelor
101 if year > 3:
102 return 3
103 return year
104 elif 9 < self.field_of_study < 30: # 10-29 is considered master
105 if year >= 2:
106 return 5
107 return 4
108 elif self.field_of_study == 80: # phd
109 return year + 5
110 elif self.field_of_study == 90: # international
111 if year == 1:
112 return 1
113 return 4
114
115 def __unicode__(self):
116 return self.get_full_name()
117
118 class Meta:
119 verbose_name = _(u"brukerprofil")
120 verbose_name_plural = _(u"brukerprofiler")
121
122
123 class Email(models.Model):
124 user = models.ForeignKey(OnlineUser, related_name="email_user")
125 email = models.EmailField(_(u"epostadresse"), unique=True)
126 primary = models.BooleanField(_(u"aktiv"), default=False)
127 verified = models.BooleanField(_(u"verifisert"), default=False)
128
129 def __unicode__(self):
130 return self.email
131
132 class Meta:
133 verbose_name = _(u"epostadresse")
134 verbose_name_plural = _(u"epostadresser")
135
136
137 class RegisterToken(models.Model):
138 user = models.ForeignKey(OnlineUser, related_name="register_user")
139 email = models.EmailField(_(u"epost"), max_length=254)
140 token = models.CharField(_(u"token"), max_length=32)
141 created = models.DateTimeField(_(u"opprettet dato"), editable=False, auto_now_add=True)
142
143 @property
144 def is_valid(self):
145 valid_period = datetime.timedelta(days=1)
146 now = timezone.now()
147 return now < self.created + valid_period
148
149
150 class AllowedUsername(models.Model):
151 """
152 Holds usernames that are considered valid members of Online and the time they expire.
153 """
154 username = models.CharField(_(u"brukernavn"), max_length=10)
155 registered = models.DateField(_(u"registrert"))
156 note = models.CharField(_(u"notat"), max_length=100)
157 description = models.TextField(_(u"beskrivelse"), blank=True, null=True)
158 expiration_date = models.DateField(_(u"utløpsdato"))
159
160 @property
161 def is_active(self):
162 return timezone.now().date() < self.expiration_date
163
164 def __unicode__(self):
165 return self.username
166
167 class Meta:
168 verbose_name = _(u"tillatt brukernavn")
169 verbose_name_plural = _(u"tillatte brukernavn")
170 ordering = (u"username",)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/authentication/models.py b/apps/authentication/models.py
--- a/apps/authentication/models.py
+++ b/apps/authentication/models.py
@@ -116,6 +116,7 @@
return self.get_full_name()
class Meta:
+ ordering = ['first_name', 'last_name']
verbose_name = _(u"brukerprofil")
verbose_name_plural = _(u"brukerprofiler")
|
{"golden_diff": "diff --git a/apps/authentication/models.py b/apps/authentication/models.py\n--- a/apps/authentication/models.py\n+++ b/apps/authentication/models.py\n@@ -116,6 +116,7 @@\n return self.get_full_name()\n \n class Meta:\n+ ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n", "issue": "Sort list of users when adding marks\nWhen adding a mark, the list of user which the mark should relate to is not sorted. It should be. (It is probably sorted on realname instead of username)\n- Change the list to display realname instead of username.\n- Make sure it's sorted.\n\n(Bonus would be to have a select2js-ish search on it as well, but don't use time on it.)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nfrom pytz import timezone\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the 
last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.get_full_name()\n\n class Meta:\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"tillatt brukernavn\")\n verbose_name_plural = _(u\"tillatte brukernavn\")\n ordering = (u\"username\",)\n", "path": "apps/authentication/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nfrom pytz import timezone\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, 
_(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.get_full_name()\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), 
default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"tillatt brukernavn\")\n verbose_name_plural = _(u\"tillatte brukernavn\")\n ordering = (u\"username\",)\n", "path": "apps/authentication/models.py"}]}
| 2,340 | 92 |
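The one-line fix relies on Django applying `Model.Meta.ordering` to querysets by default, and `__unicode__` already returns `get_full_name()`, so a choice field built from the default queryset would show real names in alphabetical order. A minimal sketch of the effect (it assumes a configured Django environment for this project; the printed names are made up):

```python
from apps.authentication.models import OnlineUser

# With Meta.ordering = ['first_name', 'last_name'] the default queryset is already sorted.
users = OnlineUser.objects.all()
print([u.get_full_name() for u in users])  # e.g. [u'Ada Lovelace', u'Alan Turing', ...]
```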
gh_patches_debug_25973
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pex 1.2.14 breaks entrypoint targeting when PEX_PYTHON is present
With 1.2.13, targeting an entrypoint with `-e` et al. results in an attempt to load that entrypoint at runtime:
```
[omerta ~]$ pip install pex==1.2.13 2>&1 >/dev/null
[omerta ~]$ pex --version
pex 1.2.13
[omerta ~]$ pex -e 'pants.bin.pants_loader:main' pantsbuild.pants -o /tmp/pants.pex
[omerta ~]$ /tmp/pants.pex
Traceback (most recent call last):
File ".bootstrap/_pex/pex.py", line 365, in execute
File ".bootstrap/_pex/pex.py", line 293, in _wrap_coverage
File ".bootstrap/_pex/pex.py", line 325, in _wrap_profiling
File ".bootstrap/_pex/pex.py", line 408, in _execute
File ".bootstrap/_pex/pex.py", line 466, in execute_entry
File ".bootstrap/_pex/pex.py", line 480, in execute_pkg_resources
File ".bootstrap/pkg_resources/__init__.py", line 2297, in resolve
ImportError: No module named pants_loader
```
With 1.2.14, it seems to re-exec against the `PEX_PYTHON` interpreter sans args, which results in a bare REPL when the pex is run:
```
[omerta ~]$ pip install pex==1.2.14 2>&1 >/dev/null
[omerta ~]$ pex --version
pex 1.2.14
[omerta ~]$ pex -e 'pants.bin.pants_loader:main' pantsbuild.pants -o /tmp/pants.pex
[omerta ~]$ PEX_VERBOSE=9 /tmp/pants.pex
pex: Please build pex with the subprocess32 module for more reliable requirement installation and interpreter execution.
pex: Selecting runtime interpreter based on pexrc: 0.1ms
pex: Re-executing: cmdline="['/opt/ee/python/2.7/bin/python2.7']", sys.executable="/Users/kwilson/Python/CPython-2.7.13/bin/python2.7", PEX_PYTHON="None", PEX_PYTHON_PATH="None", COMPATIBILITY_CONSTRAINTS="[]"
Python 2.7.10 (default, Dec 16 2015, 14:09:45)
[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>>
```
cc @CMLivingston since this appears related to #427
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/pex_bootstrapper.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from __future__ import print_function
4 import os
5 import sys
6
7 from .common import die, open_zip
8 from .executor import Executor
9 from .interpreter import PythonInterpreter
10 from .interpreter_constraints import matched_interpreters
11 from .tracer import TRACER
12 from .variables import ENV
13
14 __all__ = ('bootstrap_pex',)
15
16
17 def pex_info_name(entry_point):
18 """Return the PEX-INFO for an entry_point"""
19 return os.path.join(entry_point, 'PEX-INFO')
20
21
22 def is_compressed(entry_point):
23 return os.path.exists(entry_point) and not os.path.exists(pex_info_name(entry_point))
24
25
26 def read_pexinfo_from_directory(entry_point):
27 with open(pex_info_name(entry_point), 'rb') as fp:
28 return fp.read()
29
30
31 def read_pexinfo_from_zip(entry_point):
32 with open_zip(entry_point) as zf:
33 return zf.read('PEX-INFO')
34
35
36 def read_pex_info_content(entry_point):
37 """Return the raw content of a PEX-INFO."""
38 if is_compressed(entry_point):
39 return read_pexinfo_from_zip(entry_point)
40 else:
41 return read_pexinfo_from_directory(entry_point)
42
43
44 def get_pex_info(entry_point):
45 """Return the PexInfo object for an entry point."""
46 from . import pex_info
47
48 pex_info_content = read_pex_info_content(entry_point)
49 if pex_info_content:
50 return pex_info.PexInfo.from_json(pex_info_content)
51 raise ValueError('Invalid entry_point: %s' % entry_point)
52
53
54 def find_in_path(target_interpreter):
55 if os.path.exists(target_interpreter):
56 return target_interpreter
57
58 for directory in os.getenv('PATH', '').split(os.pathsep):
59 try_path = os.path.join(directory, target_interpreter)
60 if os.path.exists(try_path):
61 return try_path
62
63
64 def find_compatible_interpreters(pex_python_path, compatibility_constraints):
65 """Find all compatible interpreters on the system within the supplied constraints and use
66 PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.
67 """
68 if pex_python_path:
69 interpreters = []
70 for binary in pex_python_path.split(os.pathsep):
71 try:
72 interpreters.append(PythonInterpreter.from_binary(binary))
73 except Executor.ExecutionError:
74 print("Python interpreter %s in PEX_PYTHON_PATH failed to load properly." % binary,
75 file=sys.stderr)
76 if not interpreters:
77 die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')
78 else:
79 if not os.getenv('PATH', ''):
80 # no $PATH, use sys.executable
81 interpreters = [PythonInterpreter.get()]
82 else:
83 # get all qualifying interpreters found in $PATH
84 interpreters = PythonInterpreter.all()
85
86 return list(matched_interpreters(
87 interpreters, compatibility_constraints, meet_all_constraints=True))
88
89
90 def _select_pex_python_interpreter(target_python, compatibility_constraints):
91 target = find_in_path(target_python)
92
93 if not target:
94 die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)
95 if compatibility_constraints:
96 pi = PythonInterpreter.from_binary(target)
97 if not list(matched_interpreters([pi], compatibility_constraints, meet_all_constraints=True)):
98 die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '
99 'interpreter constraints: %s' % (target, str(compatibility_constraints)))
100 if not os.path.exists(target):
101 die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' % target)
102 return target
103
104
105 def _select_interpreter(pex_python_path, compatibility_constraints):
106 compatible_interpreters = find_compatible_interpreters(
107 pex_python_path, compatibility_constraints)
108
109 if not compatible_interpreters:
110 die('Failed to find compatible interpreter for constraints: %s'
111 % str(compatibility_constraints))
112 # TODO: https://github.com/pantsbuild/pex/issues/430
113 target = min(compatible_interpreters).binary
114
115 if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable):
116 return target
117
118
119 def maybe_reexec_pex(compatibility_constraints):
120 """
121 Handle environment overrides for the Python interpreter to use when executing this pex.
122
123 This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO
124 metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the
125 interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for
126 a matching interpreter in accordance with the interpreter constraints. If both variables are
127 present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no
128 compatible interpreters can be found on said path. If neither variable is set, fall through to
129 plain pex execution using PATH searching or the currently executing interpreter.
130
131 :param compatibility_constraints: list of requirements-style strings that constrain the
132 Python interpreter to re-exec this pex with.
133
134 """
135 if ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC:
136 return
137
138 selected_interpreter = None
139 with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):
140 if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:
141 # preserve PEX_PYTHON re-exec for backwards compatibility
142 # TODO: Kill this off completely in favor of PEX_PYTHON_PATH
143 # https://github.com/pantsbuild/pex/issues/431
144 selected_interpreter = _select_pex_python_interpreter(ENV.PEX_PYTHON,
145 compatibility_constraints)
146 elif ENV.PEX_PYTHON_PATH:
147 selected_interpreter = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)
148
149 if selected_interpreter:
150 ENV.delete('PEX_PYTHON')
151 ENV.delete('PEX_PYTHON_PATH')
152 ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True
153 cmdline = [selected_interpreter] + sys.argv[1:]
154 TRACER.log('Re-executing: cmdline="%s", sys.executable="%s", PEX_PYTHON="%s", '
155 'PEX_PYTHON_PATH="%s", COMPATIBILITY_CONSTRAINTS="%s"'
156 % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,
157 compatibility_constraints))
158 os.execve(selected_interpreter, cmdline, ENV.copy())
159
160
161 def bootstrap_pex(entry_point):
162 from .finders import register_finders
163 register_finders()
164 pex_info = get_pex_info(entry_point)
165 maybe_reexec_pex(pex_info.interpreter_constraints)
166
167 from . import pex
168 pex.PEX(entry_point).execute()
169
170
171 def bootstrap_pex_env(entry_point):
172 """Bootstrap the current runtime environment using a given pex."""
173 from .environment import PEXEnvironment
174 from .finders import register_finders
175 from .pex_info import PexInfo
176
177 register_finders()
178
179 PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py
--- a/pex/pex_bootstrapper.py
+++ b/pex/pex_bootstrapper.py
@@ -1,6 +1,7 @@
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import print_function
+
import os
import sys
@@ -147,14 +148,14 @@
selected_interpreter = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)
if selected_interpreter:
- ENV.delete('PEX_PYTHON')
- ENV.delete('PEX_PYTHON_PATH')
- ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True
- cmdline = [selected_interpreter] + sys.argv[1:]
+ cmdline = [selected_interpreter] + sys.argv
TRACER.log('Re-executing: cmdline="%s", sys.executable="%s", PEX_PYTHON="%s", '
'PEX_PYTHON_PATH="%s", COMPATIBILITY_CONSTRAINTS="%s"'
% (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,
compatibility_constraints))
+ ENV.delete('PEX_PYTHON')
+ ENV.delete('PEX_PYTHON_PATH')
+ ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True
os.execve(selected_interpreter, cmdline, ENV.copy())
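
Note on the patch above: `os.execve(path, argv, env)` only uses `argv[0]` as the process name, so the pex file being re-executed has to remain in the argument vector as `argv[1]`. Building the command line from `sys.argv[1:]` dropped it, which is why the selected interpreter came up as a bare REPL; the patch also moves the `ENV.delete(...)` calls after `TRACER.log(...)` so the logged `PEX_PYTHON`/`PEX_PYTHON_PATH` values are no longer `"None"`. A minimal, self-contained illustration — the interpreter path and flags are hypothetical, not taken from the pex codebase:

```python
import os
import sys

# Hypothetical invocation: /tmp/pants.pex --version
# so sys.argv == ['/tmp/pants.pex', '--version'] inside the bootstrap code.
selected_interpreter = '/opt/ee/python/2.7/bin/python2.7'  # illustrative PEX_PYTHON target

broken_cmdline = [selected_interpreter] + sys.argv[1:]  # pex file path is lost -> bare REPL
fixed_cmdline = [selected_interpreter] + sys.argv       # interpreter, pex file, original args
print(broken_cmdline)
print(fixed_cmdline)

# os.execve() executes its first argument; the script to run must appear as argv[1].
# Left commented out so this sketch stays inert:
# os.execve(selected_interpreter, fixed_cmdline, os.environ.copy())
```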
|
{"golden_diff": "diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py\n--- a/pex/pex_bootstrapper.py\n+++ b/pex/pex_bootstrapper.py\n@@ -1,6 +1,7 @@\n # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n from __future__ import print_function\n+\n import os\n import sys\n \n@@ -147,14 +148,14 @@\n selected_interpreter = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n \n if selected_interpreter:\n- ENV.delete('PEX_PYTHON')\n- ENV.delete('PEX_PYTHON_PATH')\n- ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True\n- cmdline = [selected_interpreter] + sys.argv[1:]\n+ cmdline = [selected_interpreter] + sys.argv\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n+ ENV.delete('PEX_PYTHON')\n+ ENV.delete('PEX_PYTHON_PATH')\n+ ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True\n os.execve(selected_interpreter, cmdline, ENV.copy())\n", "issue": "pex 1.2.14 breaks entrypoint targeting when PEX_PYTHON is present\nwith 1.2.13, targeting an entrypoint with `-e` et al results in an attempt to load that entrypoint at runtime:\r\n\r\n```\r\n[omerta ~]$ pip install pex==1.2.13 2>&1 >/dev/null\r\n[omerta ~]$ pex --version\r\npex 1.2.13\r\n[omerta ~]$ pex -e 'pants.bin.pants_loader:main' pantsbuild.pants -o /tmp/pants.pex\r\n[omerta ~]$ /tmp/pants.pex\r\nTraceback (most recent call last):\r\n File \".bootstrap/_pex/pex.py\", line 365, in execute\r\n File \".bootstrap/_pex/pex.py\", line 293, in _wrap_coverage\r\n File \".bootstrap/_pex/pex.py\", line 325, in _wrap_profiling\r\n File \".bootstrap/_pex/pex.py\", line 408, in _execute\r\n File \".bootstrap/_pex/pex.py\", line 466, in execute_entry\r\n File \".bootstrap/_pex/pex.py\", line 480, in execute_pkg_resources\r\n File \".bootstrap/pkg_resources/__init__.py\", line 2297, in resolve\r\nImportError: No module named pants_loader\r\n```\r\n\r\nwith 1.2.14, it seems to be re-execing against the `PEX_PYTHON` interpreter sans args which results in a bare repl when the pex is run:\r\n\r\n```\r\n[omerta ~]$ pip install pex==1.2.14 2>&1 >/dev/null\r\n[omerta ~]$ pex --version\r\npex 1.2.14\r\n[omerta ~]$ pex -e 'pants.bin.pants_loader:main' pantsbuild.pants -o /tmp/pants.pex\r\n[omerta ~]$ PEX_VERBOSE=9 /tmp/pants.pex\r\npex: Please build pex with the subprocess32 module for more reliable requirement installation and interpreter execution.\r\npex: Selecting runtime interpreter based on pexrc: 0.1ms\r\npex: Re-executing: cmdline=\"['/opt/ee/python/2.7/bin/python2.7']\", sys.executable=\"/Users/kwilson/Python/CPython-2.7.13/bin/python2.7\", PEX_PYTHON=\"None\", PEX_PYTHON_PATH=\"None\", COMPATIBILITY_CONSTRAINTS=\"[]\"\r\nPython 2.7.10 (default, Dec 16 2015, 14:09:45) \r\n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> \r\n```\r\n\r\ncc @CMLivingston since this appears related to #427 \n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import print_function\nimport os\nimport sys\n\nfrom .common import die, open_zip\nfrom .executor import Executor\nfrom .interpreter import PythonInterpreter\nfrom .interpreter_constraints import 
matched_interpreters\nfrom .tracer import TRACER\nfrom .variables import ENV\n\n__all__ = ('bootstrap_pex',)\n\n\ndef pex_info_name(entry_point):\n \"\"\"Return the PEX-INFO for an entry_point\"\"\"\n return os.path.join(entry_point, 'PEX-INFO')\n\n\ndef is_compressed(entry_point):\n return os.path.exists(entry_point) and not os.path.exists(pex_info_name(entry_point))\n\n\ndef read_pexinfo_from_directory(entry_point):\n with open(pex_info_name(entry_point), 'rb') as fp:\n return fp.read()\n\n\ndef read_pexinfo_from_zip(entry_point):\n with open_zip(entry_point) as zf:\n return zf.read('PEX-INFO')\n\n\ndef read_pex_info_content(entry_point):\n \"\"\"Return the raw content of a PEX-INFO.\"\"\"\n if is_compressed(entry_point):\n return read_pexinfo_from_zip(entry_point)\n else:\n return read_pexinfo_from_directory(entry_point)\n\n\ndef get_pex_info(entry_point):\n \"\"\"Return the PexInfo object for an entry point.\"\"\"\n from . import pex_info\n\n pex_info_content = read_pex_info_content(entry_point)\n if pex_info_content:\n return pex_info.PexInfo.from_json(pex_info_content)\n raise ValueError('Invalid entry_point: %s' % entry_point)\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef find_compatible_interpreters(pex_python_path, compatibility_constraints):\n \"\"\"Find all compatible interpreters on the system within the supplied constraints and use\n PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.\n \"\"\"\n if pex_python_path:\n interpreters = []\n for binary in pex_python_path.split(os.pathsep):\n try:\n interpreters.append(PythonInterpreter.from_binary(binary))\n except Executor.ExecutionError:\n print(\"Python interpreter %s in PEX_PYTHON_PATH failed to load properly.\" % binary,\n file=sys.stderr)\n if not interpreters:\n die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')\n else:\n if not os.getenv('PATH', ''):\n # no $PATH, use sys.executable\n interpreters = [PythonInterpreter.get()]\n else:\n # get all qualifying interpreters found in $PATH\n interpreters = PythonInterpreter.all()\n\n return list(matched_interpreters(\n interpreters, compatibility_constraints, meet_all_constraints=True))\n\n\ndef _select_pex_python_interpreter(target_python, compatibility_constraints):\n target = find_in_path(target_python)\n\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if compatibility_constraints:\n pi = PythonInterpreter.from_binary(target)\n if not list(matched_interpreters([pi], compatibility_constraints, meet_all_constraints=True)):\n die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '\n 'interpreter constraints: %s' % (target, str(compatibility_constraints)))\n if not os.path.exists(target):\n die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' 
% target)\n return target\n\n\ndef _select_interpreter(pex_python_path, compatibility_constraints):\n compatible_interpreters = find_compatible_interpreters(\n pex_python_path, compatibility_constraints)\n\n if not compatible_interpreters:\n die('Failed to find compatible interpreter for constraints: %s'\n % str(compatibility_constraints))\n # TODO: https://github.com/pantsbuild/pex/issues/430\n target = min(compatible_interpreters).binary\n\n if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable):\n return target\n\n\ndef maybe_reexec_pex(compatibility_constraints):\n \"\"\"\n Handle environment overrides for the Python interpreter to use when executing this pex.\n\n This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO\n metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the\n interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for\n a matching interpreter in accordance with the interpreter constraints. If both variables are\n present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no\n compatible interpreters can be found on said path. If neither variable is set, fall through to\n plain pex execution using PATH searching or the currently executing interpreter.\n\n :param compatibility_constraints: list of requirements-style strings that constrain the\n Python interpreter to re-exec this pex with.\n\n \"\"\"\n if ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC:\n return\n\n selected_interpreter = None\n with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):\n if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:\n # preserve PEX_PYTHON re-exec for backwards compatibility\n # TODO: Kill this off completely in favor of PEX_PYTHON_PATH\n # https://github.com/pantsbuild/pex/issues/431\n selected_interpreter = _select_pex_python_interpreter(ENV.PEX_PYTHON,\n compatibility_constraints)\n elif ENV.PEX_PYTHON_PATH:\n selected_interpreter = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n\n if selected_interpreter:\n ENV.delete('PEX_PYTHON')\n ENV.delete('PEX_PYTHON_PATH')\n ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True\n cmdline = [selected_interpreter] + sys.argv[1:]\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n os.execve(selected_interpreter, cmdline, ENV.copy())\n\n\ndef bootstrap_pex(entry_point):\n from .finders import register_finders\n register_finders()\n pex_info = get_pex_info(entry_point)\n maybe_reexec_pex(pex_info.interpreter_constraints)\n\n from . 
import pex\n pex.PEX(entry_point).execute()\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n from .environment import PEXEnvironment\n from .finders import register_finders\n from .pex_info import PexInfo\n\n register_finders()\n\n PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()\n", "path": "pex/pex_bootstrapper.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom .common import die, open_zip\nfrom .executor import Executor\nfrom .interpreter import PythonInterpreter\nfrom .interpreter_constraints import matched_interpreters\nfrom .tracer import TRACER\nfrom .variables import ENV\n\n__all__ = ('bootstrap_pex',)\n\n\ndef pex_info_name(entry_point):\n \"\"\"Return the PEX-INFO for an entry_point\"\"\"\n return os.path.join(entry_point, 'PEX-INFO')\n\n\ndef is_compressed(entry_point):\n return os.path.exists(entry_point) and not os.path.exists(pex_info_name(entry_point))\n\n\ndef read_pexinfo_from_directory(entry_point):\n with open(pex_info_name(entry_point), 'rb') as fp:\n return fp.read()\n\n\ndef read_pexinfo_from_zip(entry_point):\n with open_zip(entry_point) as zf:\n return zf.read('PEX-INFO')\n\n\ndef read_pex_info_content(entry_point):\n \"\"\"Return the raw content of a PEX-INFO.\"\"\"\n if is_compressed(entry_point):\n return read_pexinfo_from_zip(entry_point)\n else:\n return read_pexinfo_from_directory(entry_point)\n\n\ndef get_pex_info(entry_point):\n \"\"\"Return the PexInfo object for an entry point.\"\"\"\n from . import pex_info\n\n pex_info_content = read_pex_info_content(entry_point)\n if pex_info_content:\n return pex_info.PexInfo.from_json(pex_info_content)\n raise ValueError('Invalid entry_point: %s' % entry_point)\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef find_compatible_interpreters(pex_python_path, compatibility_constraints):\n \"\"\"Find all compatible interpreters on the system within the supplied constraints and use\n PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.\n \"\"\"\n if pex_python_path:\n interpreters = []\n for binary in pex_python_path.split(os.pathsep):\n try:\n interpreters.append(PythonInterpreter.from_binary(binary))\n except Executor.ExecutionError:\n print(\"Python interpreter %s in PEX_PYTHON_PATH failed to load properly.\" % binary,\n file=sys.stderr)\n if not interpreters:\n die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. 
Exiting.')\n else:\n if not os.getenv('PATH', ''):\n # no $PATH, use sys.executable\n interpreters = [PythonInterpreter.get()]\n else:\n # get all qualifying interpreters found in $PATH\n interpreters = PythonInterpreter.all()\n\n return list(matched_interpreters(\n interpreters, compatibility_constraints, meet_all_constraints=True))\n\n\ndef _select_pex_python_interpreter(target_python, compatibility_constraints):\n target = find_in_path(target_python)\n\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if compatibility_constraints:\n pi = PythonInterpreter.from_binary(target)\n if not list(matched_interpreters([pi], compatibility_constraints, meet_all_constraints=True)):\n die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '\n 'interpreter constraints: %s' % (target, str(compatibility_constraints)))\n if not os.path.exists(target):\n die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' % target)\n return target\n\n\ndef _select_interpreter(pex_python_path, compatibility_constraints):\n compatible_interpreters = find_compatible_interpreters(\n pex_python_path, compatibility_constraints)\n\n if not compatible_interpreters:\n die('Failed to find compatible interpreter for constraints: %s'\n % str(compatibility_constraints))\n # TODO: https://github.com/pantsbuild/pex/issues/430\n target = min(compatible_interpreters).binary\n\n if os.path.exists(target) and os.path.realpath(target) != os.path.realpath(sys.executable):\n return target\n\n\ndef maybe_reexec_pex(compatibility_constraints):\n \"\"\"\n Handle environment overrides for the Python interpreter to use when executing this pex.\n\n This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO\n metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the\n interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for\n a matching interpreter in accordance with the interpreter constraints. If both variables are\n present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no\n compatible interpreters can be found on said path. 
If neither variable is set, fall through to\n plain pex execution using PATH searching or the currently executing interpreter.\n\n :param compatibility_constraints: list of requirements-style strings that constrain the\n Python interpreter to re-exec this pex with.\n\n \"\"\"\n if ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC:\n return\n\n selected_interpreter = None\n with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):\n if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:\n # preserve PEX_PYTHON re-exec for backwards compatibility\n # TODO: Kill this off completely in favor of PEX_PYTHON_PATH\n # https://github.com/pantsbuild/pex/issues/431\n selected_interpreter = _select_pex_python_interpreter(ENV.PEX_PYTHON,\n compatibility_constraints)\n elif ENV.PEX_PYTHON_PATH:\n selected_interpreter = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n\n if selected_interpreter:\n cmdline = [selected_interpreter] + sys.argv\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n ENV.delete('PEX_PYTHON')\n ENV.delete('PEX_PYTHON_PATH')\n ENV.SHOULD_EXIT_BOOTSTRAP_REEXEC = True\n os.execve(selected_interpreter, cmdline, ENV.copy())\n\n\ndef bootstrap_pex(entry_point):\n from .finders import register_finders\n register_finders()\n pex_info = get_pex_info(entry_point)\n maybe_reexec_pex(pex_info.interpreter_constraints)\n\n from . import pex\n pex.PEX(entry_point).execute()\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n from .environment import PEXEnvironment\n from .finders import register_finders\n from .pex_info import PexInfo\n\n register_finders()\n\n PEXEnvironment(entry_point, PexInfo.from_pex(entry_point)).activate()\n", "path": "pex/pex_bootstrapper.py"}]}
| 2,965 | 330 |
gh_patches_debug_28309
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1142
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
include version information in error log
would be useful to include things like:
- pre-commit version
- sys.version
- sys.executable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/error_handler.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 import contextlib
6 import os.path
7 import traceback
8
9 import six
10
11 from pre_commit import five
12 from pre_commit import output
13 from pre_commit.store import Store
14
15
16 class FatalError(RuntimeError):
17 pass
18
19
20 def _to_bytes(exc):
21 try:
22 return bytes(exc)
23 except Exception:
24 return six.text_type(exc).encode('UTF-8')
25
26
27 def _log_and_exit(msg, exc, formatted):
28 error_msg = b''.join((
29 five.to_bytes(msg), b': ',
30 five.to_bytes(type(exc).__name__), b': ',
31 _to_bytes(exc), b'\n',
32 ))
33 output.write(error_msg)
34 store = Store()
35 log_path = os.path.join(store.directory, 'pre-commit.log')
36 output.write_line('Check the log at {}'.format(log_path))
37 with open(log_path, 'wb') as log:
38 output.write(error_msg, stream=log)
39 output.write_line(formatted, stream=log)
40 raise SystemExit(1)
41
42
43 @contextlib.contextmanager
44 def error_handler():
45 try:
46 yield
47 except (Exception, KeyboardInterrupt) as e:
48 if isinstance(e, FatalError):
49 msg = 'An error has occurred'
50 elif isinstance(e, KeyboardInterrupt):
51 msg = 'Interrupted (^C)'
52 else:
53 msg = 'An unexpected error has occurred'
54 _log_and_exit(msg, e, traceback.format_exc())
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py
--- a/pre_commit/error_handler.py
+++ b/pre_commit/error_handler.py
@@ -4,10 +4,12 @@
import contextlib
import os.path
+import sys
import traceback
import six
+import pre_commit.constants as C
from pre_commit import five
from pre_commit import output
from pre_commit.store import Store
@@ -34,9 +36,36 @@
store = Store()
log_path = os.path.join(store.directory, 'pre-commit.log')
output.write_line('Check the log at {}'.format(log_path))
+
with open(log_path, 'wb') as log:
+ output.write_line(
+ '### version information\n```', stream=log,
+ )
+ output.write_line(
+ 'pre-commit.version: {}'.format(C.VERSION), stream=log,
+ )
+ output.write_line(
+ 'sys.version:\n{}'.format(
+ '\n'.join(
+ [
+ ' {}'.format(line)
+ for line in sys.version.splitlines()
+ ],
+ ),
+ ),
+ stream=log,
+ )
+ output.write_line(
+ 'sys.executable: {}'.format(sys.executable), stream=log,
+ )
+ output.write_line('os.name: {}'.format(os.name), stream=log)
+ output.write_line(
+ 'sys.platform: {}\n```'.format(sys.platform), stream=log,
+ )
+ output.write_line('### error information\n```', stream=log)
output.write(error_msg, stream=log)
output.write_line(formatted, stream=log)
+ output.write('\n```\n', stream=log)
raise SystemExit(1)
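
With this change `pre-commit.log` starts with a "version information" section followed by the "error information" section. The snippet below is a standalone sketch (not pre-commit's own code) that collects the same fields; the version string is a placeholder rather than the real `pre_commit.constants.VERSION`:

```python
import os
import sys

PRE_COMMIT_VERSION = '1.x.y'  # placeholder for pre_commit.constants.VERSION

# The real patch wraps these lines in fenced code blocks inside pre-commit.log;
# the fences are omitted here to keep the sketch simple.
version_block = [
    '### version information',
    'pre-commit.version: {}'.format(PRE_COMMIT_VERSION),
    'sys.version:\n{}'.format(
        '\n'.join('    {}'.format(line) for line in sys.version.splitlines()),
    ),
    'sys.executable: {}'.format(sys.executable),
    'os.name: {}'.format(os.name),
    'sys.platform: {}'.format(sys.platform),
    '### error information',
]
print('\n'.join(version_block))
```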
|
{"golden_diff": "diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py\n--- a/pre_commit/error_handler.py\n+++ b/pre_commit/error_handler.py\n@@ -4,10 +4,12 @@\n \n import contextlib\n import os.path\n+import sys\n import traceback\n \n import six\n \n+import pre_commit.constants as C\n from pre_commit import five\n from pre_commit import output\n from pre_commit.store import Store\n@@ -34,9 +36,36 @@\n store = Store()\n log_path = os.path.join(store.directory, 'pre-commit.log')\n output.write_line('Check the log at {}'.format(log_path))\n+\n with open(log_path, 'wb') as log:\n+ output.write_line(\n+ '### version information\\n```', stream=log,\n+ )\n+ output.write_line(\n+ 'pre-commit.version: {}'.format(C.VERSION), stream=log,\n+ )\n+ output.write_line(\n+ 'sys.version:\\n{}'.format(\n+ '\\n'.join(\n+ [\n+ ' {}'.format(line)\n+ for line in sys.version.splitlines()\n+ ],\n+ ),\n+ ),\n+ stream=log,\n+ )\n+ output.write_line(\n+ 'sys.executable: {}'.format(sys.executable), stream=log,\n+ )\n+ output.write_line('os.name: {}'.format(os.name), stream=log)\n+ output.write_line(\n+ 'sys.platform: {}\\n```'.format(sys.platform), stream=log,\n+ )\n+ output.write_line('### error information\\n```', stream=log)\n output.write(error_msg, stream=log)\n output.write_line(formatted, stream=log)\n+ output.write('\\n```\\n', stream=log)\n raise SystemExit(1)\n", "issue": "include version information in error log\nwould be useful to include things like:\r\n\r\n- pre-commit version\r\n- sys.version\r\n- sys.executable\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport contextlib\nimport os.path\nimport traceback\n\nimport six\n\nfrom pre_commit import five\nfrom pre_commit import output\nfrom pre_commit.store import Store\n\n\nclass FatalError(RuntimeError):\n pass\n\n\ndef _to_bytes(exc):\n try:\n return bytes(exc)\n except Exception:\n return six.text_type(exc).encode('UTF-8')\n\n\ndef _log_and_exit(msg, exc, formatted):\n error_msg = b''.join((\n five.to_bytes(msg), b': ',\n five.to_bytes(type(exc).__name__), b': ',\n _to_bytes(exc), b'\\n',\n ))\n output.write(error_msg)\n store = Store()\n log_path = os.path.join(store.directory, 'pre-commit.log')\n output.write_line('Check the log at {}'.format(log_path))\n with open(log_path, 'wb') as log:\n output.write(error_msg, stream=log)\n output.write_line(formatted, stream=log)\n raise SystemExit(1)\n\n\[email protected]\ndef error_handler():\n try:\n yield\n except (Exception, KeyboardInterrupt) as e:\n if isinstance(e, FatalError):\n msg = 'An error has occurred'\n elif isinstance(e, KeyboardInterrupt):\n msg = 'Interrupted (^C)'\n else:\n msg = 'An unexpected error has occurred'\n _log_and_exit(msg, e, traceback.format_exc())\n", "path": "pre_commit/error_handler.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport contextlib\nimport os.path\nimport sys\nimport traceback\n\nimport six\n\nimport pre_commit.constants as C\nfrom pre_commit import five\nfrom pre_commit import output\nfrom pre_commit.store import Store\n\n\nclass FatalError(RuntimeError):\n pass\n\n\ndef _to_bytes(exc):\n try:\n return bytes(exc)\n except Exception:\n return six.text_type(exc).encode('UTF-8')\n\n\ndef _log_and_exit(msg, exc, formatted):\n error_msg = b''.join((\n five.to_bytes(msg), b': ',\n five.to_bytes(type(exc).__name__), b': ',\n 
_to_bytes(exc), b'\\n',\n ))\n output.write(error_msg)\n store = Store()\n log_path = os.path.join(store.directory, 'pre-commit.log')\n output.write_line('Check the log at {}'.format(log_path))\n\n with open(log_path, 'wb') as log:\n output.write_line(\n '### version information\\n```', stream=log,\n )\n output.write_line(\n 'pre-commit.version: {}'.format(C.VERSION), stream=log,\n )\n output.write_line(\n 'sys.version:\\n{}'.format(\n '\\n'.join(\n [\n ' {}'.format(line)\n for line in sys.version.splitlines()\n ],\n ),\n ),\n stream=log,\n )\n output.write_line(\n 'sys.executable: {}'.format(sys.executable), stream=log,\n )\n output.write_line('os.name: {}'.format(os.name), stream=log)\n output.write_line(\n 'sys.platform: {}\\n```'.format(sys.platform), stream=log,\n )\n output.write_line('### error information\\n```', stream=log)\n output.write(error_msg, stream=log)\n output.write_line(formatted, stream=log)\n output.write('\\n```\\n', stream=log)\n raise SystemExit(1)\n\n\[email protected]\ndef error_handler():\n try:\n yield\n except (Exception, KeyboardInterrupt) as e:\n if isinstance(e, FatalError):\n msg = 'An error has occurred'\n elif isinstance(e, KeyboardInterrupt):\n msg = 'Interrupted (^C)'\n else:\n msg = 'An unexpected error has occurred'\n _log_and_exit(msg, e, traceback.format_exc())\n", "path": "pre_commit/error_handler.py"}]}
| 712 | 378 |
gh_patches_debug_32024
|
rasdani/github-patches
|
git_diff
|
medtagger__MedTagger-391
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove error about not picking category properly
## Current Behavior
When a user accesses the labeling page without choosing the category via the category page, he/she receives an error about not choosing the category properly. While this is necessary to prevent users from accessing this page directly, it makes development more difficult: every time the front-end loads, the developer has to go back to the category page.
## Expected Behavior
There shouldn't be an error about not picking category properly.
## Steps to Reproduce the Problem
1. Go to labeling page `/labeling` without going through category page.
## Additional comment (optional)
We should probably get the category using `queryParams` like before and load the current category on the marker page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/medtagger/api/tasks/service_rest.py`
Content:
```
1 """Module responsible for definition of Tasks service available via HTTP REST API."""
2 from typing import Any
3
4 from flask import request
5 from flask_restplus import Resource
6
7 from medtagger.api import api
8 from medtagger.api.tasks import business, serializers
9 from medtagger.api.security import login_required, role_required
10 from medtagger.database.models import LabelTag
11
12 tasks_ns = api.namespace('tasks', 'Methods related with tasks')
13
14
15 @tasks_ns.route('')
16 class Tasks(Resource):
17 """Endpoint that manages tasks."""
18
19 @staticmethod
20 @login_required
21 @tasks_ns.marshal_with(serializers.out__task)
22 @tasks_ns.doc(security='token')
23 @tasks_ns.doc(description='Return all available tasks.')
24 @tasks_ns.doc(responses={200: 'Success'})
25 def get() -> Any:
26 """Return all available tasks."""
27 return business.get_tasks()
28
29 @staticmethod
30 @login_required
31 @role_required('admin')
32 @tasks_ns.expect(serializers.in__task)
33 @tasks_ns.marshal_with(serializers.out__task)
34 @tasks_ns.doc(security='token')
35 @tasks_ns.doc(description='Create new Task.')
36 @tasks_ns.doc(responses={201: 'Success'})
37 def post() -> Any:
38 """Create new Task."""
39 payload = request.json
40
41 key = payload['key']
42 name = payload['name']
43 image_path = payload['image_path']
44 datasets_keys = payload['datasets_keys']
45 tags = [LabelTag(tag['key'], tag['name'], tag['tools']) for tag in payload['tags']]
46
47 return business.create_task(key, name, image_path, datasets_keys, tags), 201
48
```
Path: `backend/medtagger/api/tasks/business.py`
Content:
```
1 """Module responsible for business logic in all Tasks endpoints."""
2 from typing import List
3
4 from medtagger.database.models import Task, LabelTag
5 from medtagger.repositories import (
6 tasks as TasksRepository,
7 )
8
9
10 def get_tasks() -> List[Task]:
11 """Fetch all tasks.
12
13 :return: list of tasks
14 """
15 return TasksRepository.get_all_tasks()
16
17
18 def create_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:
19 """Create new Task.
20
21 :param key: unique key representing Task
22 :param name: name which describes this Task
23 :param image_path: path to the image which is located on the frontend
24 :param datasets_keys: Keys of Datasets that Task takes Scans from
25 :param tags: Label Tags that will be created and assigned to Task
26 :return: Task object
27 """
28 return TasksRepository.add_task(key, name, image_path, datasets_keys, tags)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/medtagger/api/tasks/business.py b/backend/medtagger/api/tasks/business.py
--- a/backend/medtagger/api/tasks/business.py
+++ b/backend/medtagger/api/tasks/business.py
@@ -1,6 +1,9 @@
"""Module responsible for business logic in all Tasks endpoints."""
from typing import List
+from sqlalchemy.orm.exc import NoResultFound
+
+from medtagger.api.exceptions import NotFoundException
from medtagger.database.models import Task, LabelTag
from medtagger.repositories import (
tasks as TasksRepository,
@@ -15,6 +18,17 @@
return TasksRepository.get_all_tasks()
+def get_task_for_key(task_key: str) -> Task:
+ """Fetch Task for given key.
+
+ :return: Task
+ """
+ try:
+ return TasksRepository.get_task_by_key(task_key)
+ except NoResultFound:
+ raise NotFoundException('Did not found task for {} key!'.format(task_key))
+
+
def create_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:
"""Create new Task.
diff --git a/backend/medtagger/api/tasks/service_rest.py b/backend/medtagger/api/tasks/service_rest.py
--- a/backend/medtagger/api/tasks/service_rest.py
+++ b/backend/medtagger/api/tasks/service_rest.py
@@ -43,5 +43,19 @@
image_path = payload['image_path']
datasets_keys = payload['datasets_keys']
tags = [LabelTag(tag['key'], tag['name'], tag['tools']) for tag in payload['tags']]
-
return business.create_task(key, name, image_path, datasets_keys, tags), 201
+
+
+@tasks_ns.route('/<string:task_key>')
+class Task(Resource):
+ """Endpoint that manages single task."""
+
+ @staticmethod
+ @login_required
+ @tasks_ns.marshal_with(serializers.out__task)
+ @tasks_ns.doc(security='token')
+ @tasks_ns.doc(description='Get task for given key.')
+ @tasks_ns.doc(responses={200: 'Success', 404: 'Could not find task'})
+ def get(task_key: str) -> Any:
+ """Return task for given key."""
+ return business.get_task_for_key(task_key)
|
{"golden_diff": "diff --git a/backend/medtagger/api/tasks/business.py b/backend/medtagger/api/tasks/business.py\n--- a/backend/medtagger/api/tasks/business.py\n+++ b/backend/medtagger/api/tasks/business.py\n@@ -1,6 +1,9 @@\n \"\"\"Module responsible for business logic in all Tasks endpoints.\"\"\"\n from typing import List\n \n+from sqlalchemy.orm.exc import NoResultFound\n+\n+from medtagger.api.exceptions import NotFoundException\n from medtagger.database.models import Task, LabelTag\n from medtagger.repositories import (\n tasks as TasksRepository,\n@@ -15,6 +18,17 @@\n return TasksRepository.get_all_tasks()\n \n \n+def get_task_for_key(task_key: str) -> Task:\n+ \"\"\"Fetch Task for given key.\n+\n+ :return: Task\n+ \"\"\"\n+ try:\n+ return TasksRepository.get_task_by_key(task_key)\n+ except NoResultFound:\n+ raise NotFoundException('Did not found task for {} key!'.format(task_key))\n+\n+\n def create_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Create new Task.\n \ndiff --git a/backend/medtagger/api/tasks/service_rest.py b/backend/medtagger/api/tasks/service_rest.py\n--- a/backend/medtagger/api/tasks/service_rest.py\n+++ b/backend/medtagger/api/tasks/service_rest.py\n@@ -43,5 +43,19 @@\n image_path = payload['image_path']\n datasets_keys = payload['datasets_keys']\n tags = [LabelTag(tag['key'], tag['name'], tag['tools']) for tag in payload['tags']]\n-\n return business.create_task(key, name, image_path, datasets_keys, tags), 201\n+\n+\n+@tasks_ns.route('/<string:task_key>')\n+class Task(Resource):\n+ \"\"\"Endpoint that manages single task.\"\"\"\n+\n+ @staticmethod\n+ @login_required\n+ @tasks_ns.marshal_with(serializers.out__task)\n+ @tasks_ns.doc(security='token')\n+ @tasks_ns.doc(description='Get task for given key.')\n+ @tasks_ns.doc(responses={200: 'Success', 404: 'Could not find task'})\n+ def get(task_key: str) -> Any:\n+ \"\"\"Return task for given key.\"\"\"\n+ return business.get_task_for_key(task_key)\n", "issue": "Remove error about not picking category properly\n## Current Behavior\r\n\r\nWhen user access labeling page without choosing the category via the category page he/she receives an error about not choosing the category properly. While this is necessary for preventing users accessing this page, it makes development more difficult. Every time when front-end loads, developer has to go back to category page.\r\n\r\n## Expected Behavior\r\n\r\nThere shouldn't be an error about not picking category properly. \r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. 
Go to labeling page `/labeling` without going through category page.\r\n\r\n## Additional comment (optional)\r\n\r\nWe should probably get category using `queryParams` like before and load current category on marker page.\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for definition of Tasks service available via HTTP REST API.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.tasks import business, serializers\nfrom medtagger.api.security import login_required, role_required\nfrom medtagger.database.models import LabelTag\n\ntasks_ns = api.namespace('tasks', 'Methods related with tasks')\n\n\n@tasks_ns.route('')\nclass Tasks(Resource):\n \"\"\"Endpoint that manages tasks.\"\"\"\n\n @staticmethod\n @login_required\n @tasks_ns.marshal_with(serializers.out__task)\n @tasks_ns.doc(security='token')\n @tasks_ns.doc(description='Return all available tasks.')\n @tasks_ns.doc(responses={200: 'Success'})\n def get() -> Any:\n \"\"\"Return all available tasks.\"\"\"\n return business.get_tasks()\n\n @staticmethod\n @login_required\n @role_required('admin')\n @tasks_ns.expect(serializers.in__task)\n @tasks_ns.marshal_with(serializers.out__task)\n @tasks_ns.doc(security='token')\n @tasks_ns.doc(description='Create new Task.')\n @tasks_ns.doc(responses={201: 'Success'})\n def post() -> Any:\n \"\"\"Create new Task.\"\"\"\n payload = request.json\n\n key = payload['key']\n name = payload['name']\n image_path = payload['image_path']\n datasets_keys = payload['datasets_keys']\n tags = [LabelTag(tag['key'], tag['name'], tag['tools']) for tag in payload['tags']]\n\n return business.create_task(key, name, image_path, datasets_keys, tags), 201\n", "path": "backend/medtagger/api/tasks/service_rest.py"}, {"content": "\"\"\"Module responsible for business logic in all Tasks endpoints.\"\"\"\nfrom typing import List\n\nfrom medtagger.database.models import Task, LabelTag\nfrom medtagger.repositories import (\n tasks as TasksRepository,\n)\n\n\ndef get_tasks() -> List[Task]:\n \"\"\"Fetch all tasks.\n\n :return: list of tasks\n \"\"\"\n return TasksRepository.get_all_tasks()\n\n\ndef create_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Create new Task.\n\n :param key: unique key representing Task\n :param name: name which describes this Task\n :param image_path: path to the image which is located on the frontend\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n return TasksRepository.add_task(key, name, image_path, datasets_keys, tags)\n", "path": "backend/medtagger/api/tasks/business.py"}], "after_files": [{"content": "\"\"\"Module responsible for definition of Tasks service available via HTTP REST API.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.tasks import business, serializers\nfrom medtagger.api.security import login_required, role_required\nfrom medtagger.database.models import LabelTag\n\ntasks_ns = api.namespace('tasks', 'Methods related with tasks')\n\n\n@tasks_ns.route('')\nclass Tasks(Resource):\n \"\"\"Endpoint that manages tasks.\"\"\"\n\n @staticmethod\n @login_required\n @tasks_ns.marshal_with(serializers.out__task)\n @tasks_ns.doc(security='token')\n @tasks_ns.doc(description='Return all available 
tasks.')\n @tasks_ns.doc(responses={200: 'Success'})\n def get() -> Any:\n \"\"\"Return all available tasks.\"\"\"\n return business.get_tasks()\n\n @staticmethod\n @login_required\n @role_required('admin')\n @tasks_ns.expect(serializers.in__task)\n @tasks_ns.marshal_with(serializers.out__task)\n @tasks_ns.doc(security='token')\n @tasks_ns.doc(description='Create new Task.')\n @tasks_ns.doc(responses={201: 'Success'})\n def post() -> Any:\n \"\"\"Create new Task.\"\"\"\n payload = request.json\n\n key = payload['key']\n name = payload['name']\n image_path = payload['image_path']\n datasets_keys = payload['datasets_keys']\n tags = [LabelTag(tag['key'], tag['name'], tag['tools']) for tag in payload['tags']]\n return business.create_task(key, name, image_path, datasets_keys, tags), 201\n\n\n@tasks_ns.route('/<string:task_key>')\nclass Task(Resource):\n \"\"\"Endpoint that manages single task.\"\"\"\n\n @staticmethod\n @login_required\n @tasks_ns.marshal_with(serializers.out__task)\n @tasks_ns.doc(security='token')\n @tasks_ns.doc(description='Get task for given key.')\n @tasks_ns.doc(responses={200: 'Success', 404: 'Could not find task'})\n def get(task_key: str) -> Any:\n \"\"\"Return task for given key.\"\"\"\n return business.get_task_for_key(task_key)\n", "path": "backend/medtagger/api/tasks/service_rest.py"}, {"content": "\"\"\"Module responsible for business logic in all Tasks endpoints.\"\"\"\nfrom typing import List\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom medtagger.api.exceptions import NotFoundException\nfrom medtagger.database.models import Task, LabelTag\nfrom medtagger.repositories import (\n tasks as TasksRepository,\n)\n\n\ndef get_tasks() -> List[Task]:\n \"\"\"Fetch all tasks.\n\n :return: list of tasks\n \"\"\"\n return TasksRepository.get_all_tasks()\n\n\ndef get_task_for_key(task_key: str) -> Task:\n \"\"\"Fetch Task for given key.\n\n :return: Task\n \"\"\"\n try:\n return TasksRepository.get_task_by_key(task_key)\n except NoResultFound:\n raise NotFoundException('Did not found task for {} key!'.format(task_key))\n\n\ndef create_task(key: str, name: str, image_path: str, datasets_keys: List[str], tags: List[LabelTag]) -> Task:\n \"\"\"Create new Task.\n\n :param key: unique key representing Task\n :param name: name which describes this Task\n :param image_path: path to the image which is located on the frontend\n :param datasets_keys: Keys of Datasets that Task takes Scans from\n :param tags: Label Tags that will be created and assigned to Task\n :return: Task object\n \"\"\"\n return TasksRepository.add_task(key, name, image_path, datasets_keys, tags)\n", "path": "backend/medtagger/api/tasks/business.py"}]}
| 1,140 | 526 |
gh_patches_debug_7569
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1881
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parallel execution ran into "patch does not apply"
I modified a bunch of repos using `pre-commit` in parallel and ran `git commit` at the same time, with unstaged changes. The pre-commit processes printed `[WARNING] Unstaged files detected.`, stashed the changes in a patch, ran, and then tried to reapply the patches.
Some repos failed with:
```
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
Check the log at /Users/chainz/.cache/pre-commit/pre-commit.log
```
It looks like this is due to the use of the Unix timestamp as the only differentiator in patch file paths, causing patches created in parallel to clobber each other.
`pre-commit.log` says:
### version information
```
pre-commit version: 2.12.0
sys.version:
3.9.4 (default, Apr 5 2021, 01:49:30)
[Clang 12.0.0 (clang-1200.0.32.29)]
sys.executable: /usr/local/Cellar/pre-commit/2.12.0/libexec/bin/python3
os.name: posix
sys.platform: darwin
```
### error information
```
An unexpected error has occurred: CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
```
```
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 20, in _git_apply
cmd_output_b('git', *args)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 68, in _unstaged_changes_cleared
_git_apply(patch_filename)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 23, in _git_apply
cmd_output_b('git', '-c', 'core.autocrlf=false', *args)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 20, in _git_apply
cmd_output_b('git', *args)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 357, in main
return hook_impl(
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/commands/hook_impl.py", line 227, in hook_impl
return retv | run(config, store, ns)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 408, in run
return _run_hooks(config, hooks, args, environ)
File "/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 513, in __exit__
raise exc_details[1]
File "/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 498, in __exit__
if cb(*exc_details):
File "/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 124, in __exit__
next(self.gen)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 93, in staged_files_only
yield
File "/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py", line 124, in __exit__
next(self.gen)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 78, in _unstaged_changes_cleared
_git_apply(patch_filename)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py", line 23, in _git_apply
cmd_output_b('git', '-c', 'core.autocrlf=false', *args)
File "/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')
return code: 1
expected return code: 0
stdout: (none)
stderr:
error: patch failed: .github/workflows/main.yml:21
error: .github/workflows/main.yml: patch does not apply
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/staged_files_only.py`
Content:
```
1 import contextlib
2 import logging
3 import os.path
4 import time
5 from typing import Generator
6
7 from pre_commit import git
8 from pre_commit.util import CalledProcessError
9 from pre_commit.util import cmd_output
10 from pre_commit.util import cmd_output_b
11 from pre_commit.xargs import xargs
12
13
14 logger = logging.getLogger('pre_commit')
15
16
17 def _git_apply(patch: str) -> None:
18 args = ('apply', '--whitespace=nowarn', patch)
19 try:
20 cmd_output_b('git', *args)
21 except CalledProcessError:
22 # Retry with autocrlf=false -- see #570
23 cmd_output_b('git', '-c', 'core.autocrlf=false', *args)
24
25
26 @contextlib.contextmanager
27 def _intent_to_add_cleared() -> Generator[None, None, None]:
28 intent_to_add = git.intent_to_add_files()
29 if intent_to_add:
30 logger.warning('Unstaged intent-to-add files detected.')
31
32 xargs(('git', 'rm', '--cached', '--'), intent_to_add)
33 try:
34 yield
35 finally:
36 xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)
37 else:
38 yield
39
40
41 @contextlib.contextmanager
42 def _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:
43 tree = cmd_output('git', 'write-tree')[1].strip()
44 retcode, diff_stdout_binary, _ = cmd_output_b(
45 'git', 'diff-index', '--ignore-submodules', '--binary',
46 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
47 retcode=None,
48 )
49 if retcode and diff_stdout_binary.strip():
50 patch_filename = f'patch{int(time.time())}'
51 patch_filename = os.path.join(patch_dir, patch_filename)
52 logger.warning('Unstaged files detected.')
53 logger.info(f'Stashing unstaged files to {patch_filename}.')
54 # Save the current unstaged changes as a patch
55 os.makedirs(patch_dir, exist_ok=True)
56 with open(patch_filename, 'wb') as patch_file:
57 patch_file.write(diff_stdout_binary)
58
59 # prevent recursive post-checkout hooks (#1418)
60 no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')
61 cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)
62
63 try:
64 yield
65 finally:
66 # Try to apply the patch we saved
67 try:
68 _git_apply(patch_filename)
69 except CalledProcessError:
70 logger.warning(
71 'Stashed changes conflicted with hook auto-fixes... '
72 'Rolling back fixes...',
73 )
74 # We failed to apply the patch, presumably due to fixes made
75 # by hooks.
76 # Roll back the changes made by hooks.
77 cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)
78 _git_apply(patch_filename)
79
80 logger.info(f'Restored changes from {patch_filename}.')
81 else:
82 # There weren't any staged files so we don't need to do anything
83 # special
84 yield
85
86
87 @contextlib.contextmanager
88 def staged_files_only(patch_dir: str) -> Generator[None, None, None]:
89 """Clear any unstaged changes from the git working directory inside this
90 context.
91 """
92 with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):
93 yield
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -47,7 +47,7 @@
retcode=None,
)
if retcode and diff_stdout_binary.strip():
- patch_filename = f'patch{int(time.time())}'
+ patch_filename = f'patch{int(time.time())}-{os.getpid()}'
patch_filename = os.path.join(patch_dir, patch_filename)
logger.warning('Unstaged files detected.')
logger.info(f'Stashing unstaged files to {patch_filename}.')
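
For context, a standalone sketch (not pre-commit code) of the naming scheme this fix adopts: two pre-commit processes launched within the same second previously computed identical patch paths, so one run could clobber the other's stashed patch before it was re-applied; suffixing the PID keeps concurrent runs distinct. The `patch_path` helper below is hypothetical and only mirrors the fixed naming.

```python
# Hypothetical helper illustrating the fixed naming scheme; not part of pre-commit.
import os
import time


def patch_path(patch_dir: str) -> str:
    # The timestamp alone collides for processes started in the same second;
    # the PID disambiguates concurrent runs.
    return os.path.join(patch_dir, f"patch{int(time.time())}-{os.getpid()}")


print(patch_path("/tmp/pre-commit-cache"))
```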
|
{"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -47,7 +47,7 @@\n retcode=None,\n )\n if retcode and diff_stdout_binary.strip():\n- patch_filename = f'patch{int(time.time())}'\n+ patch_filename = f'patch{int(time.time())}-{os.getpid()}'\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(f'Stashing unstaged files to {patch_filename}.')\n", "issue": "Parallel execution ran into \"patch does not apply\"\nI modified a bunch of repos using `pre-commit` in parallel and ran `git commit` at the same time, with unstaged changes. The pre-commit processes did `[WARNING] Unstaged files detected.`, stashed the changes in a pach, ran, and then tried to reapply the patches.\r\n\r\nSome repos failed with:\r\n\r\n\r\n```\r\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n\r\nCheck the log at /Users/chainz/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\nIt looks like this is due to use of the unix timestamp as the only differentiator in patch file paths, causing the parallely-created patches to clobber each other.\r\n\r\n`pre-commit.log` says:\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.12.0\r\nsys.version:\r\n 3.9.4 (default, Apr 5 2021, 01:49:30) \r\n [Clang 12.0.0 (clang-1200.0.32.29)]\r\nsys.executable: /usr/local/Cellar/pre-commit/2.12.0/libexec/bin/python3\r\nos.name: posix\r\nsys.platform: darwin\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n \r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 20, in _git_apply\r\n cmd_output_b('git', *args)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n \r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 68, in _unstaged_changes_cleared\r\n 
_git_apply(patch_filename)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 23, in _git_apply\r\n cmd_output_b('git', '-c', 'core.autocrlf=false', *args)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n \r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 20, in _git_apply\r\n cmd_output_b('git', *args)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n \r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py\", line 65, in error_handler\r\n yield\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/main.py\", line 357, in main\r\n return hook_impl(\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/commands/hook_impl.py\", line 227, in hook_impl\r\n return retv | run(config, store, ns)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py\", line 408, in run\r\n return _run_hooks(config, hooks, args, environ)\r\n File \"/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py\", line 513, in __exit__\r\n raise exc_details[1]\r\n File \"/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py\", line 498, in __exit__\r\n if cb(*exc_details):\r\n File \"/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py\", line 124, in __exit__\r\n next(self.gen)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 93, in staged_files_only\r\n yield\r\n File \"/usr/local/Cellar/[email protected]/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/contextlib.py\", line 124, in __exit__\r\n next(self.gen)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 78, in _unstaged_changes_cleared\r\n _git_apply(patch_filename)\r\n File 
\"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/staged_files_only.py\", line 23, in _git_apply\r\n cmd_output_b('git', '-c', 'core.autocrlf=false', *args)\r\n File \"/usr/local/Cellar/pre-commit/2.12.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/Cellar/git/2.31.1/libexec/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/Users/chainz/.cache/pre-commit/patch1618586253')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n error: patch failed: .github/workflows/main.yml:21\r\n error: .github/workflows/main.yml: patch does not apply\r\n```\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os.path\nimport time\nfrom typing import Generator\n\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch: str) -> None:\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output_b('git', *args)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output_b('git', '-c', 'core.autocrlf=false', *args)\n\n\[email protected]\ndef _intent_to_add_cleared() -> Generator[None, None, None]:\n intent_to_add = git.intent_to_add_files()\n if intent_to_add:\n logger.warning('Unstaged intent-to-add files detected.')\n\n xargs(('git', 'rm', '--cached', '--'), intent_to_add)\n try:\n yield\n finally:\n xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)\n else:\n yield\n\n\[email protected]\ndef _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output_b(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = f'patch{int(time.time())}'\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(f'Stashing unstaged files to {patch_filename}.')\n # Save the current unstaged changes as a patch\n os.makedirs(patch_dir, exist_ok=True)\n with open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # prevent recursive post-checkout hooks (#1418)\n no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')\n cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)\n\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... 
'\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)\n _git_apply(patch_filename)\n\n logger.info(f'Restored changes from {patch_filename}.')\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n\n\[email protected]\ndef staged_files_only(patch_dir: str) -> Generator[None, None, None]:\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "import contextlib\nimport logging\nimport os.path\nimport time\nfrom typing import Generator\n\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch: str) -> None:\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output_b('git', *args)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output_b('git', '-c', 'core.autocrlf=false', *args)\n\n\[email protected]\ndef _intent_to_add_cleared() -> Generator[None, None, None]:\n intent_to_add = git.intent_to_add_files()\n if intent_to_add:\n logger.warning('Unstaged intent-to-add files detected.')\n\n xargs(('git', 'rm', '--cached', '--'), intent_to_add)\n try:\n yield\n finally:\n xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)\n else:\n yield\n\n\[email protected]\ndef _unstaged_changes_cleared(patch_dir: str) -> Generator[None, None, None]:\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output_b(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = f'patch{int(time.time())}-{os.getpid()}'\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(f'Stashing unstaged files to {patch_filename}.')\n # Save the current unstaged changes as a patch\n os.makedirs(patch_dir, exist_ok=True)\n with open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # prevent recursive post-checkout hooks (#1418)\n no_checkout_env = dict(os.environ, _PRE_COMMIT_SKIP_POST_CHECKOUT='1')\n cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)\n\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... 
'\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output_b('git', 'checkout', '--', '.', env=no_checkout_env)\n _git_apply(patch_filename)\n\n logger.info(f'Restored changes from {patch_filename}.')\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n\n\[email protected]\ndef staged_files_only(patch_dir: str) -> Generator[None, None, None]:\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):\n yield\n", "path": "pre_commit/staged_files_only.py"}]}
| 3,338 | 141 |
gh_patches_debug_9568
|
rasdani/github-patches
|
git_diff
|
yt-project__yt-3532
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: wrong axis labels representation in SlicePlot for spherical data
### Bug report
**Bug summary**
There's an inconsistency in math representation (or lack thereof) of axis labels when performing SlicePlot along the "r" direction for spherical data.
**Code for reproduction**
```python
import yt
ds = yt.load_sample("bw_spherical_2d")
p = yt.SlicePlot(ds, "r", "density")
p.save("/tmp/bug.png")
```
**Actual outcome**

**Expected outcome**
I expect the axis labels to use the `\theta` and `\phi` LaTeX glyphs instead of the literal "theta" and "phi" strings, consistent with what happens when slicing along the "theta" direction, for instance

(here the frame is wrong; that was already reported as #3529)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/geometry/coordinates/spherical_coordinates.py`
Content:
```
1 import numpy as np
2
3 from yt.utilities.lib.pixelization_routines import pixelize_aitoff, pixelize_cylinder
4
5 from .coordinate_handler import CoordinateHandler, _get_coord_fields, _unknown_coord
6
7
8 class SphericalCoordinateHandler(CoordinateHandler):
9 name = "spherical"
10
11 def __init__(self, ds, ordering=("r", "theta", "phi")):
12 super().__init__(ds, ordering)
13 # Generate
14 self.image_units = {}
15 self.image_units[self.axis_id["r"]] = ("rad", "rad")
16 self.image_units[self.axis_id["theta"]] = (None, None)
17 self.image_units[self.axis_id["phi"]] = (None, None)
18
19 def setup_fields(self, registry):
20 # return the fields for r, z, theta
21 registry.add_field(
22 ("index", "dx"), sampling_type="cell", function=_unknown_coord
23 )
24
25 registry.add_field(
26 ("index", "dy"), sampling_type="cell", function=_unknown_coord
27 )
28
29 registry.add_field(
30 ("index", "dz"), sampling_type="cell", function=_unknown_coord
31 )
32
33 registry.add_field(
34 ("index", "x"), sampling_type="cell", function=_unknown_coord
35 )
36
37 registry.add_field(
38 ("index", "y"), sampling_type="cell", function=_unknown_coord
39 )
40
41 registry.add_field(
42 ("index", "z"), sampling_type="cell", function=_unknown_coord
43 )
44
45 f1, f2 = _get_coord_fields(self.axis_id["r"])
46 registry.add_field(
47 ("index", "dr"),
48 sampling_type="cell",
49 function=f1,
50 display_field=False,
51 units="code_length",
52 )
53
54 registry.add_field(
55 ("index", "r"),
56 sampling_type="cell",
57 function=f2,
58 display_field=False,
59 units="code_length",
60 )
61
62 f1, f2 = _get_coord_fields(self.axis_id["theta"], "")
63 registry.add_field(
64 ("index", "dtheta"),
65 sampling_type="cell",
66 function=f1,
67 display_field=False,
68 units="",
69 )
70
71 registry.add_field(
72 ("index", "theta"),
73 sampling_type="cell",
74 function=f2,
75 display_field=False,
76 units="",
77 )
78
79 f1, f2 = _get_coord_fields(self.axis_id["phi"], "")
80 registry.add_field(
81 ("index", "dphi"),
82 sampling_type="cell",
83 function=f1,
84 display_field=False,
85 units="",
86 )
87
88 registry.add_field(
89 ("index", "phi"),
90 sampling_type="cell",
91 function=f2,
92 display_field=False,
93 units="",
94 )
95
96 def _SphericalVolume(field, data):
97 # Here we compute the spherical volume element exactly
98 r = data["index", "r"]
99 dr = data["index", "dr"]
100 theta = data["index", "theta"]
101 dtheta = data["index", "dtheta"]
102 vol = ((r + 0.5 * dr) ** 3 - (r - 0.5 * dr) ** 3) / 3.0
103 vol *= np.cos(theta - 0.5 * dtheta) - np.cos(theta + 0.5 * dtheta)
104 vol *= data["index", "dphi"]
105 return vol
106
107 registry.add_field(
108 ("index", "cell_volume"),
109 sampling_type="cell",
110 function=_SphericalVolume,
111 units="code_length**3",
112 )
113 registry.alias(("index", "volume"), ("index", "cell_volume"))
114
115 def _path_r(field, data):
116 return data["index", "dr"]
117
118 registry.add_field(
119 ("index", "path_element_r"),
120 sampling_type="cell",
121 function=_path_r,
122 units="code_length",
123 )
124
125 def _path_theta(field, data):
126 # Note: this already assumes cell-centered
127 return data["index", "r"] * data["index", "dtheta"]
128
129 registry.add_field(
130 ("index", "path_element_theta"),
131 sampling_type="cell",
132 function=_path_theta,
133 units="code_length",
134 )
135
136 def _path_phi(field, data):
137 # Note: this already assumes cell-centered
138 return (
139 data["index", "r"]
140 * data["index", "dphi"]
141 * np.sin(data["index", "theta"])
142 )
143
144 registry.add_field(
145 ("index", "path_element_phi"),
146 sampling_type="cell",
147 function=_path_phi,
148 units="code_length",
149 )
150
151 def pixelize(
152 self, dimension, data_source, field, bounds, size, antialias=True, periodic=True
153 ):
154 self.period
155 name = self.axis_name[dimension]
156 if name == "r":
157 return self._ortho_pixelize(
158 data_source, field, bounds, size, antialias, dimension, periodic
159 )
160 elif name in ("theta", "phi"):
161 return self._cyl_pixelize(
162 data_source, field, bounds, size, antialias, dimension
163 )
164 else:
165 raise NotImplementedError
166
167 def pixelize_line(self, field, start_point, end_point, npoints):
168 raise NotImplementedError
169
170 def _ortho_pixelize(
171 self, data_source, field, bounds, size, antialias, dim, periodic
172 ):
173 buff = pixelize_aitoff(
174 data_source["py"],
175 data_source["pdy"],
176 data_source["px"],
177 data_source["pdx"],
178 size,
179 data_source[field],
180 None,
181 None,
182 theta_offset=0,
183 phi_offset=0,
184 ).transpose()
185 return buff
186
187 def _cyl_pixelize(self, data_source, field, bounds, size, antialias, dimension):
188 name = self.axis_name[dimension]
189 buff = np.full((size[1], size[0]), np.nan, dtype="f8")
190 if name == "theta":
191 pixelize_cylinder(
192 buff,
193 data_source["px"],
194 data_source["pdx"],
195 data_source["py"],
196 data_source["pdy"],
197 data_source[field],
198 bounds,
199 )
200 elif name == "phi":
201 # Note that we feed in buff.T here
202 pixelize_cylinder(
203 buff.T,
204 data_source["px"],
205 data_source["pdx"],
206 data_source["py"],
207 data_source["pdy"],
208 data_source[field],
209 bounds,
210 )
211 else:
212 raise RuntimeError
213 self.sanitize_buffer_fill_values(buff)
214 return buff
215
216 def convert_from_cartesian(self, coord):
217 raise NotImplementedError
218
219 def convert_to_cartesian(self, coord):
220 if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
221 ri = self.axis_id["r"]
222 thetai = self.axis_id["theta"]
223 phii = self.axis_id["phi"]
224 r = coord[:, ri]
225 theta = coord[:, thetai]
226 phi = coord[:, phii]
227 nc = np.zeros_like(coord)
228 # r, theta, phi
229 nc[:, ri] = np.cos(phi) * np.sin(theta) * r
230 nc[:, thetai] = np.sin(phi) * np.sin(theta) * r
231 nc[:, phii] = np.cos(theta) * r
232 else:
233 r, theta, phi = coord
234 nc = (
235 np.cos(phi) * np.sin(theta) * r,
236 np.sin(phi) * np.sin(theta) * r,
237 np.cos(theta) * r,
238 )
239 return nc
240
241 def convert_to_cylindrical(self, coord):
242 raise NotImplementedError
243
244 def convert_from_cylindrical(self, coord):
245 raise NotImplementedError
246
247 def convert_to_spherical(self, coord):
248 raise NotImplementedError
249
250 def convert_from_spherical(self, coord):
251 raise NotImplementedError
252
253 _image_axis_name = None
254
255 @property
256 def image_axis_name(self):
257 if self._image_axis_name is not None:
258 return self._image_axis_name
259 # This is the x and y axes labels that get displayed. For
260 # non-Cartesian coordinates, we usually want to override these for
261 # Cartesian coordinates, since we transform them.
262 rv = {
263 self.axis_id["r"]: ("theta", "phi"),
264 self.axis_id["theta"]: ("x / \\sin(\\theta)", "y / \\sin(\\theta)"),
265 self.axis_id["phi"]: ("R", "z"),
266 }
267 for i in list(rv.keys()):
268 rv[self.axis_name[i]] = rv[i]
269 rv[self.axis_name[i].capitalize()] = rv[i]
270 self._image_axis_name = rv
271 return rv
272
273 _x_pairs = (("r", "theta"), ("theta", "r"), ("phi", "r"))
274 _y_pairs = (("r", "phi"), ("theta", "phi"), ("phi", "theta"))
275
276 @property
277 def period(self):
278 return self.ds.domain_width
279
280 def sanitize_center(self, center, axis):
281 center, display_center = super().sanitize_center(center, axis)
282 name = self.axis_name[axis]
283 if name == "r":
284 display_center = center
285 elif name == "theta":
286 display_center = (
287 0.0 * display_center[0],
288 0.0 * display_center[1],
289 0.0 * display_center[2],
290 )
291 elif name == "phi":
292 display_center = [
293 self.ds.domain_width[0] / 2.0 + self.ds.domain_left_edge[0],
294 0.0 * display_center[1],
295 0.0 * display_center[2],
296 ]
297 ri = self.axis_id["r"]
298 c = self.ds.domain_width[ri] / 2.0 + self.ds.domain_left_edge[ri]
299 display_center[ri] = c
300 display_center = tuple(display_center)
301 return center, display_center
302
303 def sanitize_width(self, axis, width, depth):
304 name = self.axis_name[axis]
305 if width is not None:
306 width = super().sanitize_width(axis, width, depth)
307 elif name == "r":
308 width = [
309 self.ds.domain_width[self.x_axis["r"]],
310 self.ds.domain_width[self.y_axis["r"]],
311 ]
312 elif name == "theta":
313 ri = self.axis_id["r"]
314 # Remember, in spherical coordinates when we cut in theta,
315 # we create a conic section
316 width = [2.0 * self.ds.domain_width[ri], 2.0 * self.ds.domain_width[ri]]
317 elif name == "phi":
318 ri = self.axis_id["r"]
319 width = [self.ds.domain_right_edge[ri], 2.0 * self.ds.domain_width[ri]]
320 return width
321
322 def _sanity_check(self):
323 """This prints out a handful of diagnostics that help verify the
324 dataset is well formed."""
325 # We just check a few things here.
326 dd = self.ds.all_data()
327 r0 = self.ds.domain_left_edge[self.axis_id["r"]]
328 r1 = self.ds.domain_right_edge[self.axis_id["r"]]
329 v1 = 4.0 * np.pi / 3.0 * (r1 ** 3 - r0 ** 3)
330 print(f"Total volume should be 4*pi*r**3 = {v1:0.16e}")
331 v2 = dd.quantities.total_quantity("cell_volume")
332 print(f"Actual volume is {v2:0.16e}")
333 print(f"Relative difference: {np.abs(v2 - v1) / (v2 + v1):0.16e}")
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt/geometry/coordinates/spherical_coordinates.py b/yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -260,7 +260,7 @@
# non-Cartesian coordinates, we usually want to override these for
# Cartesian coordinates, since we transform them.
rv = {
- self.axis_id["r"]: ("theta", "phi"),
+ self.axis_id["r"]: ("\\theta", "\\phi"),
self.axis_id["theta"]: ("x / \\sin(\\theta)", "y / \\sin(\\theta)"),
self.axis_id["phi"]: ("R", "z"),
}
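
As a sanity check on why the escaped strings matter, here is a standalone matplotlib sketch (not yt code; it assumes yt ultimately places these axis-name strings inside math mode when composing the plot labels): plain "theta"/"phi" text stays as words, whereas `\theta`/`\phi` inside `$...$` render as the Greek glyphs.

```python
# Standalone illustration, independent of yt: mathtext renders the TeX
# commands as Greek letters, while plain words stay as plain words.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlabel(r"$\theta$")  # Greek glyph
ax.set_ylabel(r"$\phi$")    # Greek glyph
ax.set_title("plain 'theta' / 'phi' vs. math-mode labels")
fig.savefig("label_demo.png")
```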
|
{"golden_diff": "diff --git a/yt/geometry/coordinates/spherical_coordinates.py b/yt/geometry/coordinates/spherical_coordinates.py\n--- a/yt/geometry/coordinates/spherical_coordinates.py\n+++ b/yt/geometry/coordinates/spherical_coordinates.py\n@@ -260,7 +260,7 @@\n # non-Cartesian coordinates, we usually want to override these for\n # Cartesian coordinates, since we transform them.\n rv = {\n- self.axis_id[\"r\"]: (\"theta\", \"phi\"),\n+ self.axis_id[\"r\"]: (\"\\\\theta\", \"\\\\phi\"),\n self.axis_id[\"theta\"]: (\"x / \\\\sin(\\\\theta)\", \"y / \\\\sin(\\\\theta)\"),\n self.axis_id[\"phi\"]: (\"R\", \"z\"),\n }\n", "issue": "BUG: wrong axis labels representation in SlicePlot for spherical data\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nThere's an inconsistency in math representation (or lack thereof) of axis labels when performing SlicePlot along the \"r\" direction for spherical data.\r\n\r\n**Code for reproduction**\r\n\r\n\r\n```python\r\nimport yt\r\nds = yt.load_sample(\"bw_spherical_2d\")\r\np = yt.SlicePlot(ds, \"r\", \"density\")\r\np.save(\"/tmp/bug.png\")\r\n```\r\n\r\n**Actual outcome**\r\n\r\n\r\n\r\n**Expected outcome**\r\n\r\nI expect axis labels to use `\\theta` and `\\phi` latex glyphs instead of \"theta\" and \"phi\" strings, consistently with what happens when slicing along the \"theta\" direction, for instance\r\n\r\n\r\n(here the frame is wrong, this was already reported as #3529)\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom yt.utilities.lib.pixelization_routines import pixelize_aitoff, pixelize_cylinder\n\nfrom .coordinate_handler import CoordinateHandler, _get_coord_fields, _unknown_coord\n\n\nclass SphericalCoordinateHandler(CoordinateHandler):\n name = \"spherical\"\n\n def __init__(self, ds, ordering=(\"r\", \"theta\", \"phi\")):\n super().__init__(ds, ordering)\n # Generate\n self.image_units = {}\n self.image_units[self.axis_id[\"r\"]] = (\"rad\", \"rad\")\n self.image_units[self.axis_id[\"theta\"]] = (None, None)\n self.image_units[self.axis_id[\"phi\"]] = (None, None)\n\n def setup_fields(self, registry):\n # return the fields for r, z, theta\n registry.add_field(\n (\"index\", \"dx\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"dy\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"dz\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"x\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"y\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"z\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"r\"])\n registry.add_field(\n (\"index\", \"dr\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"code_length\",\n )\n\n registry.add_field(\n (\"index\", \"r\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"code_length\",\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"theta\"], \"\")\n registry.add_field(\n (\"index\", \"dtheta\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"\",\n )\n\n registry.add_field(\n (\"index\", \"theta\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"\",\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"phi\"], \"\")\n registry.add_field(\n (\"index\", \"dphi\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n 
units=\"\",\n )\n\n registry.add_field(\n (\"index\", \"phi\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"\",\n )\n\n def _SphericalVolume(field, data):\n # Here we compute the spherical volume element exactly\n r = data[\"index\", \"r\"]\n dr = data[\"index\", \"dr\"]\n theta = data[\"index\", \"theta\"]\n dtheta = data[\"index\", \"dtheta\"]\n vol = ((r + 0.5 * dr) ** 3 - (r - 0.5 * dr) ** 3) / 3.0\n vol *= np.cos(theta - 0.5 * dtheta) - np.cos(theta + 0.5 * dtheta)\n vol *= data[\"index\", \"dphi\"]\n return vol\n\n registry.add_field(\n (\"index\", \"cell_volume\"),\n sampling_type=\"cell\",\n function=_SphericalVolume,\n units=\"code_length**3\",\n )\n registry.alias((\"index\", \"volume\"), (\"index\", \"cell_volume\"))\n\n def _path_r(field, data):\n return data[\"index\", \"dr\"]\n\n registry.add_field(\n (\"index\", \"path_element_r\"),\n sampling_type=\"cell\",\n function=_path_r,\n units=\"code_length\",\n )\n\n def _path_theta(field, data):\n # Note: this already assumes cell-centered\n return data[\"index\", \"r\"] * data[\"index\", \"dtheta\"]\n\n registry.add_field(\n (\"index\", \"path_element_theta\"),\n sampling_type=\"cell\",\n function=_path_theta,\n units=\"code_length\",\n )\n\n def _path_phi(field, data):\n # Note: this already assumes cell-centered\n return (\n data[\"index\", \"r\"]\n * data[\"index\", \"dphi\"]\n * np.sin(data[\"index\", \"theta\"])\n )\n\n registry.add_field(\n (\"index\", \"path_element_phi\"),\n sampling_type=\"cell\",\n function=_path_phi,\n units=\"code_length\",\n )\n\n def pixelize(\n self, dimension, data_source, field, bounds, size, antialias=True, periodic=True\n ):\n self.period\n name = self.axis_name[dimension]\n if name == \"r\":\n return self._ortho_pixelize(\n data_source, field, bounds, size, antialias, dimension, periodic\n )\n elif name in (\"theta\", \"phi\"):\n return self._cyl_pixelize(\n data_source, field, bounds, size, antialias, dimension\n )\n else:\n raise NotImplementedError\n\n def pixelize_line(self, field, start_point, end_point, npoints):\n raise NotImplementedError\n\n def _ortho_pixelize(\n self, data_source, field, bounds, size, antialias, dim, periodic\n ):\n buff = pixelize_aitoff(\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[\"px\"],\n data_source[\"pdx\"],\n size,\n data_source[field],\n None,\n None,\n theta_offset=0,\n phi_offset=0,\n ).transpose()\n return buff\n\n def _cyl_pixelize(self, data_source, field, bounds, size, antialias, dimension):\n name = self.axis_name[dimension]\n buff = np.full((size[1], size[0]), np.nan, dtype=\"f8\")\n if name == \"theta\":\n pixelize_cylinder(\n buff,\n data_source[\"px\"],\n data_source[\"pdx\"],\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n )\n elif name == \"phi\":\n # Note that we feed in buff.T here\n pixelize_cylinder(\n buff.T,\n data_source[\"px\"],\n data_source[\"pdx\"],\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n )\n else:\n raise RuntimeError\n self.sanitize_buffer_fill_values(buff)\n return buff\n\n def convert_from_cartesian(self, coord):\n raise NotImplementedError\n\n def convert_to_cartesian(self, coord):\n if isinstance(coord, np.ndarray) and len(coord.shape) > 1:\n ri = self.axis_id[\"r\"]\n thetai = self.axis_id[\"theta\"]\n phii = self.axis_id[\"phi\"]\n r = coord[:, ri]\n theta = coord[:, thetai]\n phi = coord[:, phii]\n nc = np.zeros_like(coord)\n # r, theta, phi\n nc[:, ri] = np.cos(phi) * np.sin(theta) * r\n nc[:, thetai] = 
np.sin(phi) * np.sin(theta) * r\n nc[:, phii] = np.cos(theta) * r\n else:\n r, theta, phi = coord\n nc = (\n np.cos(phi) * np.sin(theta) * r,\n np.sin(phi) * np.sin(theta) * r,\n np.cos(theta) * r,\n )\n return nc\n\n def convert_to_cylindrical(self, coord):\n raise NotImplementedError\n\n def convert_from_cylindrical(self, coord):\n raise NotImplementedError\n\n def convert_to_spherical(self, coord):\n raise NotImplementedError\n\n def convert_from_spherical(self, coord):\n raise NotImplementedError\n\n _image_axis_name = None\n\n @property\n def image_axis_name(self):\n if self._image_axis_name is not None:\n return self._image_axis_name\n # This is the x and y axes labels that get displayed. For\n # non-Cartesian coordinates, we usually want to override these for\n # Cartesian coordinates, since we transform them.\n rv = {\n self.axis_id[\"r\"]: (\"theta\", \"phi\"),\n self.axis_id[\"theta\"]: (\"x / \\\\sin(\\\\theta)\", \"y / \\\\sin(\\\\theta)\"),\n self.axis_id[\"phi\"]: (\"R\", \"z\"),\n }\n for i in list(rv.keys()):\n rv[self.axis_name[i]] = rv[i]\n rv[self.axis_name[i].capitalize()] = rv[i]\n self._image_axis_name = rv\n return rv\n\n _x_pairs = ((\"r\", \"theta\"), (\"theta\", \"r\"), (\"phi\", \"r\"))\n _y_pairs = ((\"r\", \"phi\"), (\"theta\", \"phi\"), (\"phi\", \"theta\"))\n\n @property\n def period(self):\n return self.ds.domain_width\n\n def sanitize_center(self, center, axis):\n center, display_center = super().sanitize_center(center, axis)\n name = self.axis_name[axis]\n if name == \"r\":\n display_center = center\n elif name == \"theta\":\n display_center = (\n 0.0 * display_center[0],\n 0.0 * display_center[1],\n 0.0 * display_center[2],\n )\n elif name == \"phi\":\n display_center = [\n self.ds.domain_width[0] / 2.0 + self.ds.domain_left_edge[0],\n 0.0 * display_center[1],\n 0.0 * display_center[2],\n ]\n ri = self.axis_id[\"r\"]\n c = self.ds.domain_width[ri] / 2.0 + self.ds.domain_left_edge[ri]\n display_center[ri] = c\n display_center = tuple(display_center)\n return center, display_center\n\n def sanitize_width(self, axis, width, depth):\n name = self.axis_name[axis]\n if width is not None:\n width = super().sanitize_width(axis, width, depth)\n elif name == \"r\":\n width = [\n self.ds.domain_width[self.x_axis[\"r\"]],\n self.ds.domain_width[self.y_axis[\"r\"]],\n ]\n elif name == \"theta\":\n ri = self.axis_id[\"r\"]\n # Remember, in spherical coordinates when we cut in theta,\n # we create a conic section\n width = [2.0 * self.ds.domain_width[ri], 2.0 * self.ds.domain_width[ri]]\n elif name == \"phi\":\n ri = self.axis_id[\"r\"]\n width = [self.ds.domain_right_edge[ri], 2.0 * self.ds.domain_width[ri]]\n return width\n\n def _sanity_check(self):\n \"\"\"This prints out a handful of diagnostics that help verify the\n dataset is well formed.\"\"\"\n # We just check a few things here.\n dd = self.ds.all_data()\n r0 = self.ds.domain_left_edge[self.axis_id[\"r\"]]\n r1 = self.ds.domain_right_edge[self.axis_id[\"r\"]]\n v1 = 4.0 * np.pi / 3.0 * (r1 ** 3 - r0 ** 3)\n print(f\"Total volume should be 4*pi*r**3 = {v1:0.16e}\")\n v2 = dd.quantities.total_quantity(\"cell_volume\")\n print(f\"Actual volume is {v2:0.16e}\")\n print(f\"Relative difference: {np.abs(v2 - v1) / (v2 + v1):0.16e}\")\n", "path": "yt/geometry/coordinates/spherical_coordinates.py"}], "after_files": [{"content": "import numpy as np\n\nfrom yt.utilities.lib.pixelization_routines import pixelize_aitoff, pixelize_cylinder\n\nfrom .coordinate_handler import CoordinateHandler, _get_coord_fields, 
_unknown_coord\n\n\nclass SphericalCoordinateHandler(CoordinateHandler):\n name = \"spherical\"\n\n def __init__(self, ds, ordering=(\"r\", \"theta\", \"phi\")):\n super().__init__(ds, ordering)\n # Generate\n self.image_units = {}\n self.image_units[self.axis_id[\"r\"]] = (\"rad\", \"rad\")\n self.image_units[self.axis_id[\"theta\"]] = (None, None)\n self.image_units[self.axis_id[\"phi\"]] = (None, None)\n\n def setup_fields(self, registry):\n # return the fields for r, z, theta\n registry.add_field(\n (\"index\", \"dx\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"dy\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"dz\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"x\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"y\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n registry.add_field(\n (\"index\", \"z\"), sampling_type=\"cell\", function=_unknown_coord\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"r\"])\n registry.add_field(\n (\"index\", \"dr\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"code_length\",\n )\n\n registry.add_field(\n (\"index\", \"r\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"code_length\",\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"theta\"], \"\")\n registry.add_field(\n (\"index\", \"dtheta\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"\",\n )\n\n registry.add_field(\n (\"index\", \"theta\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"\",\n )\n\n f1, f2 = _get_coord_fields(self.axis_id[\"phi\"], \"\")\n registry.add_field(\n (\"index\", \"dphi\"),\n sampling_type=\"cell\",\n function=f1,\n display_field=False,\n units=\"\",\n )\n\n registry.add_field(\n (\"index\", \"phi\"),\n sampling_type=\"cell\",\n function=f2,\n display_field=False,\n units=\"\",\n )\n\n def _SphericalVolume(field, data):\n # Here we compute the spherical volume element exactly\n r = data[\"index\", \"r\"]\n dr = data[\"index\", \"dr\"]\n theta = data[\"index\", \"theta\"]\n dtheta = data[\"index\", \"dtheta\"]\n vol = ((r + 0.5 * dr) ** 3 - (r - 0.5 * dr) ** 3) / 3.0\n vol *= np.cos(theta - 0.5 * dtheta) - np.cos(theta + 0.5 * dtheta)\n vol *= data[\"index\", \"dphi\"]\n return vol\n\n registry.add_field(\n (\"index\", \"cell_volume\"),\n sampling_type=\"cell\",\n function=_SphericalVolume,\n units=\"code_length**3\",\n )\n registry.alias((\"index\", \"volume\"), (\"index\", \"cell_volume\"))\n\n def _path_r(field, data):\n return data[\"index\", \"dr\"]\n\n registry.add_field(\n (\"index\", \"path_element_r\"),\n sampling_type=\"cell\",\n function=_path_r,\n units=\"code_length\",\n )\n\n def _path_theta(field, data):\n # Note: this already assumes cell-centered\n return data[\"index\", \"r\"] * data[\"index\", \"dtheta\"]\n\n registry.add_field(\n (\"index\", \"path_element_theta\"),\n sampling_type=\"cell\",\n function=_path_theta,\n units=\"code_length\",\n )\n\n def _path_phi(field, data):\n # Note: this already assumes cell-centered\n return (\n data[\"index\", \"r\"]\n * data[\"index\", \"dphi\"]\n * np.sin(data[\"index\", \"theta\"])\n )\n\n registry.add_field(\n (\"index\", \"path_element_phi\"),\n sampling_type=\"cell\",\n function=_path_phi,\n units=\"code_length\",\n )\n\n def pixelize(\n self, dimension, data_source, field, bounds, size, 
antialias=True, periodic=True\n ):\n self.period\n name = self.axis_name[dimension]\n if name == \"r\":\n return self._ortho_pixelize(\n data_source, field, bounds, size, antialias, dimension, periodic\n )\n elif name in (\"theta\", \"phi\"):\n return self._cyl_pixelize(\n data_source, field, bounds, size, antialias, dimension\n )\n else:\n raise NotImplementedError\n\n def pixelize_line(self, field, start_point, end_point, npoints):\n raise NotImplementedError\n\n def _ortho_pixelize(\n self, data_source, field, bounds, size, antialias, dim, periodic\n ):\n buff = pixelize_aitoff(\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[\"px\"],\n data_source[\"pdx\"],\n size,\n data_source[field],\n None,\n None,\n theta_offset=0,\n phi_offset=0,\n ).transpose()\n return buff\n\n def _cyl_pixelize(self, data_source, field, bounds, size, antialias, dimension):\n name = self.axis_name[dimension]\n buff = np.full((size[1], size[0]), np.nan, dtype=\"f8\")\n if name == \"theta\":\n pixelize_cylinder(\n buff,\n data_source[\"px\"],\n data_source[\"pdx\"],\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n )\n elif name == \"phi\":\n # Note that we feed in buff.T here\n pixelize_cylinder(\n buff.T,\n data_source[\"px\"],\n data_source[\"pdx\"],\n data_source[\"py\"],\n data_source[\"pdy\"],\n data_source[field],\n bounds,\n )\n else:\n raise RuntimeError\n self.sanitize_buffer_fill_values(buff)\n return buff\n\n def convert_from_cartesian(self, coord):\n raise NotImplementedError\n\n def convert_to_cartesian(self, coord):\n if isinstance(coord, np.ndarray) and len(coord.shape) > 1:\n ri = self.axis_id[\"r\"]\n thetai = self.axis_id[\"theta\"]\n phii = self.axis_id[\"phi\"]\n r = coord[:, ri]\n theta = coord[:, thetai]\n phi = coord[:, phii]\n nc = np.zeros_like(coord)\n # r, theta, phi\n nc[:, ri] = np.cos(phi) * np.sin(theta) * r\n nc[:, thetai] = np.sin(phi) * np.sin(theta) * r\n nc[:, phii] = np.cos(theta) * r\n else:\n r, theta, phi = coord\n nc = (\n np.cos(phi) * np.sin(theta) * r,\n np.sin(phi) * np.sin(theta) * r,\n np.cos(theta) * r,\n )\n return nc\n\n def convert_to_cylindrical(self, coord):\n raise NotImplementedError\n\n def convert_from_cylindrical(self, coord):\n raise NotImplementedError\n\n def convert_to_spherical(self, coord):\n raise NotImplementedError\n\n def convert_from_spherical(self, coord):\n raise NotImplementedError\n\n _image_axis_name = None\n\n @property\n def image_axis_name(self):\n if self._image_axis_name is not None:\n return self._image_axis_name\n # This is the x and y axes labels that get displayed. 
For\n # non-Cartesian coordinates, we usually want to override these for\n # Cartesian coordinates, since we transform them.\n rv = {\n self.axis_id[\"r\"]: (\"\\\\theta\", \"\\\\phi\"),\n self.axis_id[\"theta\"]: (\"x / \\\\sin(\\\\theta)\", \"y / \\\\sin(\\\\theta)\"),\n self.axis_id[\"phi\"]: (\"R\", \"z\"),\n }\n for i in list(rv.keys()):\n rv[self.axis_name[i]] = rv[i]\n rv[self.axis_name[i].capitalize()] = rv[i]\n self._image_axis_name = rv\n return rv\n\n _x_pairs = ((\"r\", \"theta\"), (\"theta\", \"r\"), (\"phi\", \"r\"))\n _y_pairs = ((\"r\", \"phi\"), (\"theta\", \"phi\"), (\"phi\", \"theta\"))\n\n @property\n def period(self):\n return self.ds.domain_width\n\n def sanitize_center(self, center, axis):\n center, display_center = super().sanitize_center(center, axis)\n name = self.axis_name[axis]\n if name == \"r\":\n display_center = center\n elif name == \"theta\":\n display_center = (\n 0.0 * display_center[0],\n 0.0 * display_center[1],\n 0.0 * display_center[2],\n )\n elif name == \"phi\":\n display_center = [\n self.ds.domain_width[0] / 2.0 + self.ds.domain_left_edge[0],\n 0.0 * display_center[1],\n 0.0 * display_center[2],\n ]\n ri = self.axis_id[\"r\"]\n c = self.ds.domain_width[ri] / 2.0 + self.ds.domain_left_edge[ri]\n display_center[ri] = c\n display_center = tuple(display_center)\n return center, display_center\n\n def sanitize_width(self, axis, width, depth):\n name = self.axis_name[axis]\n if width is not None:\n width = super().sanitize_width(axis, width, depth)\n elif name == \"r\":\n width = [\n self.ds.domain_width[self.x_axis[\"r\"]],\n self.ds.domain_width[self.y_axis[\"r\"]],\n ]\n elif name == \"theta\":\n ri = self.axis_id[\"r\"]\n # Remember, in spherical coordinates when we cut in theta,\n # we create a conic section\n width = [2.0 * self.ds.domain_width[ri], 2.0 * self.ds.domain_width[ri]]\n elif name == \"phi\":\n ri = self.axis_id[\"r\"]\n width = [self.ds.domain_right_edge[ri], 2.0 * self.ds.domain_width[ri]]\n return width\n\n def _sanity_check(self):\n \"\"\"This prints out a handful of diagnostics that help verify the\n dataset is well formed.\"\"\"\n # We just check a few things here.\n dd = self.ds.all_data()\n r0 = self.ds.domain_left_edge[self.axis_id[\"r\"]]\n r1 = self.ds.domain_right_edge[self.axis_id[\"r\"]]\n v1 = 4.0 * np.pi / 3.0 * (r1 ** 3 - r0 ** 3)\n print(f\"Total volume should be 4*pi*r**3 = {v1:0.16e}\")\n v2 = dd.quantities.total_quantity(\"cell_volume\")\n print(f\"Actual volume is {v2:0.16e}\")\n print(f\"Relative difference: {np.abs(v2 - v1) / (v2 + v1):0.16e}\")\n", "path": "yt/geometry/coordinates/spherical_coordinates.py"}]}
| 4,007 | 166 |
gh_patches_debug_16371
|
rasdani/github-patches
|
git_diff
|
encode__starlette-163
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Routes that were working on 0.5.5 not working on 0.6.2
Greetings, thanks for the project.
I am having issues after upgrading: it seems one of my routes is being converted to a coroutine somewhere, never awaited, and then passed down through all the middleware.
```
Traceback (most recent call last):
File "../lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 387, in run_asgi
result = await asgi(self.receive, self.send)
File "../lib/python3.7/site-packages/uvicorn/middleware/message_logger.py", line 59, in __call__
await self.inner(self.receive, self.send)
File "../lib/python3.7/site-packages/uvicorn/middleware/debug.py", line 80, in __call__
await asgi(receive, self.send)
File "../lib/python3.7/site-packages/starlette/exceptions.py", line 69, in app
raise exc from None
File "../lib/python3.7/site-packages/starlette/exceptions.py", line 61, in app
await instance(receive, sender)
TypeError: 'coroutine' object is not callable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/routing.py`
Content:
```
1 import asyncio
2 import inspect
3 import re
4 import typing
5 from concurrent.futures import ThreadPoolExecutor
6
7 from starlette.datastructures import URL
8 from starlette.exceptions import HTTPException
9 from starlette.graphql import GraphQLApp
10 from starlette.requests import Request
11 from starlette.responses import PlainTextResponse
12 from starlette.types import ASGIApp, ASGIInstance, Receive, Scope, Send
13 from starlette.websockets import WebSocket, WebSocketClose
14
15
16 class NoMatchFound(Exception):
17 pass
18
19
20 def request_response(func: typing.Callable) -> ASGIApp:
21 """
22 Takes a function or coroutine `func(request) -> response`,
23 and returns an ASGI application.
24 """
25 is_coroutine = asyncio.iscoroutinefunction(func)
26
27 def app(scope: Scope) -> ASGIInstance:
28 async def awaitable(receive: Receive, send: Send) -> None:
29 request = Request(scope, receive=receive)
30 if is_coroutine:
31 response = await func(request)
32 else:
33 response = func(request)
34 await response(receive, send)
35
36 return awaitable
37
38 return app
39
40
41 def websocket_session(func: typing.Callable) -> ASGIApp:
42 """
43 Takes a coroutine `func(session)`, and returns an ASGI application.
44 """
45
46 def app(scope: Scope) -> ASGIInstance:
47 async def awaitable(receive: Receive, send: Send) -> None:
48 session = WebSocket(scope, receive=receive, send=send)
49 await func(session)
50
51 return awaitable
52
53 return app
54
55
56 def get_name(endpoint: typing.Callable) -> str:
57 if inspect.isfunction(endpoint) or inspect.isclass(endpoint):
58 return endpoint.__name__
59 return endpoint.__class__.__name__
60
61
62 def replace_params(path: str, **path_params: str) -> str:
63 for key, value in path_params.items():
64 path = path.replace("{" + key + "}", value)
65 return path
66
67
68 class BaseRoute:
69 def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:
70 raise NotImplementedError() # pragma: no cover
71
72 def url_path_for(self, name: str, **path_params: str) -> URL:
73 raise NotImplementedError() # pragma: no cover
74
75 def __call__(self, scope: Scope) -> ASGIInstance:
76 raise NotImplementedError() # pragma: no cover
77
78
79 class Route(BaseRoute):
80 def __init__(
81 self, path: str, *, endpoint: typing.Callable, methods: typing.List[str] = None
82 ) -> None:
83 self.path = path
84 self.endpoint = endpoint
85 self.name = get_name(endpoint)
86
87 if inspect.isfunction(endpoint):
88 self.app = request_response(endpoint)
89 if methods is None:
90 methods = ["GET"]
91 else:
92 self.app = endpoint
93
94 self.methods = methods
95 regex = "^" + path + "$"
96 regex = re.sub("{([a-zA-Z_][a-zA-Z0-9_]*)}", r"(?P<\1>[^/]+)", regex)
97 self.path_regex = re.compile(regex)
98 self.param_names = set(self.path_regex.groupindex.keys())
99
100 def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:
101 if scope["type"] == "http":
102 match = self.path_regex.match(scope["path"])
103 if match:
104 path_params = dict(scope.get("path_params", {}))
105 path_params.update(match.groupdict())
106 child_scope = dict(scope)
107 child_scope["path_params"] = path_params
108 return True, child_scope
109 return False, {}
110
111 def url_path_for(self, name: str, **path_params: str) -> URL:
112 if name != self.name or self.param_names != set(path_params.keys()):
113 raise NoMatchFound()
114 return URL(scheme="http", path=replace_params(self.path, **path_params))
115
116 def __call__(self, scope: Scope) -> ASGIInstance:
117 if self.methods and scope["method"] not in self.methods:
118 if "app" in scope:
119 raise HTTPException(status_code=405)
120 return PlainTextResponse("Method Not Allowed", status_code=405)
121 return self.app(scope)
122
123 def __eq__(self, other: typing.Any) -> bool:
124 return (
125 isinstance(other, Route)
126 and self.path == other.path
127 and self.endpoint == other.endpoint
128 and self.methods == other.methods
129 )
130
131
132 class WebSocketRoute(BaseRoute):
133 def __init__(self, path: str, *, endpoint: typing.Callable) -> None:
134 self.path = path
135 self.endpoint = endpoint
136 self.name = get_name(endpoint)
137
138 if inspect.isfunction(endpoint):
139 self.app = websocket_session(endpoint)
140 else:
141 self.app = endpoint
142
143 regex = "^" + path + "$"
144 regex = re.sub("{([a-zA-Z_][a-zA-Z0-9_]*)}", r"(?P<\1>[^/]+)", regex)
145 self.path_regex = re.compile(regex)
146 self.param_names = set(self.path_regex.groupindex.keys())
147
148 def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:
149 if scope["type"] == "websocket":
150 match = self.path_regex.match(scope["path"])
151 if match:
152 path_params = dict(scope.get("path_params", {}))
153 path_params.update(match.groupdict())
154 child_scope = dict(scope)
155 child_scope["path_params"] = path_params
156 return True, child_scope
157 return False, {}
158
159 def url_path_for(self, name: str, **path_params: str) -> URL:
160 if name != self.name or self.param_names != set(path_params.keys()):
161 raise NoMatchFound()
162 return URL(scheme="ws", path=replace_params(self.path, **path_params))
163
164 def __call__(self, scope: Scope) -> ASGIInstance:
165 return self.app(scope)
166
167 def __eq__(self, other: typing.Any) -> bool:
168 return (
169 isinstance(other, WebSocketRoute)
170 and self.path == other.path
171 and self.endpoint == other.endpoint
172 )
173
174
175 class Mount(BaseRoute):
176 def __init__(self, path: str, app: ASGIApp) -> None:
177 self.path = path
178 self.app = app
179 regex = "^" + path
180 regex = re.sub("{([a-zA-Z_][a-zA-Z0-9_]*)}", r"(?P<\1>[^/]*)", regex)
181 self.path_regex = re.compile(regex)
182
183 @property
184 def routes(self) -> typing.List[BaseRoute]:
185 return getattr(self.app, "routes", None)
186
187 def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:
188 match = self.path_regex.match(scope["path"])
189 if match:
190 path_params = dict(scope.get("path_params", {}))
191 path_params.update(match.groupdict())
192 child_scope = dict(scope)
193 child_scope["path_params"] = path_params
194 child_scope["root_path"] = scope.get("root_path", "") + match.string
195 child_scope["path"] = scope["path"][match.span()[1] :]
196 return True, child_scope
197 return False, {}
198
199 def url_path_for(self, name: str, **path_params: str) -> URL:
200 for route in self.routes or []:
201 try:
202 url = route.url_path_for(name, **path_params)
203 return URL(scheme=url.scheme, path=self.path + url.path)
204 except NoMatchFound as exc:
205 pass
206 raise NoMatchFound()
207
208 def __call__(self, scope: Scope) -> ASGIInstance:
209 return self.app(scope)
210
211 def __eq__(self, other: typing.Any) -> bool:
212 return (
213 isinstance(other, Mount)
214 and self.path == other.path
215 and self.app == other.app
216 )
217
218
219 class Router:
220 def __init__(
221 self, routes: typing.List[BaseRoute] = None, default: ASGIApp = None
222 ) -> None:
223 self.routes = [] if routes is None else routes
224 self.default = self.not_found if default is None else default
225
226 def mount(self, path: str, app: ASGIApp) -> None:
227 route = Mount(path, app=app)
228 self.routes.append(route)
229
230 def add_route(
231 self, path: str, endpoint: typing.Callable, methods: typing.List[str] = None
232 ) -> None:
233 route = Route(path, endpoint=endpoint, methods=methods)
234 self.routes.append(route)
235
236 def add_graphql_route(
237 self, path: str, schema: typing.Any, executor: typing.Any = None
238 ) -> None:
239 app = GraphQLApp(schema=schema, executor=executor)
240 self.add_route(path, endpoint=app)
241
242 def add_websocket_route(self, path: str, endpoint: typing.Callable) -> None:
243 route = WebSocketRoute(path, endpoint=endpoint)
244 self.routes.append(route)
245
246 def route(self, path: str, methods: typing.List[str] = None) -> typing.Callable:
247 def decorator(func: typing.Callable) -> typing.Callable:
248 self.add_route(path, func, methods=methods)
249 return func
250
251 return decorator
252
253 def websocket_route(self, path: str) -> typing.Callable:
254 def decorator(func: typing.Callable) -> typing.Callable:
255 self.add_websocket_route(path, func)
256 return func
257
258 return decorator
259
260 def not_found(self, scope: Scope) -> ASGIInstance:
261 if scope["type"] == "websocket":
262 return WebSocketClose()
263
264 # If we're running inside a starlette application then raise an
265 # exception, so that the configurable exception handler can deal with
266 # returning the response. For plain ASGI apps, just return the response.
267 if "app" in scope:
268 raise HTTPException(status_code=404)
269 return PlainTextResponse("Not Found", status_code=404)
270
271 def url_path_for(self, name: str, **path_params: str) -> URL:
272 for route in self.routes:
273 try:
274 return route.url_path_for(name, **path_params)
275 except NoMatchFound as exc:
276 pass
277 raise NoMatchFound()
278
279 def __call__(self, scope: Scope) -> ASGIInstance:
280 assert scope["type"] in ("http", "websocket")
281
282 if "router" not in scope:
283 scope["router"] = self
284
285 for route in self.routes:
286 matched, child_scope = route.matches(scope)
287 if matched:
288 return route(child_scope)
289 return self.not_found(scope)
290
291 def __eq__(self, other: typing.Any) -> bool:
292 return isinstance(other, Router) and self.routes == other.routes
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/routing.py b/starlette/routing.py
--- a/starlette/routing.py
+++ b/starlette/routing.py
@@ -84,7 +84,7 @@
self.endpoint = endpoint
self.name = get_name(endpoint)
- if inspect.isfunction(endpoint):
+ if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):
self.app = request_response(endpoint)
if methods is None:
methods = ["GET"]
@@ -135,7 +135,7 @@
self.endpoint = endpoint
self.name = get_name(endpoint)
- if inspect.isfunction(endpoint):
+ if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):
self.app = websocket_session(endpoint)
else:
self.app = endpoint
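The change matters because `inspect.isfunction` returns False for a bound method, so an endpoint defined as a method on a class fell through to the `self.app = endpoint` branch, was never wrapped by `request_response`/`websocket_session`, and was later treated as a ready ASGI callable — which is how the "'coroutine' object is not callable" traceback arises. A minimal sketch of that standard-library behaviour, assuming a hypothetical `Controller` class (it is not code from the repository):

```python
import inspect

class Controller:
    async def homepage(self, request):  # hypothetical endpoint written as a method
        ...

endpoint = Controller().homepage  # a bound method, not a plain function

print(inspect.isfunction(endpoint))  # False -> the old check skipped the wrapping
print(inspect.ismethod(endpoint))    # True  -> the patched check wraps it correctly
```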
|
{"golden_diff": "diff --git a/starlette/routing.py b/starlette/routing.py\n--- a/starlette/routing.py\n+++ b/starlette/routing.py\n@@ -84,7 +84,7 @@\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n \n- if inspect.isfunction(endpoint):\n+ if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):\n self.app = request_response(endpoint)\n if methods is None:\n methods = [\"GET\"]\n@@ -135,7 +135,7 @@\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n \n- if inspect.isfunction(endpoint):\n+ if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):\n self.app = websocket_session(endpoint)\n else:\n self.app = endpoint\n", "issue": "Routes that were working on 0.5.5 not working on 0.6.2\nGreetings, thanks for the project. \r\n\r\nI am having issues after upgrading where it seems one of my routes is getting converted to a coroutine somewhere and not awaited and then is being passed down all the middleware.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"../lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 387, in run_asgi\r\n result = await asgi(self.receive, self.send)\r\n File \"../lib/python3.7/site-packages/uvicorn/middleware/message_logger.py\", line 59, in __call__\r\n await self.inner(self.receive, self.send)\r\n File \"../lib/python3.7/site-packages/uvicorn/middleware/debug.py\", line 80, in __call__\r\n await asgi(receive, self.send)\r\n File \"../lib/python3.7/site-packages/starlette/exceptions.py\", line 69, in app\r\n raise exc from None\r\n File \"../lib/python3.7/site-packages/starlette/exceptions.py\", line 61, in app\r\n await instance(receive, sender)\r\nTypeError: 'coroutine' object is not callable\r\n```\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport re\nimport typing\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom starlette.datastructures import URL\nfrom starlette.exceptions import HTTPException\nfrom starlette.graphql import GraphQLApp\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketClose\n\n\nclass NoMatchFound(Exception):\n pass\n\n\ndef request_response(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a function or coroutine `func(request) -> response`,\n and returns an ASGI application.\n \"\"\"\n is_coroutine = asyncio.iscoroutinefunction(func)\n\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n request = Request(scope, receive=receive)\n if is_coroutine:\n response = await func(request)\n else:\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n\n\ndef websocket_session(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a coroutine `func(session)`, and returns an ASGI application.\n \"\"\"\n\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n session = WebSocket(scope, receive=receive, send=send)\n await func(session)\n\n return awaitable\n\n return app\n\n\ndef get_name(endpoint: typing.Callable) -> str:\n if inspect.isfunction(endpoint) or inspect.isclass(endpoint):\n return endpoint.__name__\n return endpoint.__class__.__name__\n\n\ndef replace_params(path: str, **path_params: str) -> str:\n for key, value in path_params.items():\n path = path.replace(\"{\" + key + \"}\", value)\n return path\n\n\nclass BaseRoute:\n def matches(self, 
scope: Scope) -> typing.Tuple[bool, Scope]:\n raise NotImplementedError() # pragma: no cover\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n raise NotImplementedError() # pragma: no cover\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n raise NotImplementedError() # pragma: no cover\n\n\nclass Route(BaseRoute):\n def __init__(\n self, path: str, *, endpoint: typing.Callable, methods: typing.List[str] = None\n ) -> None:\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n\n if inspect.isfunction(endpoint):\n self.app = request_response(endpoint)\n if methods is None:\n methods = [\"GET\"]\n else:\n self.app = endpoint\n\n self.methods = methods\n regex = \"^\" + path + \"$\"\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", r\"(?P<\\1>[^/]+)\", regex)\n self.path_regex = re.compile(regex)\n self.param_names = set(self.path_regex.groupindex.keys())\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n if scope[\"type\"] == \"http\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n if name != self.name or self.param_names != set(path_params.keys()):\n raise NoMatchFound()\n return URL(scheme=\"http\", path=replace_params(self.path, **path_params))\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if self.methods and scope[\"method\"] not in self.methods:\n if \"app\" in scope:\n raise HTTPException(status_code=405)\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Route)\n and self.path == other.path\n and self.endpoint == other.endpoint\n and self.methods == other.methods\n )\n\n\nclass WebSocketRoute(BaseRoute):\n def __init__(self, path: str, *, endpoint: typing.Callable) -> None:\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n\n if inspect.isfunction(endpoint):\n self.app = websocket_session(endpoint)\n else:\n self.app = endpoint\n\n regex = \"^\" + path + \"$\"\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", r\"(?P<\\1>[^/]+)\", regex)\n self.path_regex = re.compile(regex)\n self.param_names = set(self.path_regex.groupindex.keys())\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n if scope[\"type\"] == \"websocket\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n if name != self.name or self.param_names != set(path_params.keys()):\n raise NoMatchFound()\n return URL(scheme=\"ws\", path=replace_params(self.path, **path_params))\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, WebSocketRoute)\n and self.path == other.path\n and self.endpoint == other.endpoint\n )\n\n\nclass Mount(BaseRoute):\n def __init__(self, path: str, app: ASGIApp) -> None:\n self.path = path\n self.app = app\n regex = \"^\" + path\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", 
r\"(?P<\\1>[^/]*)\", regex)\n self.path_regex = re.compile(regex)\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", None)\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n child_scope[\"root_path\"] = scope.get(\"root_path\", \"\") + match.string\n child_scope[\"path\"] = scope[\"path\"][match.span()[1] :]\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n for route in self.routes or []:\n try:\n url = route.url_path_for(name, **path_params)\n return URL(scheme=url.scheme, path=self.path + url.path)\n except NoMatchFound as exc:\n pass\n raise NoMatchFound()\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Mount)\n and self.path == other.path\n and self.app == other.app\n )\n\n\nclass Router:\n def __init__(\n self, routes: typing.List[BaseRoute] = None, default: ASGIApp = None\n ) -> None:\n self.routes = [] if routes is None else routes\n self.default = self.not_found if default is None else default\n\n def mount(self, path: str, app: ASGIApp) -> None:\n route = Mount(path, app=app)\n self.routes.append(route)\n\n def add_route(\n self, path: str, endpoint: typing.Callable, methods: typing.List[str] = None\n ) -> None:\n route = Route(path, endpoint=endpoint, methods=methods)\n self.routes.append(route)\n\n def add_graphql_route(\n self, path: str, schema: typing.Any, executor: typing.Any = None\n ) -> None:\n app = GraphQLApp(schema=schema, executor=executor)\n self.add_route(path, endpoint=app)\n\n def add_websocket_route(self, path: str, endpoint: typing.Callable) -> None:\n route = WebSocketRoute(path, endpoint=endpoint)\n self.routes.append(route)\n\n def route(self, path: str, methods: typing.List[str] = None) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_route(path, func, methods=methods)\n return func\n\n return decorator\n\n def websocket_route(self, path: str) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_websocket_route(path, func)\n return func\n\n return decorator\n\n def not_found(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] == \"websocket\":\n return WebSocketClose()\n\n # If we're running inside a starlette application then raise an\n # exception, so that the configurable exception handler can deal with\n # returning the response. 
For plain ASGI apps, just return the response.\n if \"app\" in scope:\n raise HTTPException(status_code=404)\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n for route in self.routes:\n try:\n return route.url_path_for(name, **path_params)\n except NoMatchFound as exc:\n pass\n raise NoMatchFound()\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n\n if \"router\" not in scope:\n scope[\"router\"] = self\n\n for route in self.routes:\n matched, child_scope = route.matches(scope)\n if matched:\n return route(child_scope)\n return self.not_found(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, Router) and self.routes == other.routes\n", "path": "starlette/routing.py"}], "after_files": [{"content": "import asyncio\nimport inspect\nimport re\nimport typing\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom starlette.datastructures import URL\nfrom starlette.exceptions import HTTPException\nfrom starlette.graphql import GraphQLApp\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketClose\n\n\nclass NoMatchFound(Exception):\n pass\n\n\ndef request_response(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a function or coroutine `func(request) -> response`,\n and returns an ASGI application.\n \"\"\"\n is_coroutine = asyncio.iscoroutinefunction(func)\n\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n request = Request(scope, receive=receive)\n if is_coroutine:\n response = await func(request)\n else:\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n\n\ndef websocket_session(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a coroutine `func(session)`, and returns an ASGI application.\n \"\"\"\n\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n session = WebSocket(scope, receive=receive, send=send)\n await func(session)\n\n return awaitable\n\n return app\n\n\ndef get_name(endpoint: typing.Callable) -> str:\n if inspect.isfunction(endpoint) or inspect.isclass(endpoint):\n return endpoint.__name__\n return endpoint.__class__.__name__\n\n\ndef replace_params(path: str, **path_params: str) -> str:\n for key, value in path_params.items():\n path = path.replace(\"{\" + key + \"}\", value)\n return path\n\n\nclass BaseRoute:\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n raise NotImplementedError() # pragma: no cover\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n raise NotImplementedError() # pragma: no cover\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n raise NotImplementedError() # pragma: no cover\n\n\nclass Route(BaseRoute):\n def __init__(\n self, path: str, *, endpoint: typing.Callable, methods: typing.List[str] = None\n ) -> None:\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n\n if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):\n self.app = request_response(endpoint)\n if methods is None:\n methods = [\"GET\"]\n else:\n self.app = endpoint\n\n self.methods = methods\n regex = \"^\" + path + \"$\"\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", r\"(?P<\\1>[^/]+)\", regex)\n self.path_regex = 
re.compile(regex)\n self.param_names = set(self.path_regex.groupindex.keys())\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n if scope[\"type\"] == \"http\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n if name != self.name or self.param_names != set(path_params.keys()):\n raise NoMatchFound()\n return URL(scheme=\"http\", path=replace_params(self.path, **path_params))\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if self.methods and scope[\"method\"] not in self.methods:\n if \"app\" in scope:\n raise HTTPException(status_code=405)\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Route)\n and self.path == other.path\n and self.endpoint == other.endpoint\n and self.methods == other.methods\n )\n\n\nclass WebSocketRoute(BaseRoute):\n def __init__(self, path: str, *, endpoint: typing.Callable) -> None:\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint)\n\n if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):\n self.app = websocket_session(endpoint)\n else:\n self.app = endpoint\n\n regex = \"^\" + path + \"$\"\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", r\"(?P<\\1>[^/]+)\", regex)\n self.path_regex = re.compile(regex)\n self.param_names = set(self.path_regex.groupindex.keys())\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n if scope[\"type\"] == \"websocket\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n if name != self.name or self.param_names != set(path_params.keys()):\n raise NoMatchFound()\n return URL(scheme=\"ws\", path=replace_params(self.path, **path_params))\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, WebSocketRoute)\n and self.path == other.path\n and self.endpoint == other.endpoint\n )\n\n\nclass Mount(BaseRoute):\n def __init__(self, path: str, app: ASGIApp) -> None:\n self.path = path\n self.app = app\n regex = \"^\" + path\n regex = re.sub(\"{([a-zA-Z_][a-zA-Z0-9_]*)}\", r\"(?P<\\1>[^/]*)\", regex)\n self.path_regex = re.compile(regex)\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", None)\n\n def matches(self, scope: Scope) -> typing.Tuple[bool, Scope]:\n match = self.path_regex.match(scope[\"path\"])\n if match:\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(match.groupdict())\n child_scope = dict(scope)\n child_scope[\"path_params\"] = path_params\n child_scope[\"root_path\"] = scope.get(\"root_path\", \"\") + match.string\n child_scope[\"path\"] = scope[\"path\"][match.span()[1] :]\n return True, child_scope\n return False, {}\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n for route in self.routes or []:\n try:\n url = route.url_path_for(name, **path_params)\n return 
URL(scheme=url.scheme, path=self.path + url.path)\n except NoMatchFound as exc:\n pass\n raise NoMatchFound()\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n return self.app(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Mount)\n and self.path == other.path\n and self.app == other.app\n )\n\n\nclass Router:\n def __init__(\n self, routes: typing.List[BaseRoute] = None, default: ASGIApp = None\n ) -> None:\n self.routes = [] if routes is None else routes\n self.default = self.not_found if default is None else default\n\n def mount(self, path: str, app: ASGIApp) -> None:\n route = Mount(path, app=app)\n self.routes.append(route)\n\n def add_route(\n self, path: str, endpoint: typing.Callable, methods: typing.List[str] = None\n ) -> None:\n route = Route(path, endpoint=endpoint, methods=methods)\n self.routes.append(route)\n\n def add_graphql_route(\n self, path: str, schema: typing.Any, executor: typing.Any = None\n ) -> None:\n app = GraphQLApp(schema=schema, executor=executor)\n self.add_route(path, endpoint=app)\n\n def add_websocket_route(self, path: str, endpoint: typing.Callable) -> None:\n route = WebSocketRoute(path, endpoint=endpoint)\n self.routes.append(route)\n\n def route(self, path: str, methods: typing.List[str] = None) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_route(path, func, methods=methods)\n return func\n\n return decorator\n\n def websocket_route(self, path: str) -> typing.Callable:\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_websocket_route(path, func)\n return func\n\n return decorator\n\n def not_found(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] == \"websocket\":\n return WebSocketClose()\n\n # If we're running inside a starlette application then raise an\n # exception, so that the configurable exception handler can deal with\n # returning the response. For plain ASGI apps, just return the response.\n if \"app\" in scope:\n raise HTTPException(status_code=404)\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n def url_path_for(self, name: str, **path_params: str) -> URL:\n for route in self.routes:\n try:\n return route.url_path_for(name, **path_params)\n except NoMatchFound as exc:\n pass\n raise NoMatchFound()\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] in (\"http\", \"websocket\")\n\n if \"router\" not in scope:\n scope[\"router\"] = self\n\n for route in self.routes:\n matched, child_scope = route.matches(scope)\n if matched:\n return route(child_scope)\n return self.not_found(scope)\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, Router) and self.routes == other.routes\n", "path": "starlette/routing.py"}]}
| 3,653 | 169 |
gh_patches_debug_48344
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-1749
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Format line breaks in the crud's text fields
The `\n` line breaks of TextField fields, it seems, are being displayed on the crud read screens.
For example, in the `observacao` field of `DocumentoAdministrativo`.
--- END ISSUE ---
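A rough way to picture the symptom, assuming an invented sample value (it is not taken from sapl): a bare `\n` inside HTML output is collapsed into ordinary whitespace, so a multi-line TextField value comes out as a single run-on line on the read-only screen unless the newlines are converted into explicit markup such as `<br/>`.

```python
observacao = "Primeira linha\nSegunda linha"  # hypothetical TextField value

# In HTML the "\n" collapses into whitespace, so both lines run together on
# the detail screen; turning it into <br/> keeps the break visible.
as_html = observacao.replace("\n", "<br/>")

print(as_html)  # Primeira linha<br/>Segunda linha
```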
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/crispy_layout_mixin.py`
Content:
```
1 from math import ceil
2
3 import rtyaml
4 from crispy_forms.bootstrap import FormActions
5 from crispy_forms.helper import FormHelper
6 from crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit
7 from django import template
8 from django.core.urlresolvers import reverse, reverse_lazy
9 from django.utils import formats
10 from django.utils.translation import ugettext as _
11
12
13 def heads_and_tails(list_of_lists):
14 for alist in list_of_lists:
15 yield alist[0], alist[1:]
16
17
18 def to_column(name_span):
19 fieldname, span = name_span
20 return Div(fieldname, css_class='col-md-%d' % span)
21
22
23 def to_row(names_spans):
24 return Div(*map(to_column, names_spans), css_class='row-fluid')
25
26
27 def to_fieldsets(fields):
28 for field in fields:
29 if isinstance(field, list):
30 legend, row_specs = field[0], field[1:]
31 rows = [to_row(name_span_list) for name_span_list in row_specs]
32 yield Fieldset(legend, *rows)
33 else:
34 yield field
35
36
37 def form_actions(more=[Div(css_class='clearfix')],
38 label=_('Salvar'), name='salvar', css_class='pull-right', disabled=True):
39
40 if disabled:
41 doubleclick = 'this.form.submit();this.disabled=true;'
42 else:
43 doubleclick = 'return true;'
44
45 return FormActions(
46 Submit(name, label, css_class=css_class,
47 # para impedir resubmissão do form
48 onclick=doubleclick),
49 *more)
50
51
52 class SaplFormLayout(Layout):
53
54 def __init__(self, *fields, cancel_label=_('Cancelar'),
55 save_label=_('Salvar'), actions=None):
56
57 buttons = actions
58 if not buttons:
59 buttons = form_actions(label=save_label, more=[
60 HTML('<a href="{{ view.cancel_url }}"'
61 ' class="btn btn-inverse">%s</a>' % cancel_label)
62 if cancel_label else None])
63
64 _fields = list(to_fieldsets(fields))
65 if buttons:
66 _fields += [to_row([(buttons, 12)])]
67 super(SaplFormLayout, self).__init__(*_fields)
68
69
70 def get_field_display(obj, fieldname):
71 field = ''
72 try:
73 field = obj._meta.get_field(fieldname)
74 except Exception as e:
75 """ nos casos que o fieldname não é um field_model,
76 ele pode ser um aggregate, annotate, um property, um manager,
77 ou mesmo uma método no model.
78 """
79 value = getattr(obj, fieldname)
80 try:
81 verbose_name = value.model._meta.verbose_name
82 except AttributeError:
83 verbose_name = ''
84
85 else:
86 verbose_name = str(field.verbose_name)\
87 if hasattr(field, 'verbose_name') else ''
88
89 if hasattr(field, 'choices') and field.choices:
90 value = getattr(obj, 'get_%s_display' % fieldname)()
91 else:
92 value = getattr(obj, fieldname)
93
94 str_type_from_value = str(type(value))
95 str_type_from_field = str(type(field))
96
97 if value is None:
98 display = ''
99 elif '.date' in str_type_from_value:
100 display = formats.date_format(value, "SHORT_DATE_FORMAT")
101 elif 'bool' in str_type_from_value:
102 display = _('Sim') if value else _('Não')
103 elif 'ImageFieldFile' in str(type(value)):
104 if value:
105 display = '<img src="{}" />'.format(value.url)
106 else:
107 display = ''
108 elif 'FieldFile' in str_type_from_value:
109 if value:
110 display = '<a href="{}">{}</a>'.format(
111 value.url,
112 value.name.split('/')[-1:][0])
113 else:
114 display = ''
115 elif 'ManyRelatedManager' in str_type_from_value\
116 or 'RelatedManager' in str_type_from_value\
117 or 'GenericRelatedObjectManager' in str_type_from_value:
118 display = '<ul>'
119 for v in value.all():
120 display += '<li>%s</li>' % str(v)
121 display += '</ul>'
122 if not verbose_name:
123 if hasattr(field, 'related_model'):
124 verbose_name = str(
125 field.related_model._meta.verbose_name_plural)
126 elif hasattr(field, 'model'):
127 verbose_name = str(field.model._meta.verbose_name_plural)
128 elif 'GenericForeignKey' in str_type_from_field:
129 display = '<a href="{}">{}</a>'.format(
130 reverse(
131 '%s:%s_detail' % (
132 value._meta.app_config.name, obj.content_type.model),
133 args=(value.id,)),
134 value)
135 else:
136 display = str(value)
137 return verbose_name, display
138
139
140 class CrispyLayoutFormMixin:
141
142 @property
143 def layout_key(self):
144 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):
145 return super(CrispyLayoutFormMixin, self).layout_key
146 else:
147 return self.model.__name__
148
149 @property
150 def layout_key_set(self):
151 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):
152 return super(CrispyLayoutFormMixin, self).layout_key_set
153 else:
154 obj = self.crud if hasattr(self, 'crud') else self
155 return getattr(obj.model,
156 obj.model_set).field.model.__name__
157
158 def get_layout(self):
159 yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label
160 return read_layout_from_yaml(yaml_layout, self.layout_key)
161
162 def get_layout_set(self):
163 obj = self.crud if hasattr(self, 'crud') else self
164 yaml_layout = '%s/layouts.yaml' % getattr(
165 obj.model, obj.model_set).field.model._meta.app_config.label
166 return read_layout_from_yaml(yaml_layout, self.layout_key_set)
167
168 @property
169 def fields(self):
170 if hasattr(self, 'form_class') and self.form_class:
171 return None
172 else:
173 '''Returns all fields in the layout'''
174 return [fieldname for legend_rows in self.get_layout()
175 for row in legend_rows[1:]
176 for fieldname, span in row]
177
178 def get_form(self, form_class=None):
179 try:
180 form = super(CrispyLayoutFormMixin, self).get_form(form_class)
181 except AttributeError:
182 # simply return None if there is no get_form on super
183 pass
184 else:
185 if self.layout_key:
186 form.helper = FormHelper()
187 form.helper.layout = SaplFormLayout(*self.get_layout())
188 return form
189
190 @property
191 def list_field_names(self):
192 '''The list of field names to display on table
193
194 This base implementation returns the field names
195 in the first fieldset of the layout.
196 '''
197 obj = self.crud if hasattr(self, 'crud') else self
198 if hasattr(obj, 'list_field_names') and obj.list_field_names:
199 return obj.list_field_names
200 rows = self.get_layout()[0][1:]
201 return [fieldname for row in rows for fieldname, __ in row]
202
203 @property
204 def list_field_names_set(self):
205 '''The list of field names to display on table
206
207 This base implementation returns the field names
208 in the first fieldset of the layout.
209 '''
210 rows = self.get_layout_set()[0][1:]
211 return [fieldname for row in rows for fieldname, __ in row]
212
213 def get_column(self, fieldname, span):
214 obj = self.get_object()
215
216 func = None
217 if '|' in fieldname:
218 fieldname, func = tuple(fieldname.split('|'))
219
220 if func:
221 verbose_name, text = getattr(self, func)(obj, fieldname)
222 else:
223 verbose_name, text = get_field_display(obj, fieldname)
224
225 return {
226 'id': fieldname,
227 'span': span,
228 'verbose_name': verbose_name,
229 'text': text,
230 }
231
232 def fk_urlize_for_detail(self, obj, fieldname):
233
234 field = obj._meta.get_field(fieldname)
235 value = getattr(obj, fieldname)
236
237 display = '<a href="{}">{}</a>'.format(
238 reverse(
239 '%s:%s_detail' % (
240 value._meta.app_config.name, value._meta.model_name),
241 args=(value.id,)),
242 value)
243
244 return field.verbose_name, display
245
246 def m2m_urlize_for_detail(self, obj, fieldname):
247
248 manager, fieldname = tuple(fieldname.split('__'))
249
250 manager = getattr(obj, manager)
251
252 verbose_name = manager.model._meta.verbose_name
253 display = ''
254 for item in manager.all():
255 obj_m2m = getattr(item, fieldname)
256
257 if obj == obj_m2m:
258 continue
259
260 verbose_name = item._meta.get_field(fieldname).verbose_name
261
262 display += '<li><a href="{}">{}</a></li>'.format(
263 reverse(
264 '%s:%s_detail' % (
265 obj_m2m._meta.app_config.name, obj_m2m._meta.model_name),
266 args=(obj_m2m.id,)),
267 obj_m2m)
268
269 display += ''
270
271 if display:
272 display = '<ul>%s</ul>' % display
273 else:
274 verbose_name = ''
275
276 return verbose_name, display
277
278 @property
279 def layout_display(self):
280
281 return [
282 {'legend': legend,
283 'rows': [[self.get_column(fieldname, span)
284 for fieldname, span in row]
285 for row in rows]
286 } for legend, rows in heads_and_tails(self.get_layout())]
287
288
289 def read_yaml_from_file(yaml_layout):
290 # TODO cache this at application level
291 t = template.loader.get_template(yaml_layout)
292 # aqui é importante converter para str pois, dependendo do ambiente,
293 # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream
294 rendered = str(t.render())
295 return rtyaml.load(rendered)
296
297
298 def read_layout_from_yaml(yaml_layout, key):
299 # TODO cache this at application level
300 yaml = read_yaml_from_file(yaml_layout)
301 base = yaml[key]
302
303 def line_to_namespans(line):
304 split = [cell.split(':') for cell in line.split()]
305 namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]
306 remaining = 12 - sum(s for n, s in namespans)
307 nondefined = [ns for ns in namespans if not ns[1]]
308 while nondefined:
309 span = ceil(remaining / len(nondefined))
310 namespan = nondefined.pop(0)
311 namespan[1] = span
312 remaining = remaining - span
313 return list(map(tuple, namespans))
314
315 return [[legend] + [line_to_namespans(l) for l in lines]
316 for legend, lines in base.items()]
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py
--- a/sapl/crispy_layout_mixin.py
+++ b/sapl/crispy_layout_mixin.py
@@ -132,6 +132,8 @@
value._meta.app_config.name, obj.content_type.model),
args=(value.id,)),
value)
+ elif 'TextField' in str_type_from_field:
+ display = value.replace('\n', '<br/>')
else:
display = str(value)
return verbose_name, display
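The effect of the added `TextField` branch, shown on a sample string (the value below is invented for illustration; the transformation itself is the one introduced by the diff):

```python
value = "linha 1\nlinha 2\nlinha 3"  # sample multi-line TextField content

# Same transformation as the new branch in get_field_display above.
display = value.replace('\n', '<br/>')

print(display)  # linha 1<br/>linha 2<br/>linha 3
```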
|
{"golden_diff": "diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py\n--- a/sapl/crispy_layout_mixin.py\n+++ b/sapl/crispy_layout_mixin.py\n@@ -132,6 +132,8 @@\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n+ elif 'TextField' in str_type_from_field:\n+ display = value.replace('\\n', '<br/>')\n else:\n display = str(value)\n return verbose_name, display\n", "issue": "Formatar mudan\u00e7a de linha nos campos text do crud\nAs mudan\u00e7as de linha `\\n` dos campos TextField, ao que parece, est\u00e3o sendo exibidas nas telas de leitura do crud.\r\n\r\nPor exemplo no campo `observacao` de `DocumentoAdministrativo`.\n", "before_files": [{"content": "from math import ceil\n\nimport rtyaml\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit\nfrom django import template\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.utils import formats\nfrom django.utils.translation import ugettext as _\n\n\ndef heads_and_tails(list_of_lists):\n for alist in list_of_lists:\n yield alist[0], alist[1:]\n\n\ndef to_column(name_span):\n fieldname, span = name_span\n return Div(fieldname, css_class='col-md-%d' % span)\n\n\ndef to_row(names_spans):\n return Div(*map(to_column, names_spans), css_class='row-fluid')\n\n\ndef to_fieldsets(fields):\n for field in fields:\n if isinstance(field, list):\n legend, row_specs = field[0], field[1:]\n rows = [to_row(name_span_list) for name_span_list in row_specs]\n yield Fieldset(legend, *rows)\n else:\n yield field\n\n\ndef form_actions(more=[Div(css_class='clearfix')],\n label=_('Salvar'), name='salvar', css_class='pull-right', disabled=True):\n\n if disabled:\n doubleclick = 'this.form.submit();this.disabled=true;'\n else:\n doubleclick = 'return true;'\n\n return FormActions(\n Submit(name, label, css_class=css_class,\n # para impedir resubmiss\u00e3o do form\n onclick=doubleclick),\n *more)\n\n\nclass SaplFormLayout(Layout):\n\n def __init__(self, *fields, cancel_label=_('Cancelar'),\n save_label=_('Salvar'), actions=None):\n\n buttons = actions\n if not buttons:\n buttons = form_actions(label=save_label, more=[\n HTML('<a href=\"{{ view.cancel_url }}\"'\n ' class=\"btn btn-inverse\">%s</a>' % cancel_label)\n if cancel_label else None])\n\n _fields = list(to_fieldsets(fields))\n if buttons:\n _fields += [to_row([(buttons, 12)])]\n super(SaplFormLayout, self).__init__(*_fields)\n\n\ndef get_field_display(obj, fieldname):\n field = ''\n try:\n field = obj._meta.get_field(fieldname)\n except Exception as e:\n \"\"\" nos casos que o fieldname n\u00e3o \u00e9 um field_model,\n ele pode ser um aggregate, annotate, um property, um manager,\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n try:\n verbose_name = value.model._meta.verbose_name\n except AttributeError:\n verbose_name = ''\n\n else:\n verbose_name = str(field.verbose_name)\\\n if hasattr(field, 'verbose_name') else ''\n\n if hasattr(field, 'choices') and field.choices:\n value = getattr(obj, 'get_%s_display' % fieldname)()\n else:\n value = getattr(obj, fieldname)\n\n str_type_from_value = str(type(value))\n str_type_from_field = str(type(field))\n\n if value is None:\n display = ''\n elif '.date' in str_type_from_value:\n display = formats.date_format(value, \"SHORT_DATE_FORMAT\")\n elif 'bool' in str_type_from_value:\n display = _('Sim') if value else _('N\u00e3o')\n elif 'ImageFieldFile' in 
str(type(value)):\n if value:\n display = '<img src=\"{}\" />'.format(value.url)\n else:\n display = ''\n elif 'FieldFile' in str_type_from_value:\n if value:\n display = '<a href=\"{}\">{}</a>'.format(\n value.url,\n value.name.split('/')[-1:][0])\n else:\n display = ''\n elif 'ManyRelatedManager' in str_type_from_value\\\n or 'RelatedManager' in str_type_from_value\\\n or 'GenericRelatedObjectManager' in str_type_from_value:\n display = '<ul>'\n for v in value.all():\n display += '<li>%s</li>' % str(v)\n display += '</ul>'\n if not verbose_name:\n if hasattr(field, 'related_model'):\n verbose_name = str(\n field.related_model._meta.verbose_name_plural)\n elif hasattr(field, 'model'):\n verbose_name = str(field.model._meta.verbose_name_plural)\n elif 'GenericForeignKey' in str_type_from_field:\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n else:\n display = str(value)\n return verbose_name, display\n\n\nclass CrispyLayoutFormMixin:\n\n @property\n def layout_key(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):\n return super(CrispyLayoutFormMixin, self).layout_key\n else:\n return self.model.__name__\n\n @property\n def layout_key_set(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):\n return super(CrispyLayoutFormMixin, self).layout_key_set\n else:\n obj = self.crud if hasattr(self, 'crud') else self\n return getattr(obj.model,\n obj.model_set).field.model.__name__\n\n def get_layout(self):\n yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key)\n\n def get_layout_set(self):\n obj = self.crud if hasattr(self, 'crud') else self\n yaml_layout = '%s/layouts.yaml' % getattr(\n obj.model, obj.model_set).field.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key_set)\n\n @property\n def fields(self):\n if hasattr(self, 'form_class') and self.form_class:\n return None\n else:\n '''Returns all fields in the layout'''\n return [fieldname for legend_rows in self.get_layout()\n for row in legend_rows[1:]\n for fieldname, span in row]\n\n def get_form(self, form_class=None):\n try:\n form = super(CrispyLayoutFormMixin, self).get_form(form_class)\n except AttributeError:\n # simply return None if there is no get_form on super\n pass\n else:\n if self.layout_key:\n form.helper = FormHelper()\n form.helper.layout = SaplFormLayout(*self.get_layout())\n return form\n\n @property\n def list_field_names(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n obj = self.crud if hasattr(self, 'crud') else self\n if hasattr(obj, 'list_field_names') and obj.list_field_names:\n return obj.list_field_names\n rows = self.get_layout()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n @property\n def list_field_names_set(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n rows = self.get_layout_set()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n def get_column(self, fieldname, span):\n obj = self.get_object()\n\n func = None\n if '|' in fieldname:\n fieldname, func = tuple(fieldname.split('|'))\n\n if func:\n verbose_name, text = getattr(self, func)(obj, fieldname)\n else:\n verbose_name, text = 
get_field_display(obj, fieldname)\n\n return {\n 'id': fieldname,\n 'span': span,\n 'verbose_name': verbose_name,\n 'text': text,\n }\n\n def fk_urlize_for_detail(self, obj, fieldname):\n\n field = obj._meta.get_field(fieldname)\n value = getattr(obj, fieldname)\n\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, value._meta.model_name),\n args=(value.id,)),\n value)\n\n return field.verbose_name, display\n\n def m2m_urlize_for_detail(self, obj, fieldname):\n\n manager, fieldname = tuple(fieldname.split('__'))\n\n manager = getattr(obj, manager)\n\n verbose_name = manager.model._meta.verbose_name\n display = ''\n for item in manager.all():\n obj_m2m = getattr(item, fieldname)\n\n if obj == obj_m2m:\n continue\n\n verbose_name = item._meta.get_field(fieldname).verbose_name\n\n display += '<li><a href=\"{}\">{}</a></li>'.format(\n reverse(\n '%s:%s_detail' % (\n obj_m2m._meta.app_config.name, obj_m2m._meta.model_name),\n args=(obj_m2m.id,)),\n obj_m2m)\n\n display += ''\n\n if display:\n display = '<ul>%s</ul>' % display\n else:\n verbose_name = ''\n\n return verbose_name, display\n\n @property\n def layout_display(self):\n\n return [\n {'legend': legend,\n 'rows': [[self.get_column(fieldname, span)\n for fieldname, span in row]\n for row in rows]\n } for legend, rows in heads_and_tails(self.get_layout())]\n\n\ndef read_yaml_from_file(yaml_layout):\n # TODO cache this at application level\n t = template.loader.get_template(yaml_layout)\n # aqui \u00e9 importante converter para str pois, dependendo do ambiente,\n # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream\n rendered = str(t.render())\n return rtyaml.load(rendered)\n\n\ndef read_layout_from_yaml(yaml_layout, key):\n # TODO cache this at application level\n yaml = read_yaml_from_file(yaml_layout)\n base = yaml[key]\n\n def line_to_namespans(line):\n split = [cell.split(':') for cell in line.split()]\n namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]\n remaining = 12 - sum(s for n, s in namespans)\n nondefined = [ns for ns in namespans if not ns[1]]\n while nondefined:\n span = ceil(remaining / len(nondefined))\n namespan = nondefined.pop(0)\n namespan[1] = span\n remaining = remaining - span\n return list(map(tuple, namespans))\n\n return [[legend] + [line_to_namespans(l) for l in lines]\n for legend, lines in base.items()]\n", "path": "sapl/crispy_layout_mixin.py"}], "after_files": [{"content": "from math import ceil\n\nimport rtyaml\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit\nfrom django import template\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.utils import formats\nfrom django.utils.translation import ugettext as _\n\n\ndef heads_and_tails(list_of_lists):\n for alist in list_of_lists:\n yield alist[0], alist[1:]\n\n\ndef to_column(name_span):\n fieldname, span = name_span\n return Div(fieldname, css_class='col-md-%d' % span)\n\n\ndef to_row(names_spans):\n return Div(*map(to_column, names_spans), css_class='row-fluid')\n\n\ndef to_fieldsets(fields):\n for field in fields:\n if isinstance(field, list):\n legend, row_specs = field[0], field[1:]\n rows = [to_row(name_span_list) for name_span_list in row_specs]\n yield Fieldset(legend, *rows)\n else:\n yield field\n\n\ndef form_actions(more=[Div(css_class='clearfix')],\n label=_('Salvar'), name='salvar', css_class='pull-right', disabled=True):\n\n if 
disabled:\n doubleclick = 'this.form.submit();this.disabled=true;'\n else:\n doubleclick = 'return true;'\n\n return FormActions(\n Submit(name, label, css_class=css_class,\n # para impedir resubmiss\u00e3o do form\n onclick=doubleclick),\n *more)\n\n\nclass SaplFormLayout(Layout):\n\n def __init__(self, *fields, cancel_label=_('Cancelar'),\n save_label=_('Salvar'), actions=None):\n\n buttons = actions\n if not buttons:\n buttons = form_actions(label=save_label, more=[\n HTML('<a href=\"{{ view.cancel_url }}\"'\n ' class=\"btn btn-inverse\">%s</a>' % cancel_label)\n if cancel_label else None])\n\n _fields = list(to_fieldsets(fields))\n if buttons:\n _fields += [to_row([(buttons, 12)])]\n super(SaplFormLayout, self).__init__(*_fields)\n\n\ndef get_field_display(obj, fieldname):\n field = ''\n try:\n field = obj._meta.get_field(fieldname)\n except Exception as e:\n \"\"\" nos casos que o fieldname n\u00e3o \u00e9 um field_model,\n ele pode ser um aggregate, annotate, um property, um manager,\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n try:\n verbose_name = value.model._meta.verbose_name\n except AttributeError:\n verbose_name = ''\n\n else:\n verbose_name = str(field.verbose_name)\\\n if hasattr(field, 'verbose_name') else ''\n\n if hasattr(field, 'choices') and field.choices:\n value = getattr(obj, 'get_%s_display' % fieldname)()\n else:\n value = getattr(obj, fieldname)\n\n str_type_from_value = str(type(value))\n str_type_from_field = str(type(field))\n\n if value is None:\n display = ''\n elif '.date' in str_type_from_value:\n display = formats.date_format(value, \"SHORT_DATE_FORMAT\")\n elif 'bool' in str_type_from_value:\n display = _('Sim') if value else _('N\u00e3o')\n elif 'ImageFieldFile' in str(type(value)):\n if value:\n display = '<img src=\"{}\" />'.format(value.url)\n else:\n display = ''\n elif 'FieldFile' in str_type_from_value:\n if value:\n display = '<a href=\"{}\">{}</a>'.format(\n value.url,\n value.name.split('/')[-1:][0])\n else:\n display = ''\n elif 'ManyRelatedManager' in str_type_from_value\\\n or 'RelatedManager' in str_type_from_value\\\n or 'GenericRelatedObjectManager' in str_type_from_value:\n display = '<ul>'\n for v in value.all():\n display += '<li>%s</li>' % str(v)\n display += '</ul>'\n if not verbose_name:\n if hasattr(field, 'related_model'):\n verbose_name = str(\n field.related_model._meta.verbose_name_plural)\n elif hasattr(field, 'model'):\n verbose_name = str(field.model._meta.verbose_name_plural)\n elif 'GenericForeignKey' in str_type_from_field:\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n elif 'TextField' in str_type_from_field:\n display = value.replace('\\n', '<br/>')\n else:\n display = str(value)\n return verbose_name, display\n\n\nclass CrispyLayoutFormMixin:\n\n @property\n def layout_key(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):\n return super(CrispyLayoutFormMixin, self).layout_key\n else:\n return self.model.__name__\n\n @property\n def layout_key_set(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):\n return super(CrispyLayoutFormMixin, self).layout_key_set\n else:\n obj = self.crud if hasattr(self, 'crud') else self\n return getattr(obj.model,\n obj.model_set).field.model.__name__\n\n def get_layout(self):\n yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, 
self.layout_key)\n\n def get_layout_set(self):\n obj = self.crud if hasattr(self, 'crud') else self\n yaml_layout = '%s/layouts.yaml' % getattr(\n obj.model, obj.model_set).field.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key_set)\n\n @property\n def fields(self):\n if hasattr(self, 'form_class') and self.form_class:\n return None\n else:\n '''Returns all fields in the layout'''\n return [fieldname for legend_rows in self.get_layout()\n for row in legend_rows[1:]\n for fieldname, span in row]\n\n def get_form(self, form_class=None):\n try:\n form = super(CrispyLayoutFormMixin, self).get_form(form_class)\n except AttributeError:\n # simply return None if there is no get_form on super\n pass\n else:\n if self.layout_key:\n form.helper = FormHelper()\n form.helper.layout = SaplFormLayout(*self.get_layout())\n return form\n\n @property\n def list_field_names(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n obj = self.crud if hasattr(self, 'crud') else self\n if hasattr(obj, 'list_field_names') and obj.list_field_names:\n return obj.list_field_names\n rows = self.get_layout()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n @property\n def list_field_names_set(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n rows = self.get_layout_set()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n def get_column(self, fieldname, span):\n obj = self.get_object()\n\n func = None\n if '|' in fieldname:\n fieldname, func = tuple(fieldname.split('|'))\n\n if func:\n verbose_name, text = getattr(self, func)(obj, fieldname)\n else:\n verbose_name, text = get_field_display(obj, fieldname)\n\n return {\n 'id': fieldname,\n 'span': span,\n 'verbose_name': verbose_name,\n 'text': text,\n }\n\n def fk_urlize_for_detail(self, obj, fieldname):\n\n field = obj._meta.get_field(fieldname)\n value = getattr(obj, fieldname)\n\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, value._meta.model_name),\n args=(value.id,)),\n value)\n\n return field.verbose_name, display\n\n def m2m_urlize_for_detail(self, obj, fieldname):\n\n manager, fieldname = tuple(fieldname.split('__'))\n\n manager = getattr(obj, manager)\n\n verbose_name = manager.model._meta.verbose_name\n display = ''\n for item in manager.all():\n obj_m2m = getattr(item, fieldname)\n\n if obj == obj_m2m:\n continue\n\n verbose_name = item._meta.get_field(fieldname).verbose_name\n\n display += '<li><a href=\"{}\">{}</a></li>'.format(\n reverse(\n '%s:%s_detail' % (\n obj_m2m._meta.app_config.name, obj_m2m._meta.model_name),\n args=(obj_m2m.id,)),\n obj_m2m)\n\n display += ''\n\n if display:\n display = '<ul>%s</ul>' % display\n else:\n verbose_name = ''\n\n return verbose_name, display\n\n @property\n def layout_display(self):\n\n return [\n {'legend': legend,\n 'rows': [[self.get_column(fieldname, span)\n for fieldname, span in row]\n for row in rows]\n } for legend, rows in heads_and_tails(self.get_layout())]\n\n\ndef read_yaml_from_file(yaml_layout):\n # TODO cache this at application level\n t = template.loader.get_template(yaml_layout)\n # aqui \u00e9 importante converter para str pois, dependendo do ambiente,\n # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream\n rendered = str(t.render())\n return 
rtyaml.load(rendered)\n\n\ndef read_layout_from_yaml(yaml_layout, key):\n # TODO cache this at application level\n yaml = read_yaml_from_file(yaml_layout)\n base = yaml[key]\n\n def line_to_namespans(line):\n split = [cell.split(':') for cell in line.split()]\n namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]\n remaining = 12 - sum(s for n, s in namespans)\n nondefined = [ns for ns in namespans if not ns[1]]\n while nondefined:\n span = ceil(remaining / len(nondefined))\n namespan = nondefined.pop(0)\n namespan[1] = span\n remaining = remaining - span\n return list(map(tuple, namespans))\n\n return [[legend] + [line_to_namespans(l) for l in lines]\n for legend, lines in base.items()]\n", "path": "sapl/crispy_layout_mixin.py"}]}
| 3,546 | 126 |
gh_patches_debug_25052
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-2759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 3.3 support
This is a placeholder for Pyramid 1.8 to drop Python 3.3 support.
Creating a new issue, splitting it off from https://github.com/Pylons/pyramid/issues/2368.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21 is_pypy = '__pypy__' in sys.builtin_module_names
22
23 PY3 = py_version[0] == 3
24
25 if PY3:
26 if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...
27 raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')
28 else:
29 if py_version < (2, 6):
30 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
31
32 here = os.path.abspath(os.path.dirname(__file__))
33 try:
34 with open(os.path.join(here, 'README.rst')) as f:
35 README = f.read()
36 with open(os.path.join(here, 'CHANGES.txt')) as f:
37 CHANGES = f.read()
38 except IOError:
39 README = CHANGES = ''
40
41 install_requires = [
42 'setuptools',
43 'WebOb >= 1.3.1', # request.domain and CookieProfile
44 'repoze.lru >= 0.4', # py3 compat
45 'zope.interface >= 3.8.0', # has zope.interface.registry
46 'zope.deprecation >= 3.5.0', # py3 compat
47 'venusian >= 1.0a3', # ``ignore``
48 'translationstring >= 0.4', # py3 compat
49 'PasteDeploy >= 1.5.0', # py3 compat
50 ]
51
52 tests_require = [
53 'WebTest >= 1.3.1', # py3 compat
54 ]
55
56 if not PY3:
57 tests_require.append('zope.component>=3.11.0')
58
59 docs_extras = [
60 'Sphinx >= 1.3.5',
61 'docutils',
62 'repoze.sphinx.autointerface',
63 'pylons_sphinx_latesturl',
64 'pylons-sphinx-themes',
65 'sphinxcontrib-programoutput',
66 ]
67
68 testing_extras = tests_require + [
69 'nose',
70 'coverage',
71 'virtualenv', # for scaffolding tests
72 ]
73
74 setup(name='pyramid',
75 version='1.8.dev0',
76 description='The Pyramid Web Framework, a Pylons project',
77 long_description=README + '\n\n' + CHANGES,
78 classifiers=[
79 "Development Status :: 6 - Mature",
80 "Intended Audience :: Developers",
81 "Programming Language :: Python",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.3",
85 "Programming Language :: Python :: 3.4",
86 "Programming Language :: Python :: 3.5",
87 "Programming Language :: Python :: Implementation :: CPython",
88 "Programming Language :: Python :: Implementation :: PyPy",
89 "Framework :: Pyramid",
90 "Topic :: Internet :: WWW/HTTP",
91 "Topic :: Internet :: WWW/HTTP :: WSGI",
92 "License :: Repoze Public License",
93 ],
94 keywords='web wsgi pylons pyramid',
95 author="Chris McDonough, Agendaless Consulting",
96 author_email="[email protected]",
97 url="https://trypyramid.com",
98 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
99 packages=find_packages(),
100 include_package_data=True,
101 zip_safe=False,
102 install_requires=install_requires,
103 extras_require={
104 'testing': testing_extras,
105 'docs': docs_extras,
106 },
107 tests_require=tests_require,
108 test_suite="pyramid.tests",
109 entry_points="""\
110 [pyramid.scaffold]
111 starter=pyramid.scaffolds:StarterProjectTemplate
112 zodb=pyramid.scaffolds:ZODBProjectTemplate
113 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
114 [pyramid.pshell_runner]
115 python=pyramid.scripts.pshell:python_shell_runner
116 [console_scripts]
117 pcreate = pyramid.scripts.pcreate:main
118 pserve = pyramid.scripts.pserve:main
119 pshell = pyramid.scripts.pshell:main
120 proutes = pyramid.scripts.proutes:main
121 pviews = pyramid.scripts.pviews:main
122 ptweens = pyramid.scripts.ptweens:main
123 prequest = pyramid.scripts.prequest:main
124 pdistreport = pyramid.scripts.pdistreport:main
125 [paste.server_runner]
126 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
127 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
128 """
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,16 +18,15 @@
from setuptools import setup, find_packages
py_version = sys.version_info[:2]
-is_pypy = '__pypy__' in sys.builtin_module_names
PY3 = py_version[0] == 3
if PY3:
- if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...
- raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')
+ if py_version < (3, 4):
+ raise RuntimeError('On Python 3, Pyramid requires Python 3.4 or better')
else:
- if py_version < (2, 6):
- raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
+ if py_version < (2, 7):
+ raise RuntimeError('On Python 2, Pyramid requires Python 2.7 or better')
here = os.path.abspath(os.path.dirname(__file__))
try:
@@ -81,7 +80,6 @@
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,16 +18,15 @@\n from setuptools import setup, find_packages\n \n py_version = sys.version_info[:2]\n-is_pypy = '__pypy__' in sys.builtin_module_names\n \n PY3 = py_version[0] == 3\n \n if PY3:\n- if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...\n- raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')\n+ if py_version < (3, 4):\n+ raise RuntimeError('On Python 3, Pyramid requires Python 3.4 or better')\n else:\n- if py_version < (2, 6):\n- raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n+ if py_version < (2, 7):\n+ raise RuntimeError('On Python 2, Pyramid requires Python 2.7 or better')\n \n here = os.path.abspath(os.path.dirname(__file__))\n try:\n@@ -81,7 +80,6 @@\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n", "issue": "Drop Python 3.3 support\nThis is a placeholder for Pyramid 1.8 to drop Python 3.3 support.\n\nCreating a new issue, splitting it off from https://github.com/Pylons/pyramid/issues/2368.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\nis_pypy = '__pypy__' in sys.builtin_module_names\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...\n raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.5',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n 
]\n\nsetup(name='pyramid',\n version='1.8.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 4):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.4 or better')\nelse:\n if py_version < (2, 7):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.7 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.5',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.8.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n 
[paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}]}
| 1,747 | 337 |
gh_patches_debug_19166
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-870
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add EfficientDet AdvProp-AA
## 🚀 Feature
Add EfficientDet AdvProp-AA pretrained backbones for D0-D5
See https://github.com/google/automl/blob/master/efficientdet/Det-AdvProp.md
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/ross/efficientdet/backbones.py`
Content:
```
1 __all__ = [
2 "tf_lite0",
3 "tf_lite1",
4 "tf_lite2",
5 "tf_lite3",
6 "tf_d0",
7 "tf_d1",
8 "tf_d2",
9 "tf_d3",
10 "tf_d4",
11 "tf_d5",
12 "tf_d6",
13 "tf_d7",
14 "tf_d7x",
15 "d0",
16 "d1",
17 "d2",
18 "d3",
19 "d4",
20 "d5",
21 "d6",
22 "d7",
23 "d7x",
24 ]
25
26 from icevision.models.ross.efficientdet.utils import *
27
28
29 tf_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
30 tf_lite1 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite1")
31 tf_lite2 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite2")
32 tf_lite3 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite3")
33
34 tf_d0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d0")
35 tf_d1 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d1")
36 tf_d2 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d2")
37 tf_d3 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d3")
38 tf_d4 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d4")
39 tf_d5 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d5")
40 tf_d6 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d6")
41 tf_d7 = EfficientDetBackboneConfig(model_name="tf_efficientdet_d7")
42 tf_d7x = EfficientDetBackboneConfig(model_name="tf_efficientdet_d7x")
43
44 d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
45 d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
46 d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
47 d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
48 d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
49 d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
50 d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
51 d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
52 d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py
--- a/icevision/models/ross/efficientdet/backbones.py
+++ b/icevision/models/ross/efficientdet/backbones.py
@@ -21,6 +21,12 @@
"d6",
"d7",
"d7x",
+ "tf_d0_ap",
+ "tf_d1_ap",
+ "tf_d2_ap",
+ "tf_d3_ap",
+ "tf_d4_ap",
+ "tf_d5_ap",
]
from icevision.models.ross.efficientdet.utils import *
@@ -50,3 +56,10 @@
d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
+
+tf_d0_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d0_ap")
+tf_d1_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d1_ap")
+tf_d2_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d2_ap")
+tf_d3_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d3_ap")
+tf_d4_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d4_ap")
+tf_d5_ap = EfficientDetBackboneConfig(model_name="tf_efficientdet_d5_ap")
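For context, the new AdvProp configs plug into model construction the same way as the existing backbones. A minimal usage sketch, assuming icevision's usual EfficientDet API; `num_classes` and `img_size` are placeholder values, not part of the patch:

```python
# Sketch only: how one of the new AdvProp-AA backbones would typically be used.
# The import path and model() call follow icevision's standard pattern;
# num_classes and img_size are illustrative placeholders.
from icevision.models.ross import efficientdet

backbone = efficientdet.backbones.tf_d0_ap(pretrained=True)
model = efficientdet.model(backbone=backbone, num_classes=2, img_size=512)
```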
|
{"golden_diff": "diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py\n--- a/icevision/models/ross/efficientdet/backbones.py\n+++ b/icevision/models/ross/efficientdet/backbones.py\n@@ -21,6 +21,12 @@\n \"d6\",\n \"d7\",\n \"d7x\",\n+ \"tf_d0_ap\",\n+ \"tf_d1_ap\",\n+ \"tf_d2_ap\",\n+ \"tf_d3_ap\",\n+ \"tf_d4_ap\",\n+ \"tf_d5_ap\",\n ]\n \n from icevision.models.ross.efficientdet.utils import *\n@@ -50,3 +56,10 @@\n d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n d7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n+\n+tf_d0_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d0_ap\")\n+tf_d1_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d1_ap\")\n+tf_d2_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d2_ap\")\n+tf_d3_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d3_ap\")\n+tf_d4_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d4_ap\")\n+tf_d5_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d5_ap\")\n", "issue": "Add EfficientDet AdvProp-AA\n## \ud83d\ude80 Feature\r\nAdd EfficientDet AdvProp-AA pretrained backbones for D0-D5\r\n\r\nSee https://github.com/google/automl/blob/master/efficientdet/Det-AdvProp.md\n", "before_files": [{"content": "__all__ = [\n \"tf_lite0\",\n \"tf_lite1\",\n \"tf_lite2\",\n \"tf_lite3\",\n \"tf_d0\",\n \"tf_d1\",\n \"tf_d2\",\n \"tf_d3\",\n \"tf_d4\",\n \"tf_d5\",\n \"tf_d6\",\n \"tf_d7\",\n \"tf_d7x\",\n \"d0\",\n \"d1\",\n \"d2\",\n \"d3\",\n \"d4\",\n \"d5\",\n \"d6\",\n \"d7\",\n \"d7x\",\n]\n\nfrom icevision.models.ross.efficientdet.utils import *\n\n\ntf_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\ntf_lite1 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite1\")\ntf_lite2 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite2\")\ntf_lite3 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite3\")\n\ntf_d0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d0\")\ntf_d1 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d1\")\ntf_d2 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d2\")\ntf_d3 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d3\")\ntf_d4 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d4\")\ntf_d5 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d5\")\ntf_d6 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d6\")\ntf_d7 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d7\")\ntf_d7x = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d7x\")\n\nd0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\nd1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\nd2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\nd3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\nd4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\nd5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\nd6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\nd7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\nd7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "path": "icevision/models/ross/efficientdet/backbones.py"}], "after_files": [{"content": "__all__ = [\n \"tf_lite0\",\n \"tf_lite1\",\n \"tf_lite2\",\n \"tf_lite3\",\n \"tf_d0\",\n \"tf_d1\",\n \"tf_d2\",\n \"tf_d3\",\n \"tf_d4\",\n \"tf_d5\",\n \"tf_d6\",\n 
\"tf_d7\",\n \"tf_d7x\",\n \"d0\",\n \"d1\",\n \"d2\",\n \"d3\",\n \"d4\",\n \"d5\",\n \"d6\",\n \"d7\",\n \"d7x\",\n \"tf_d0_ap\",\n \"tf_d1_ap\",\n \"tf_d2_ap\",\n \"tf_d3_ap\",\n \"tf_d4_ap\",\n \"tf_d5_ap\",\n]\n\nfrom icevision.models.ross.efficientdet.utils import *\n\n\ntf_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\ntf_lite1 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite1\")\ntf_lite2 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite2\")\ntf_lite3 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite3\")\n\ntf_d0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d0\")\ntf_d1 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d1\")\ntf_d2 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d2\")\ntf_d3 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d3\")\ntf_d4 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d4\")\ntf_d5 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d5\")\ntf_d6 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d6\")\ntf_d7 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d7\")\ntf_d7x = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d7x\")\n\nd0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\nd1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\nd2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\nd3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\nd4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\nd5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\nd6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\nd7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\nd7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n\ntf_d0_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d0_ap\")\ntf_d1_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d1_ap\")\ntf_d2_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d2_ap\")\ntf_d3_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d3_ap\")\ntf_d4_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d4_ap\")\ntf_d5_ap = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_d5_ap\")\n", "path": "icevision/models/ross/efficientdet/backbones.py"}]}
| 956 | 348 |
gh_patches_debug_31743
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-9009
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
wagtail 4.0rc1 migration ValueError: Could not find object DocumentChooserBlock
### Issue Summary
```
Operations to perform:
Apply all migrations: admin, auth, contenttypes, core, custom_documents, custom_images, custom_users, home, ...,
No migrations to apply.
Traceback (most recent call last):
File "django/./manage.py", line 12, in <module>
execute_from_command_line(sys.argv)
File ".venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File ".venv/lib/python3.10/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File ".venv/lib/python3.10/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File ".venv/lib/python3.10/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File ".venv/lib/python3.10/site-packages/django/core/management/base.py", line 89, in wrapped
res = handle_func(*args, **kwargs)
File ".venv/lib/python3.10/site-packages/django/core/management/commands/migrate.py", line 227, in handle
changes = autodetector.changes(graph=executor.loader.graph)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 41, in changes
changes = self._detect_changes(convert_apps, graph)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 184, in _detect_changes
self.generate_altered_fields()
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 964, in generate_altered_fields
old_field_dec = self.deep_deconstruct(old_field)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 78, in deep_deconstruct
[self.deep_deconstruct(value) for value in args],
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 78, in <listcomp>
[self.deep_deconstruct(value) for value in args],
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 54, in deep_deconstruct
return [self.deep_deconstruct(value) for value in obj]
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 54, in <listcomp>
return [self.deep_deconstruct(value) for value in obj]
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 56, in deep_deconstruct
return tuple(self.deep_deconstruct(value) for value in obj)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 56, in <genexpr>
return tuple(self.deep_deconstruct(value) for value in obj)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 78, in deep_deconstruct
[self.deep_deconstruct(value) for value in args],
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 78, in <listcomp>
[self.deep_deconstruct(value) for value in args],
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 54, in deep_deconstruct
return [self.deep_deconstruct(value) for value in obj]
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 54, in <listcomp>
return [self.deep_deconstruct(value) for value in obj]
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 56, in deep_deconstruct
return tuple(self.deep_deconstruct(value) for value in obj)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 56, in <genexpr>
return tuple(self.deep_deconstruct(value) for value in obj)
File ".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py", line 71, in deep_deconstruct
deconstructed = obj.deconstruct()
File ".venv/lib/python3.10/site-packages/wagtail/blocks/base.py", line 344, in deconstruct
raise ValueError(
ValueError: Could not find object CustomDocumentChooserBlock in wagtail.blocks.base.
Please note that you cannot serialize things like inner classes. Please move the object into the main module body to use migrations.
```
I'm using a custom document model (`WAGTAILDOCS_DOCUMENT_MODEL = "custom_documents.CustomDocument"`) so I removed `WAGTAILDOCS_DOCUMENT_MODEL` setting it see if the error persisted and when running `./manage.py migrate` the ValueError changes to:
```
ValueError: Could not find object DocumentChooserBlock in wagtail.blocks.base.
```
`CustomDocumentChooserBlock` -> `DocumentChooserBlock`
### Steps to Reproduce
1. run `./manage.py migrate` or `./manage.py makemigrations`
- I couldn't reproduce this on a fresh install of wagtail 4.0rc1 however I have been unable to find a cause for this error in my project so far.
### Technical details
- Python version: 3.10.5
- Django version: 3.2.15
- Wagtail version: 4.0rc1
-
Ensure DocumentChooserBlock can be deconstructed for migrations
Fixes #8989. Now that DocumentChooserBlock is constructed dynamically via wagtail.documents.viewsets.chooser, we need to explicitly set its `__module__` attribute so that the result of calling `deconstruct()` for migrations points back to the wagtail.documents.blocks module.
Also update the documentation for defining custom choosers, and add tests for deconstructing the other chooser blocks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/documents/blocks.py`
Content:
```
1 from wagtail.documents.views.chooser import viewset as chooser_viewset
2
3 DocumentChooserBlock = chooser_viewset.block_class
4
5 # When deconstructing a DocumentChooserBlock instance for migrations, the module path
6 # used in migrations should point to this module
7 DocumentChooserBlock.__module__ = "wagtail.documents.blocks"
8
```
Path: `wagtail/admin/viewsets/chooser.py`
Content:
```
1 from django.db.models import ForeignKey
2 from django.urls import path
3 from django.utils.functional import cached_property
4 from django.utils.translation import gettext as _
5
6 from wagtail.admin.forms.models import register_form_field_override
7 from wagtail.admin.views.generic import chooser as chooser_views
8 from wagtail.admin.widgets.chooser import BaseChooser
9 from wagtail.blocks import ChooserBlock
10
11 from .base import ViewSet
12
13
14 class ChooserViewSet(ViewSet):
15 """
16 A viewset that creates a chooser modal interface for choosing model instances.
17 """
18
19 model = None
20
21 icon = "snippet" #: The icon to use in the header of the chooser modal, and on the chooser widget
22 choose_one_text = _(
23 "Choose"
24 ) #: Label for the 'choose' button in the chooser widget when choosing an initial item
25 page_title = None #: Title text for the chooser modal (defaults to the same as ``choose_one_text``)`
26 choose_another_text = _(
27 "Change"
28 ) #: Label for the 'choose' button in the chooser widget, when an item has already been chosen
29 edit_item_text = _("Edit") #: Label for the 'edit' button in the chooser widget
30
31 per_page = 10 #: Number of results to show per page
32
33 #: The view class to use for the overall chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseView``.
34 choose_view_class = chooser_views.ChooseView
35
36 #: The view class used to render just the results panel within the chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseResultsView``.
37 choose_results_view_class = chooser_views.ChooseResultsView
38
39 #: The view class used after an item has been chosen; must be a subclass of ``wagtail.admin.views.generic.chooser.ChosenView``.
40 chosen_view_class = chooser_views.ChosenView
41
42 #: The view class used to handle submissions of the 'create' form; must be a subclass of ``wagtail.admin.views.generic.chooser.CreateView``.
43 create_view_class = chooser_views.CreateView
44
45 #: The base Widget class that the chooser widget will be derived from.
46 base_widget_class = BaseChooser
47
48 #: The base ChooserBlock class that the StreamField chooser block will be derived from.
49 base_block_class = ChooserBlock
50
51 #: Defaults to True; if False, the chooser widget will not automatically be registered for use in admin forms.
52 register_widget = True
53
54 #: Form class to use for the form in the "Create" tab of the modal.
55 creation_form_class = None
56
57 #: List of model fields that should be included in the creation form, if creation_form_class is not specified.
58 form_fields = None
59
60 #: List of model fields that should be excluded from the creation form, if creation_form_class.
61 #: If none of ``creation_form_class``, ``form_fields`` or ``exclude_form_fields`` are specified, the "Create" tab will be omitted.
62 exclude_form_fields = None
63
64 search_tab_label = _("Search") #: Label for the 'search' tab in the chooser modal
65 create_action_label = _(
66 "Create"
67 ) #: Label for the submit button on the 'create' form
68 create_action_clicked_label = None #: Alternative text to display on the submit button after it has been clicked
69 creation_tab_label = None #: Label for the 'create' tab in the chooser modal (defaults to the same as create_action_label)
70
71 permission_policy = None
72
73 def __init__(self, *args, **kwargs):
74 super().__init__(*args, **kwargs)
75 if self.page_title is None:
76 self.page_title = self.choose_one_text
77
78 @property
79 def choose_view(self):
80 return self.choose_view_class.as_view(
81 model=self.model,
82 chosen_url_name=self.get_url_name("chosen"),
83 results_url_name=self.get_url_name("choose_results"),
84 create_url_name=self.get_url_name("create"),
85 icon=self.icon,
86 page_title=self.page_title,
87 per_page=self.per_page,
88 creation_form_class=self.creation_form_class,
89 form_fields=self.form_fields,
90 exclude_form_fields=self.exclude_form_fields,
91 search_tab_label=self.search_tab_label,
92 creation_tab_label=self.creation_tab_label,
93 create_action_label=self.create_action_label,
94 create_action_clicked_label=self.create_action_clicked_label,
95 permission_policy=self.permission_policy,
96 )
97
98 @property
99 def choose_results_view(self):
100 return self.choose_results_view_class.as_view(
101 model=self.model,
102 chosen_url_name=self.get_url_name("chosen"),
103 results_url_name=self.get_url_name("choose_results"),
104 per_page=self.per_page,
105 creation_form_class=self.creation_form_class,
106 form_fields=self.form_fields,
107 exclude_form_fields=self.exclude_form_fields,
108 create_action_label=self.create_action_label,
109 create_action_clicked_label=self.create_action_clicked_label,
110 permission_policy=self.permission_policy,
111 )
112
113 @property
114 def chosen_view(self):
115 return self.chosen_view_class.as_view(
116 model=self.model,
117 )
118
119 @property
120 def create_view(self):
121 return self.create_view_class.as_view(
122 model=self.model,
123 create_url_name=self.get_url_name("create"),
124 creation_form_class=self.creation_form_class,
125 form_fields=self.form_fields,
126 exclude_form_fields=self.exclude_form_fields,
127 create_action_label=self.create_action_label,
128 create_action_clicked_label=self.create_action_clicked_label,
129 permission_policy=self.permission_policy,
130 )
131
132 @cached_property
133 def model_name(self):
134 if isinstance(self.model, str):
135 return self.model.split(".")[-1]
136 else:
137 return self.model.__name__
138
139 @cached_property
140 def widget_class(self):
141 """
142 Returns the form widget class for this chooser.
143 """
144 if self.model is None:
145 widget_class_name = "ChooserWidget"
146 else:
147 if isinstance(self.model, str):
148 model_name = self.model.split(".")[-1]
149 else:
150 model_name = self.model.__name__
151 widget_class_name = "%sChooserWidget" % model_name
152
153 return type(
154 widget_class_name,
155 (self.base_widget_class,),
156 {
157 "model": self.model,
158 "choose_one_text": self.choose_one_text,
159 "choose_another_text": self.choose_another_text,
160 "link_to_chosen_text": self.edit_item_text,
161 "chooser_modal_url_name": self.get_url_name("choose"),
162 "icon": self.icon,
163 },
164 )
165
166 @cached_property
167 def block_class(self):
168 """
169 Returns a StreamField ChooserBlock class using this chooser.
170 """
171 meta = type(
172 "Meta",
173 (self.base_block_class._meta_class,),
174 {
175 "icon": self.icon,
176 },
177 )
178 return type(
179 "%sChooserBlock" % self.model_name,
180 (self.base_block_class,),
181 {
182 "target_model": self.model,
183 "widget": self.widget_class(),
184 "Meta": meta,
185 },
186 )
187
188 def get_urlpatterns(self):
189 return super().get_urlpatterns() + [
190 path("", self.choose_view, name="choose"),
191 path("results/", self.choose_results_view, name="choose_results"),
192 path("chosen/<str:pk>/", self.chosen_view, name="chosen"),
193 path("create/", self.create_view, name="create"),
194 ]
195
196 def on_register(self):
197 if self.model and self.register_widget:
198 register_form_field_override(
199 ForeignKey, to=self.model, override={"widget": self.widget_class}
200 )
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/viewsets/chooser.py b/wagtail/admin/viewsets/chooser.py
--- a/wagtail/admin/viewsets/chooser.py
+++ b/wagtail/admin/viewsets/chooser.py
@@ -163,10 +163,13 @@
},
)
- @cached_property
- def block_class(self):
+ def get_block_class(self, name=None, module_path=None):
"""
Returns a StreamField ChooserBlock class using this chooser.
+
+ :param name: Name to give to the class; defaults to the model name with "ChooserBlock" appended
+ :param module_path: The dotted path of the module where the class can be imported from; used when
+ deconstructing the block definition for migration files.
"""
meta = type(
"Meta",
@@ -175,8 +178,8 @@
"icon": self.icon,
},
)
- return type(
- "%sChooserBlock" % self.model_name,
+ cls = type(
+ name or "%sChooserBlock" % self.model_name,
(self.base_block_class,),
{
"target_model": self.model,
@@ -184,6 +187,9 @@
"Meta": meta,
},
)
+ if module_path:
+ cls.__module__ = module_path
+ return cls
def get_urlpatterns(self):
return super().get_urlpatterns() + [
diff --git a/wagtail/documents/blocks.py b/wagtail/documents/blocks.py
--- a/wagtail/documents/blocks.py
+++ b/wagtail/documents/blocks.py
@@ -1,7 +1,5 @@
from wagtail.documents.views.chooser import viewset as chooser_viewset
-DocumentChooserBlock = chooser_viewset.block_class
-
-# When deconstructing a DocumentChooserBlock instance for migrations, the module path
-# used in migrations should point to this module
-DocumentChooserBlock.__module__ = "wagtail.documents.blocks"
+DocumentChooserBlock = chooser_viewset.get_block_class(
+ name="DocumentChooserBlock", module_path="wagtail.documents.blocks"
+)
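The same `get_block_class` hook also lets third-party chooser viewsets expose migration-safe blocks. A sketch with hypothetical app, viewset and module names (none of these come from the patch):

```python
# Hypothetical example: a custom chooser viewset handing out a block class whose
# deconstruct() points at an importable module, so makemigrations can serialize it.
from myapp.views import person_chooser_viewset  # hypothetical viewset instance

PersonChooserBlock = person_chooser_viewset.get_block_class(
    name="PersonChooserBlock",
    module_path="myapp.blocks",  # should match the module this assignment lives in
)
```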
|
{"golden_diff": "diff --git a/wagtail/admin/viewsets/chooser.py b/wagtail/admin/viewsets/chooser.py\n--- a/wagtail/admin/viewsets/chooser.py\n+++ b/wagtail/admin/viewsets/chooser.py\n@@ -163,10 +163,13 @@\n },\n )\n \n- @cached_property\n- def block_class(self):\n+ def get_block_class(self, name=None, module_path=None):\n \"\"\"\n Returns a StreamField ChooserBlock class using this chooser.\n+\n+ :param name: Name to give to the class; defaults to the model name with \"ChooserBlock\" appended\n+ :param module_path: The dotted path of the module where the class can be imported from; used when\n+ deconstructing the block definition for migration files.\n \"\"\"\n meta = type(\n \"Meta\",\n@@ -175,8 +178,8 @@\n \"icon\": self.icon,\n },\n )\n- return type(\n- \"%sChooserBlock\" % self.model_name,\n+ cls = type(\n+ name or \"%sChooserBlock\" % self.model_name,\n (self.base_block_class,),\n {\n \"target_model\": self.model,\n@@ -184,6 +187,9 @@\n \"Meta\": meta,\n },\n )\n+ if module_path:\n+ cls.__module__ = module_path\n+ return cls\n \n def get_urlpatterns(self):\n return super().get_urlpatterns() + [\ndiff --git a/wagtail/documents/blocks.py b/wagtail/documents/blocks.py\n--- a/wagtail/documents/blocks.py\n+++ b/wagtail/documents/blocks.py\n@@ -1,7 +1,5 @@\n from wagtail.documents.views.chooser import viewset as chooser_viewset\n \n-DocumentChooserBlock = chooser_viewset.block_class\n-\n-# When deconstructing a DocumentChooserBlock instance for migrations, the module path\n-# used in migrations should point to this module\n-DocumentChooserBlock.__module__ = \"wagtail.documents.blocks\"\n+DocumentChooserBlock = chooser_viewset.get_block_class(\n+ name=\"DocumentChooserBlock\", module_path=\"wagtail.documents.blocks\"\n+)\n", "issue": "wagtail 4.0rc1 migration ValueError: Could not find object DocumentChooserBlock\n### Issue Summary\r\n\r\n```\r\nOperations to perform:\r\n Apply all migrations: admin, auth, contenttypes, core, custom_documents, custom_images, custom_users, home, ...,\r\n No migrations to apply.\r\nTraceback (most recent call last):\r\n File \"django/./manage.py\", line 12, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \".venv/lib/python3.10/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \".venv/lib/python3.10/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \".venv/lib/python3.10/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \".venv/lib/python3.10/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \".venv/lib/python3.10/site-packages/django/core/management/base.py\", line 89, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \".venv/lib/python3.10/site-packages/django/core/management/commands/migrate.py\", line 227, in handle\r\n changes = autodetector.changes(graph=executor.loader.graph)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 41, in changes\r\n changes = self._detect_changes(convert_apps, graph)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 184, in _detect_changes\r\n self.generate_altered_fields()\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 964, in generate_altered_fields\r\n old_field_dec = 
self.deep_deconstruct(old_field)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 78, in deep_deconstruct\r\n [self.deep_deconstruct(value) for value in args],\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 78, in <listcomp>\r\n [self.deep_deconstruct(value) for value in args],\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 54, in deep_deconstruct\r\n return [self.deep_deconstruct(value) for value in obj]\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 54, in <listcomp>\r\n return [self.deep_deconstruct(value) for value in obj]\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 56, in deep_deconstruct\r\n return tuple(self.deep_deconstruct(value) for value in obj)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 56, in <genexpr>\r\n return tuple(self.deep_deconstruct(value) for value in obj)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 78, in deep_deconstruct\r\n [self.deep_deconstruct(value) for value in args],\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 78, in <listcomp>\r\n [self.deep_deconstruct(value) for value in args],\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 54, in deep_deconstruct\r\n return [self.deep_deconstruct(value) for value in obj]\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 54, in <listcomp>\r\n return [self.deep_deconstruct(value) for value in obj]\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 56, in deep_deconstruct\r\n return tuple(self.deep_deconstruct(value) for value in obj)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 56, in <genexpr>\r\n return tuple(self.deep_deconstruct(value) for value in obj)\r\n File \".venv/lib/python3.10/site-packages/django/db/migrations/autodetector.py\", line 71, in deep_deconstruct\r\n deconstructed = obj.deconstruct()\r\n File \".venv/lib/python3.10/site-packages/wagtail/blocks/base.py\", line 344, in deconstruct\r\n raise ValueError(\r\nValueError: Could not find object CustomDocumentChooserBlock in wagtail.blocks.base.\r\nPlease note that you cannot serialize things like inner classes. Please move the object into the main module body to use migrations.\r\n```\r\n\r\nI'm using a custom document model (`WAGTAILDOCS_DOCUMENT_MODEL = \"custom_documents.CustomDocument\"`) so I removed `WAGTAILDOCS_DOCUMENT_MODEL` setting it see if the error persisted and when running `./manage.py migrate` the ValueError changes to:\r\n\r\n```\r\nValueError: Could not find object DocumentChooserBlock in wagtail.blocks.base.\r\n```\r\n\r\n`CustomDocumentChooserBlock` -> `DocumentChooserBlock` \r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. run `./manage.py migrate` or `./manage.py makemigrations`\r\n\r\n- I couldn't reproduce this on a fresh install of wagtail 4.0rc1 however I have been unable to find a cause for this error in my project so far.\r\n\r\n### Technical details\r\n\r\n- Python version: 3.10.5\r\n- Django version: 3.2.15\r\n- Wagtail version: 4.0rc1\r\n-\nEnsure DocumentChooserBlock can be deconstructed for migrations\nFixes #8989. 
Now that DocumentChooserBlock is constructed dynamically via wagtail.documents.viewsets.chooser, we need to explicitly set its `__module__` attribute so that the result of calling `deconstruct()` for migrations points back to the wagtail.documents.blocks module.\r\n\r\nAlso update the documentation for defining custom choosers, and add tests for deconstructing the other chooser blocks.\r\n\n", "before_files": [{"content": "from wagtail.documents.views.chooser import viewset as chooser_viewset\n\nDocumentChooserBlock = chooser_viewset.block_class\n\n# When deconstructing a DocumentChooserBlock instance for migrations, the module path\n# used in migrations should point to this module\nDocumentChooserBlock.__module__ = \"wagtail.documents.blocks\"\n", "path": "wagtail/documents/blocks.py"}, {"content": "from django.db.models import ForeignKey\nfrom django.urls import path\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.admin.forms.models import register_form_field_override\nfrom wagtail.admin.views.generic import chooser as chooser_views\nfrom wagtail.admin.widgets.chooser import BaseChooser\nfrom wagtail.blocks import ChooserBlock\n\nfrom .base import ViewSet\n\n\nclass ChooserViewSet(ViewSet):\n \"\"\"\n A viewset that creates a chooser modal interface for choosing model instances.\n \"\"\"\n\n model = None\n\n icon = \"snippet\" #: The icon to use in the header of the chooser modal, and on the chooser widget\n choose_one_text = _(\n \"Choose\"\n ) #: Label for the 'choose' button in the chooser widget when choosing an initial item\n page_title = None #: Title text for the chooser modal (defaults to the same as ``choose_one_text``)`\n choose_another_text = _(\n \"Change\"\n ) #: Label for the 'choose' button in the chooser widget, when an item has already been chosen\n edit_item_text = _(\"Edit\") #: Label for the 'edit' button in the chooser widget\n\n per_page = 10 #: Number of results to show per page\n\n #: The view class to use for the overall chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseView``.\n choose_view_class = chooser_views.ChooseView\n\n #: The view class used to render just the results panel within the chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseResultsView``.\n choose_results_view_class = chooser_views.ChooseResultsView\n\n #: The view class used after an item has been chosen; must be a subclass of ``wagtail.admin.views.generic.chooser.ChosenView``.\n chosen_view_class = chooser_views.ChosenView\n\n #: The view class used to handle submissions of the 'create' form; must be a subclass of ``wagtail.admin.views.generic.chooser.CreateView``.\n create_view_class = chooser_views.CreateView\n\n #: The base Widget class that the chooser widget will be derived from.\n base_widget_class = BaseChooser\n\n #: The base ChooserBlock class that the StreamField chooser block will be derived from.\n base_block_class = ChooserBlock\n\n #: Defaults to True; if False, the chooser widget will not automatically be registered for use in admin forms.\n register_widget = True\n\n #: Form class to use for the form in the \"Create\" tab of the modal.\n creation_form_class = None\n\n #: List of model fields that should be included in the creation form, if creation_form_class is not specified.\n form_fields = None\n\n #: List of model fields that should be excluded from the creation form, if creation_form_class.\n #: If none of ``creation_form_class``, 
``form_fields`` or ``exclude_form_fields`` are specified, the \"Create\" tab will be omitted.\n exclude_form_fields = None\n\n search_tab_label = _(\"Search\") #: Label for the 'search' tab in the chooser modal\n create_action_label = _(\n \"Create\"\n ) #: Label for the submit button on the 'create' form\n create_action_clicked_label = None #: Alternative text to display on the submit button after it has been clicked\n creation_tab_label = None #: Label for the 'create' tab in the chooser modal (defaults to the same as create_action_label)\n\n permission_policy = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.page_title is None:\n self.page_title = self.choose_one_text\n\n @property\n def choose_view(self):\n return self.choose_view_class.as_view(\n model=self.model,\n chosen_url_name=self.get_url_name(\"chosen\"),\n results_url_name=self.get_url_name(\"choose_results\"),\n create_url_name=self.get_url_name(\"create\"),\n icon=self.icon,\n page_title=self.page_title,\n per_page=self.per_page,\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n search_tab_label=self.search_tab_label,\n creation_tab_label=self.creation_tab_label,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @property\n def choose_results_view(self):\n return self.choose_results_view_class.as_view(\n model=self.model,\n chosen_url_name=self.get_url_name(\"chosen\"),\n results_url_name=self.get_url_name(\"choose_results\"),\n per_page=self.per_page,\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @property\n def chosen_view(self):\n return self.chosen_view_class.as_view(\n model=self.model,\n )\n\n @property\n def create_view(self):\n return self.create_view_class.as_view(\n model=self.model,\n create_url_name=self.get_url_name(\"create\"),\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @cached_property\n def model_name(self):\n if isinstance(self.model, str):\n return self.model.split(\".\")[-1]\n else:\n return self.model.__name__\n\n @cached_property\n def widget_class(self):\n \"\"\"\n Returns the form widget class for this chooser.\n \"\"\"\n if self.model is None:\n widget_class_name = \"ChooserWidget\"\n else:\n if isinstance(self.model, str):\n model_name = self.model.split(\".\")[-1]\n else:\n model_name = self.model.__name__\n widget_class_name = \"%sChooserWidget\" % model_name\n\n return type(\n widget_class_name,\n (self.base_widget_class,),\n {\n \"model\": self.model,\n \"choose_one_text\": self.choose_one_text,\n \"choose_another_text\": self.choose_another_text,\n \"link_to_chosen_text\": self.edit_item_text,\n \"chooser_modal_url_name\": self.get_url_name(\"choose\"),\n \"icon\": self.icon,\n },\n )\n\n @cached_property\n def block_class(self):\n \"\"\"\n Returns a StreamField ChooserBlock class using this chooser.\n \"\"\"\n meta = type(\n \"Meta\",\n (self.base_block_class._meta_class,),\n 
{\n \"icon\": self.icon,\n },\n )\n return type(\n \"%sChooserBlock\" % self.model_name,\n (self.base_block_class,),\n {\n \"target_model\": self.model,\n \"widget\": self.widget_class(),\n \"Meta\": meta,\n },\n )\n\n def get_urlpatterns(self):\n return super().get_urlpatterns() + [\n path(\"\", self.choose_view, name=\"choose\"),\n path(\"results/\", self.choose_results_view, name=\"choose_results\"),\n path(\"chosen/<str:pk>/\", self.chosen_view, name=\"chosen\"),\n path(\"create/\", self.create_view, name=\"create\"),\n ]\n\n def on_register(self):\n if self.model and self.register_widget:\n register_form_field_override(\n ForeignKey, to=self.model, override={\"widget\": self.widget_class}\n )\n", "path": "wagtail/admin/viewsets/chooser.py"}], "after_files": [{"content": "from wagtail.documents.views.chooser import viewset as chooser_viewset\n\nDocumentChooserBlock = chooser_viewset.get_block_class(\n name=\"DocumentChooserBlock\", module_path=\"wagtail.documents.blocks\"\n)\n", "path": "wagtail/documents/blocks.py"}, {"content": "from django.db.models import ForeignKey\nfrom django.urls import path\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.admin.forms.models import register_form_field_override\nfrom wagtail.admin.views.generic import chooser as chooser_views\nfrom wagtail.admin.widgets.chooser import BaseChooser\nfrom wagtail.blocks import ChooserBlock\n\nfrom .base import ViewSet\n\n\nclass ChooserViewSet(ViewSet):\n \"\"\"\n A viewset that creates a chooser modal interface for choosing model instances.\n \"\"\"\n\n model = None\n\n icon = \"snippet\" #: The icon to use in the header of the chooser modal, and on the chooser widget\n choose_one_text = _(\n \"Choose\"\n ) #: Label for the 'choose' button in the chooser widget when choosing an initial item\n page_title = None #: Title text for the chooser modal (defaults to the same as ``choose_one_text``)`\n choose_another_text = _(\n \"Change\"\n ) #: Label for the 'choose' button in the chooser widget, when an item has already been chosen\n edit_item_text = _(\"Edit\") #: Label for the 'edit' button in the chooser widget\n\n per_page = 10 #: Number of results to show per page\n\n #: The view class to use for the overall chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseView``.\n choose_view_class = chooser_views.ChooseView\n\n #: The view class used to render just the results panel within the chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseResultsView``.\n choose_results_view_class = chooser_views.ChooseResultsView\n\n #: The view class used after an item has been chosen; must be a subclass of ``wagtail.admin.views.generic.chooser.ChosenView``.\n chosen_view_class = chooser_views.ChosenView\n\n #: The view class used to handle submissions of the 'create' form; must be a subclass of ``wagtail.admin.views.generic.chooser.CreateView``.\n create_view_class = chooser_views.CreateView\n\n #: The base Widget class that the chooser widget will be derived from.\n base_widget_class = BaseChooser\n\n #: The base ChooserBlock class that the StreamField chooser block will be derived from.\n base_block_class = ChooserBlock\n\n #: Defaults to True; if False, the chooser widget will not automatically be registered for use in admin forms.\n register_widget = True\n\n #: Form class to use for the form in the \"Create\" tab of the modal.\n creation_form_class = None\n\n #: List of model fields that should be 
included in the creation form, if creation_form_class is not specified.\n form_fields = None\n\n #: List of model fields that should be excluded from the creation form, if creation_form_class.\n #: If none of ``creation_form_class``, ``form_fields`` or ``exclude_form_fields`` are specified, the \"Create\" tab will be omitted.\n exclude_form_fields = None\n\n search_tab_label = _(\"Search\") #: Label for the 'search' tab in the chooser modal\n create_action_label = _(\n \"Create\"\n ) #: Label for the submit button on the 'create' form\n create_action_clicked_label = None #: Alternative text to display on the submit button after it has been clicked\n creation_tab_label = None #: Label for the 'create' tab in the chooser modal (defaults to the same as create_action_label)\n\n permission_policy = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.page_title is None:\n self.page_title = self.choose_one_text\n\n @property\n def choose_view(self):\n return self.choose_view_class.as_view(\n model=self.model,\n chosen_url_name=self.get_url_name(\"chosen\"),\n results_url_name=self.get_url_name(\"choose_results\"),\n create_url_name=self.get_url_name(\"create\"),\n icon=self.icon,\n page_title=self.page_title,\n per_page=self.per_page,\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n search_tab_label=self.search_tab_label,\n creation_tab_label=self.creation_tab_label,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @property\n def choose_results_view(self):\n return self.choose_results_view_class.as_view(\n model=self.model,\n chosen_url_name=self.get_url_name(\"chosen\"),\n results_url_name=self.get_url_name(\"choose_results\"),\n per_page=self.per_page,\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @property\n def chosen_view(self):\n return self.chosen_view_class.as_view(\n model=self.model,\n )\n\n @property\n def create_view(self):\n return self.create_view_class.as_view(\n model=self.model,\n create_url_name=self.get_url_name(\"create\"),\n creation_form_class=self.creation_form_class,\n form_fields=self.form_fields,\n exclude_form_fields=self.exclude_form_fields,\n create_action_label=self.create_action_label,\n create_action_clicked_label=self.create_action_clicked_label,\n permission_policy=self.permission_policy,\n )\n\n @cached_property\n def model_name(self):\n if isinstance(self.model, str):\n return self.model.split(\".\")[-1]\n else:\n return self.model.__name__\n\n @cached_property\n def widget_class(self):\n \"\"\"\n Returns the form widget class for this chooser.\n \"\"\"\n if self.model is None:\n widget_class_name = \"ChooserWidget\"\n else:\n if isinstance(self.model, str):\n model_name = self.model.split(\".\")[-1]\n else:\n model_name = self.model.__name__\n widget_class_name = \"%sChooserWidget\" % model_name\n\n return type(\n widget_class_name,\n (self.base_widget_class,),\n {\n \"model\": self.model,\n \"choose_one_text\": self.choose_one_text,\n \"choose_another_text\": self.choose_another_text,\n \"link_to_chosen_text\": self.edit_item_text,\n \"chooser_modal_url_name\": 
self.get_url_name(\"choose\"),\n \"icon\": self.icon,\n },\n )\n\n def get_block_class(self, name=None, module_path=None):\n \"\"\"\n Returns a StreamField ChooserBlock class using this chooser.\n\n :param name: Name to give to the class; defaults to the model name with \"ChooserBlock\" appended\n :param module_path: The dotted path of the module where the class can be imported from; used when\n deconstructing the block definition for migration files.\n \"\"\"\n meta = type(\n \"Meta\",\n (self.base_block_class._meta_class,),\n {\n \"icon\": self.icon,\n },\n )\n cls = type(\n name or \"%sChooserBlock\" % self.model_name,\n (self.base_block_class,),\n {\n \"target_model\": self.model,\n \"widget\": self.widget_class(),\n \"Meta\": meta,\n },\n )\n if module_path:\n cls.__module__ = module_path\n return cls\n\n def get_urlpatterns(self):\n return super().get_urlpatterns() + [\n path(\"\", self.choose_view, name=\"choose\"),\n path(\"results/\", self.choose_results_view, name=\"choose_results\"),\n path(\"chosen/<str:pk>/\", self.chosen_view, name=\"chosen\"),\n path(\"create/\", self.create_view, name=\"create\"),\n ]\n\n def on_register(self):\n if self.model and self.register_widget:\n register_form_field_override(\n ForeignKey, to=self.model, override={\"widget\": self.widget_class}\n )\n", "path": "wagtail/admin/viewsets/chooser.py"}]}
| 3,972 | 478 |
gh_patches_debug_5896
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-7295
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Address PEP 706 - Filter for tarfile.extractall
The proposal improves the security of tarfile extraction to help avoid CVE-2007-4559.
- In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses `fully_trusted` filter.
- In Python 3.14+, it will use the `data` filter.
It seems the proposal was also backported to older versions of Python.
Reference: https://peps.python.org/pep-0706/
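For illustration, a minimal sketch of the backwards-compatible extraction pattern the PEP suggests (the helper name and paths are made up; checking `hasattr(tarfile, "data_filter")` is the documented way to detect PEP 706 support):
```python
import tarfile

def extract_archive(archive_path, dest):
    # Prefer the 'data' filter on interpreters that ship PEP 706 support;
    # older interpreters fall back to the legacy, unfiltered behaviour.
    with tarfile.open(archive_path) as archive:
        if hasattr(tarfile, "data_filter"):
            archive.extractall(path=dest, filter="data")
        else:
            archive.extractall(path=dest)
```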
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/process/remotetransfer.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 """
17 module for regrouping all FileWriterImpl and FileReaderImpl away from steps
18 """
19
20 import os
21 import tarfile
22 import tempfile
23 from io import BytesIO
24
25 from buildbot.util import bytes2unicode
26 from buildbot.util import unicode2bytes
27 from buildbot.worker.protocols import base
28
29
30 class FileWriter(base.FileWriterImpl):
31
32 """
33 Helper class that acts as a file-object with write access
34 """
35
36 def __init__(self, destfile, maxsize, mode):
37 # Create missing directories.
38 destfile = os.path.abspath(destfile)
39 dirname = os.path.dirname(destfile)
40 if not os.path.exists(dirname):
41 os.makedirs(dirname)
42
43 self.destfile = destfile
44 self.mode = mode
45 fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')
46 self.fp = os.fdopen(fd, 'wb')
47 self.remaining = maxsize
48
49 def remote_write(self, data):
50 """
51 Called from remote worker to write L{data} to L{fp} within boundaries
52 of L{maxsize}
53
54 @type data: C{string}
55 @param data: String of data to write
56 """
57 data = unicode2bytes(data)
58 if self.remaining is not None:
59 if len(data) > self.remaining:
60 data = data[:self.remaining]
61 self.fp.write(data)
62 self.remaining = self.remaining - len(data)
63 else:
64 self.fp.write(data)
65
66 def remote_utime(self, accessed_modified):
67 os.utime(self.destfile, accessed_modified)
68
69 def remote_close(self):
70 """
71 Called by remote worker to state that no more data will be transferred
72 """
73 self.fp.close()
74 self.fp = None
75 # on windows, os.rename does not automatically unlink, so do it
76 # manually
77 if os.path.exists(self.destfile):
78 os.unlink(self.destfile)
79 os.rename(self.tmpname, self.destfile)
80 self.tmpname = None
81 if self.mode is not None:
82 os.chmod(self.destfile, self.mode)
83
84 def cancel(self):
85 # unclean shutdown, the file is probably truncated, so delete it
86 # altogether rather than deliver a corrupted file
87 fp = getattr(self, "fp", None)
88 if fp:
89 fp.close()
90 if self.destfile and os.path.exists(self.destfile):
91 os.unlink(self.destfile)
92 if self.tmpname and os.path.exists(self.tmpname):
93 os.unlink(self.tmpname)
94
95
96 class DirectoryWriter(FileWriter):
97
98 """
99 A DirectoryWriter is implemented as a FileWriter, with an added post-processing
100 step to unpack the archive, once the transfer has completed.
101 """
102
103 def __init__(self, destroot, maxsize, compress, mode):
104 self.destroot = destroot
105 self.compress = compress
106
107 self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')
108 os.close(self.fd)
109
110 super().__init__(self.tarname, maxsize, mode)
111
112 def remote_unpack(self):
113 """
114 Called by remote worker to state that no more data will be transferred
115 """
116 # Make sure remote_close is called, otherwise atomic rename won't happen
117 self.remote_close()
118
119 # Map configured compression to a TarFile setting
120 if self.compress == 'bz2':
121 mode = 'r|bz2'
122 elif self.compress == 'gz':
123 mode = 'r|gz'
124 else:
125 mode = 'r'
126
127 # Unpack archive and clean up after self
128 with tarfile.open(name=self.tarname, mode=mode) as archive:
129 archive.extractall(path=self.destroot)
130 os.remove(self.tarname)
131
132
133 class FileReader(base.FileReaderImpl):
134
135 """
136 Helper class that acts as a file-object with read access
137 """
138
139 def __init__(self, fp):
140 self.fp = fp
141
142 def remote_read(self, maxlength):
143 """
144 Called from remote worker to read at most L{maxlength} bytes of data
145
146 @type maxlength: C{integer}
147 @param maxlength: Maximum number of data bytes that can be returned
148
149 @return: Data read from L{fp}
150 @rtype: C{string} of bytes read from file
151 """
152 if self.fp is None:
153 return ''
154
155 data = self.fp.read(maxlength)
156 return data
157
158 def remote_close(self):
159 """
160 Called by remote worker to state that no more data will be transferred
161 """
162 if self.fp is not None:
163 self.fp.close()
164 self.fp = None
165
166
167 class StringFileWriter(base.FileWriterImpl):
168
169 """
170 FileWriter class that just puts received data into a buffer.
171
172 Used to upload a file from worker for inline processing rather than
173 writing into a file on master.
174 """
175
176 def __init__(self):
177 self.buffer = ""
178
179 def remote_write(self, data):
180 self.buffer += bytes2unicode(data)
181
182 def remote_close(self):
183 pass
184
185
186 class StringFileReader(FileReader):
187
188 """
189 FileWriter class that just buid send data from a string.
190
191 Used to download a file to worker from local string rather than first
192 writing into a file on master.
193 """
194
195 def __init__(self, s):
196 s = unicode2bytes(s)
197 super().__init__(BytesIO(s))
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py
--- a/master/buildbot/process/remotetransfer.py
+++ b/master/buildbot/process/remotetransfer.py
@@ -126,7 +126,10 @@
# Unpack archive and clean up after self
with tarfile.open(name=self.tarname, mode=mode) as archive:
- archive.extractall(path=self.destroot)
+ if hasattr(tarfile, 'data_filter'):
+ archive.extractall(path=self.destroot, filter='data')
+ else:
+ archive.extractall(path=self.destroot)
os.remove(self.tarname)
|
{"golden_diff": "diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py\n--- a/master/buildbot/process/remotetransfer.py\n+++ b/master/buildbot/process/remotetransfer.py\n@@ -126,7 +126,10 @@\n \n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n- archive.extractall(path=self.destroot)\n+ if hasattr(tarfile, 'data_filter'):\n+ archive.extractall(path=self.destroot, filter='data')\n+ else:\n+ archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n", "issue": "Address PEP 706 - Filter for tarfile.extractall\nGiven proposal improves security of tarfile extraction to help avoid CVE-2007-4559.\r\n\r\n- In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses `fully_trusted` filter.\r\n- In Python 3.14+, it will use the `data` filter.\r\n\r\nIt seems given proposal was backported also to older version of Python.\r\n\r\nReference: https://peps.python.org/pep-0706/\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nmodule for regrouping all FileWriterImpl and FileReaderImpl away from steps\n\"\"\"\n\nimport os\nimport tarfile\nimport tempfile\nfrom io import BytesIO\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker.protocols import base\n\n\nclass FileWriter(base.FileWriterImpl):\n\n \"\"\"\n Helper class that acts as a file-object with write access\n \"\"\"\n\n def __init__(self, destfile, maxsize, mode):\n # Create missing directories.\n destfile = os.path.abspath(destfile)\n dirname = os.path.dirname(destfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.destfile = destfile\n self.mode = mode\n fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')\n self.fp = os.fdopen(fd, 'wb')\n self.remaining = maxsize\n\n def remote_write(self, data):\n \"\"\"\n Called from remote worker to write L{data} to L{fp} within boundaries\n of L{maxsize}\n\n @type data: C{string}\n @param data: String of data to write\n \"\"\"\n data = unicode2bytes(data)\n if self.remaining is not None:\n if len(data) > self.remaining:\n data = data[:self.remaining]\n self.fp.write(data)\n self.remaining = self.remaining - len(data)\n else:\n self.fp.write(data)\n\n def remote_utime(self, accessed_modified):\n os.utime(self.destfile, accessed_modified)\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n self.fp.close()\n self.fp = None\n # on windows, os.rename does not automatically unlink, so do it\n # manually\n if os.path.exists(self.destfile):\n os.unlink(self.destfile)\n os.rename(self.tmpname, self.destfile)\n self.tmpname = None\n if self.mode is not None:\n os.chmod(self.destfile, self.mode)\n\n def cancel(self):\n # unclean 
shutdown, the file is probably truncated, so delete it\n # altogether rather than deliver a corrupted file\n fp = getattr(self, \"fp\", None)\n if fp:\n fp.close()\n if self.destfile and os.path.exists(self.destfile):\n os.unlink(self.destfile)\n if self.tmpname and os.path.exists(self.tmpname):\n os.unlink(self.tmpname)\n\n\nclass DirectoryWriter(FileWriter):\n\n \"\"\"\n A DirectoryWriter is implemented as a FileWriter, with an added post-processing\n step to unpack the archive, once the transfer has completed.\n \"\"\"\n\n def __init__(self, destroot, maxsize, compress, mode):\n self.destroot = destroot\n self.compress = compress\n\n self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')\n os.close(self.fd)\n\n super().__init__(self.tarname, maxsize, mode)\n\n def remote_unpack(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n # Make sure remote_close is called, otherwise atomic rename won't happen\n self.remote_close()\n\n # Map configured compression to a TarFile setting\n if self.compress == 'bz2':\n mode = 'r|bz2'\n elif self.compress == 'gz':\n mode = 'r|gz'\n else:\n mode = 'r'\n\n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n\n\nclass FileReader(base.FileReaderImpl):\n\n \"\"\"\n Helper class that acts as a file-object with read access\n \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def remote_read(self, maxlength):\n \"\"\"\n Called from remote worker to read at most L{maxlength} bytes of data\n\n @type maxlength: C{integer}\n @param maxlength: Maximum number of data bytes that can be returned\n\n @return: Data read from L{fp}\n @rtype: C{string} of bytes read from file\n \"\"\"\n if self.fp is None:\n return ''\n\n data = self.fp.read(maxlength)\n return data\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n if self.fp is not None:\n self.fp.close()\n self.fp = None\n\n\nclass StringFileWriter(base.FileWriterImpl):\n\n \"\"\"\n FileWriter class that just puts received data into a buffer.\n\n Used to upload a file from worker for inline processing rather than\n writing into a file on master.\n \"\"\"\n\n def __init__(self):\n self.buffer = \"\"\n\n def remote_write(self, data):\n self.buffer += bytes2unicode(data)\n\n def remote_close(self):\n pass\n\n\nclass StringFileReader(FileReader):\n\n \"\"\"\n FileWriter class that just buid send data from a string.\n\n Used to download a file to worker from local string rather than first\n writing into a file on master.\n \"\"\"\n\n def __init__(self, s):\n s = unicode2bytes(s)\n super().__init__(BytesIO(s))\n", "path": "master/buildbot/process/remotetransfer.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nmodule for regrouping all FileWriterImpl and FileReaderImpl away from steps\n\"\"\"\n\nimport os\nimport tarfile\nimport tempfile\nfrom io import BytesIO\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker.protocols import base\n\n\nclass FileWriter(base.FileWriterImpl):\n\n \"\"\"\n Helper class that acts as a file-object with write access\n \"\"\"\n\n def __init__(self, destfile, maxsize, mode):\n # Create missing directories.\n destfile = os.path.abspath(destfile)\n dirname = os.path.dirname(destfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.destfile = destfile\n self.mode = mode\n fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')\n self.fp = os.fdopen(fd, 'wb')\n self.remaining = maxsize\n\n def remote_write(self, data):\n \"\"\"\n Called from remote worker to write L{data} to L{fp} within boundaries\n of L{maxsize}\n\n @type data: C{string}\n @param data: String of data to write\n \"\"\"\n data = unicode2bytes(data)\n if self.remaining is not None:\n if len(data) > self.remaining:\n data = data[:self.remaining]\n self.fp.write(data)\n self.remaining = self.remaining - len(data)\n else:\n self.fp.write(data)\n\n def remote_utime(self, accessed_modified):\n os.utime(self.destfile, accessed_modified)\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n self.fp.close()\n self.fp = None\n # on windows, os.rename does not automatically unlink, so do it\n # manually\n if os.path.exists(self.destfile):\n os.unlink(self.destfile)\n os.rename(self.tmpname, self.destfile)\n self.tmpname = None\n if self.mode is not None:\n os.chmod(self.destfile, self.mode)\n\n def cancel(self):\n # unclean shutdown, the file is probably truncated, so delete it\n # altogether rather than deliver a corrupted file\n fp = getattr(self, \"fp\", None)\n if fp:\n fp.close()\n if self.destfile and os.path.exists(self.destfile):\n os.unlink(self.destfile)\n if self.tmpname and os.path.exists(self.tmpname):\n os.unlink(self.tmpname)\n\n\nclass DirectoryWriter(FileWriter):\n\n \"\"\"\n A DirectoryWriter is implemented as a FileWriter, with an added post-processing\n step to unpack the archive, once the transfer has completed.\n \"\"\"\n\n def __init__(self, destroot, maxsize, compress, mode):\n self.destroot = destroot\n self.compress = compress\n\n self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')\n os.close(self.fd)\n\n super().__init__(self.tarname, maxsize, mode)\n\n def remote_unpack(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n # Make sure remote_close is called, otherwise atomic rename won't happen\n self.remote_close()\n\n # Map configured compression to a TarFile setting\n if self.compress == 'bz2':\n mode = 'r|bz2'\n elif self.compress == 'gz':\n mode = 'r|gz'\n else:\n mode = 'r'\n\n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n if hasattr(tarfile, 'data_filter'):\n archive.extractall(path=self.destroot, filter='data')\n else:\n archive.extractall(path=self.destroot)\n 
os.remove(self.tarname)\n\n\nclass FileReader(base.FileReaderImpl):\n\n \"\"\"\n Helper class that acts as a file-object with read access\n \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def remote_read(self, maxlength):\n \"\"\"\n Called from remote worker to read at most L{maxlength} bytes of data\n\n @type maxlength: C{integer}\n @param maxlength: Maximum number of data bytes that can be returned\n\n @return: Data read from L{fp}\n @rtype: C{string} of bytes read from file\n \"\"\"\n if self.fp is None:\n return ''\n\n data = self.fp.read(maxlength)\n return data\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n if self.fp is not None:\n self.fp.close()\n self.fp = None\n\n\nclass StringFileWriter(base.FileWriterImpl):\n\n \"\"\"\n FileWriter class that just puts received data into a buffer.\n\n Used to upload a file from worker for inline processing rather than\n writing into a file on master.\n \"\"\"\n\n def __init__(self):\n self.buffer = \"\"\n\n def remote_write(self, data):\n self.buffer += bytes2unicode(data)\n\n def remote_close(self):\n pass\n\n\nclass StringFileReader(FileReader):\n\n \"\"\"\n FileWriter class that just buid send data from a string.\n\n Used to download a file to worker from local string rather than first\n writing into a file on master.\n \"\"\"\n\n def __init__(self, s):\n s = unicode2bytes(s)\n super().__init__(BytesIO(s))\n", "path": "master/buildbot/process/remotetransfer.py"}]}
| 2,203 | 153 |
gh_patches_debug_20891
|
rasdani/github-patches
|
git_diff
|
zigpy__zha-device-handlers-392
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Device Support Request] Add support for updated Legrand Dimmer switch w/o neutral
**Is your feature request related to a problem? Please describe.**
I've updated the firmware of my Legrand Dimmer switch w/o neutral for which support was added in https://github.com/zigpy/zha-device-handlers/issues/299
Before OTA upgrade:
- app_version: 0
- hw_version: 1
- stack_version: 64
- sw_build_id: 01a (26)
- zcl_version: 2
- Firmware: 0x03401a00
After OTA upgrade (2020-06-08):
- app_version: 0
- hw_version: 6
- stack_version: 66
- sw_build_id: 02b (43)
- zcl_version: 2
- Firmware: 0x002b4203
And now it reports a new `GreenPowerProxy` endpoint with id 242:
```
{
"node_descriptor": "<NodeDescriptor byte1=17 byte2=64 mac_capability_flags=142 manufacturer_code=4129 maximum_buffer_size=89 maximum_incoming_transfer_size=63 server_mask=10752 maximum_outgoing_transfer_size=63 descriptor_capability_field=0>",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0100",
"in_clusters": [
"0x0000",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0008",
"0x000f",
"0xfc01"
],
"out_clusters": [
"0x0000",
"0x0019",
"0xfc01"
]
},
"242": {
"profile_id": 41440,
"device_type": "0x0061",
"in_clusters": [],
"out_clusters": [
"0x0021"
]
}
},
"manufacturer": " Legrand",
"model": " Dimmer switch w/o neutral",
"class": "zigpy.device.Device"
}
```
The issue is that this prevents the quirk from matching:
```
2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for Legrand Dimmer switch w/o neutral (00:04:74:00:00:8b:0e:a2)
2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.legrand.dimmer.DimmerWithoutNeutral'>
2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {1} {1, 242}
```
**Describe the solution you'd like**
Could the quirk be updated to also support new firmwares?
**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**
TODO
**Additional context**

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/legrand/dimmer.py`
Content:
```
1 """Device handler for Legrand Dimmer switch w/o neutral."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 import zigpy.types as t
5 from zigpy.zcl.clusters.general import (
6 Basic,
7 BinaryInput,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 Scenes,
14 )
15 from zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster
16
17 from . import LEGRAND
18 from ..const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27 MANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC01 # decimal = 64513
28
29
30 class LegrandCluster(CustomCluster, ManufacturerSpecificCluster):
31 """LegrandCluster."""
32
33 cluster_id = MANUFACTURER_SPECIFIC_CLUSTER_ID
34 name = "LegrandCluster"
35 ep_attribute = "legrand_cluster"
36 attributes = {
37 0x0000: ("dimmer", t.data16),
38 0x0001: ("led_dark", t.Bool),
39 0x0002: ("led_on", t.Bool),
40 }
41 server_commands = {}
42 client_commands = {}
43
44
45 class DimmerWithoutNeutral(CustomDevice):
46 """Dimmer switch w/o neutral."""
47
48 signature = {
49 # <SimpleDescriptor endpoint=1 profile=260 device_type=256
50 # device_version=1
51 # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
52 # output_clusters=[0, 64513, 25]>
53 MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
54 ENDPOINTS: {
55 1: {
56 PROFILE_ID: zha.PROFILE_ID,
57 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
58 INPUT_CLUSTERS: [
59 Basic.cluster_id,
60 Identify.cluster_id,
61 Groups.cluster_id,
62 OnOff.cluster_id,
63 LevelControl.cluster_id,
64 Scenes.cluster_id,
65 BinaryInput.cluster_id,
66 MANUFACTURER_SPECIFIC_CLUSTER_ID,
67 ],
68 OUTPUT_CLUSTERS: [
69 Basic.cluster_id,
70 MANUFACTURER_SPECIFIC_CLUSTER_ID,
71 Ota.cluster_id,
72 ],
73 }
74 },
75 }
76
77 replacement = {
78 ENDPOINTS: {
79 1: {
80 PROFILE_ID: zha.PROFILE_ID,
81 DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
82 INPUT_CLUSTERS: [
83 Basic.cluster_id,
84 Identify.cluster_id,
85 Groups.cluster_id,
86 OnOff.cluster_id,
87 LevelControl.cluster_id,
88 Scenes.cluster_id,
89 BinaryInput.cluster_id,
90 LegrandCluster,
91 ],
92 OUTPUT_CLUSTERS: [Basic.cluster_id, LegrandCluster, Ota.cluster_id],
93 }
94 }
95 }
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zhaquirks/legrand/dimmer.py b/zhaquirks/legrand/dimmer.py
--- a/zhaquirks/legrand/dimmer.py
+++ b/zhaquirks/legrand/dimmer.py
@@ -93,3 +93,42 @@
}
}
}
+
+
+class DimmerWithoutNeutral2(DimmerWithoutNeutral):
+ """Dimmer switch w/o neutral 2."""
+
+ signature = {
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=256
+ # device_version=1
+ # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]
+ # output_clusters=[0, 64513, 25]>
+ MODELS_INFO: [(f" {LEGRAND}", " Dimmer switch w/o neutral")],
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Scenes.cluster_id,
+ BinaryInput.cluster_id,
+ MANUFACTURER_SPECIFIC_CLUSTER_ID,
+ ],
+ OUTPUT_CLUSTERS: [
+ Basic.cluster_id,
+ MANUFACTURER_SPECIFIC_CLUSTER_ID,
+ Ota.cluster_id,
+ ],
+ },
+ 242: {
+ PROFILE_ID: 41440,
+ DEVICE_TYPE: 0x0061,
+ INPUT_CLUSTERS: [],
+ OUTPUT_CLUSTERS: [0x0021],
+ },
+ },
+ }
|
{"golden_diff": "diff --git a/zhaquirks/legrand/dimmer.py b/zhaquirks/legrand/dimmer.py\n--- a/zhaquirks/legrand/dimmer.py\n+++ b/zhaquirks/legrand/dimmer.py\n@@ -93,3 +93,42 @@\n }\n }\n }\n+\n+\n+class DimmerWithoutNeutral2(DimmerWithoutNeutral):\n+ \"\"\"Dimmer switch w/o neutral 2.\"\"\"\n+\n+ signature = {\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n+ # device_version=1\n+ # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n+ # output_clusters=[0, 64513, 25]>\n+ MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Scenes.cluster_id,\n+ BinaryInput.cluster_id,\n+ MANUFACTURER_SPECIFIC_CLUSTER_ID,\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ MANUFACTURER_SPECIFIC_CLUSTER_ID,\n+ Ota.cluster_id,\n+ ],\n+ },\n+ 242: {\n+ PROFILE_ID: 41440,\n+ DEVICE_TYPE: 0x0061,\n+ INPUT_CLUSTERS: [],\n+ OUTPUT_CLUSTERS: [0x0021],\n+ },\n+ },\n+ }\n", "issue": "[Device Support Request] Add support for updated Legrand Dimmer switch w/o neutral\n**Is your feature request related to a problem? Please describe.**\r\n\r\nI've updated the firmware of my Legrand Dimmer switch w/o neutral for which support was added in https://github.com/zigpy/zha-device-handlers/issues/299\r\n\r\nBefore OTA upgrade:\r\n- app_version: 0\r\n- hw_version: 1\r\n- stack_version: 64\r\n- sw_build_id: 01a (26)\r\n- zcl_version: 2\r\n- Firmware: 0x03401a00\r\n\r\nAfter OTA upgrade (2020-06-08):\r\n- app_version: 0\r\n- hw_version: 6\r\n- stack_version: 66\r\n- sw_build_id: 02b (43)\r\n- zcl_version: 2\r\n- Firmware: 0x002b4203\r\n\r\nAnd now it reports a new `GreenPowerProxy` endpoint with id 242:\r\n\r\n```\r\n{\r\n \"node_descriptor\": \"<NodeDescriptor byte1=17 byte2=64 mac_capability_flags=142 manufacturer_code=4129 maximum_buffer_size=89 maximum_incoming_transfer_size=63 server_mask=10752 maximum_outgoing_transfer_size=63 descriptor_capability_field=0>\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0100\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x000f\",\r\n \"0xfc01\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0000\",\r\n \"0x0019\",\r\n \"0xfc01\"\r\n ]\r\n },\r\n \"242\": {\r\n \"profile_id\": 41440,\r\n \"device_type\": \"0x0061\",\r\n \"in_clusters\": [],\r\n \"out_clusters\": [\r\n \"0x0021\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \" Legrand\",\r\n \"model\": \" Dimmer switch w/o neutral\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\r\n\r\nThe issue is that prevents the quirk from matching:\r\n\r\n```\r\n2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Checking quirks for Legrand Dimmer switch w/o neutral (00:04:74:00:00:8b:0e:a2)\r\n2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Considering <class 'zhaquirks.legrand.dimmer.DimmerWithoutNeutral'>\r\n2020-06-17 06:45:05 DEBUG (MainThread) [zigpy.quirks.registry] Fail because endpoint list mismatch: {1} {1, 242}\r\n```\r\n\r\n**Describe the solution you'd like**\r\n\r\nCould the quirk be updated to also support new firmwares?\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. 
Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n\r\nTODO\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "\"\"\"Device handler for Legrand Dimmer switch w/o neutral.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n)\nfrom zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster\n\nfrom . import LEGRAND\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nMANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC01 # decimal = 64513\n\n\nclass LegrandCluster(CustomCluster, ManufacturerSpecificCluster):\n \"\"\"LegrandCluster.\"\"\"\n\n cluster_id = MANUFACTURER_SPECIFIC_CLUSTER_ID\n name = \"LegrandCluster\"\n ep_attribute = \"legrand_cluster\"\n attributes = {\n 0x0000: (\"dimmer\", t.data16),\n 0x0001: (\"led_dark\", t.Bool),\n 0x0002: (\"led_on\", t.Bool),\n }\n server_commands = {}\n client_commands = {}\n\n\nclass DimmerWithoutNeutral(CustomDevice):\n \"\"\"Dimmer switch w/o neutral.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 64513, 25]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n LegrandCluster,\n ],\n OUTPUT_CLUSTERS: [Basic.cluster_id, LegrandCluster, Ota.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/legrand/dimmer.py"}], "after_files": [{"content": "\"\"\"Device handler for Legrand Dimmer switch w/o neutral.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n)\nfrom zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster\n\nfrom . 
import LEGRAND\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nMANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC01 # decimal = 64513\n\n\nclass LegrandCluster(CustomCluster, ManufacturerSpecificCluster):\n \"\"\"LegrandCluster.\"\"\"\n\n cluster_id = MANUFACTURER_SPECIFIC_CLUSTER_ID\n name = \"LegrandCluster\"\n ep_attribute = \"legrand_cluster\"\n attributes = {\n 0x0000: (\"dimmer\", t.data16),\n 0x0001: (\"led_dark\", t.Bool),\n 0x0002: (\"led_on\", t.Bool),\n }\n server_commands = {}\n client_commands = {}\n\n\nclass DimmerWithoutNeutral(CustomDevice):\n \"\"\"Dimmer switch w/o neutral.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 64513, 25]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n LegrandCluster,\n ],\n OUTPUT_CLUSTERS: [Basic.cluster_id, LegrandCluster, Ota.cluster_id],\n }\n }\n }\n\n\nclass DimmerWithoutNeutral2(DimmerWithoutNeutral):\n \"\"\"Dimmer switch w/o neutral 2.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=256\n # device_version=1\n # input_clusters=[0, 3, 4, 8, 6, 5, 15, 64513]\n # output_clusters=[0, 64513, 25]>\n MODELS_INFO: [(f\" {LEGRAND}\", \" Dimmer switch w/o neutral\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n BinaryInput.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n Ota.cluster_id,\n ],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 0x0061,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [0x0021],\n },\n },\n }\n", "path": "zhaquirks/legrand/dimmer.py"}]}
| 1,953 | 415 |
gh_patches_debug_31905
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-1092
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StackLayout spacing added to top padding
When using a StackLayout, the spacing is added to the top padding.
To see the problem, run kivycatalog and change the spacing property for the StackLayout example to 20.
Tested on 1.6.0 and master.
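A minimal reproduction sketch (not part of the original report; the widget count and sizes are arbitrary):
```python
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.stacklayout import StackLayout


class ReproApp(App):
    def build(self):
        # Per the report, the first row ends up offset from the top by
        # padding + spacing (30px here) instead of just the 10px padding.
        root = StackLayout(padding=10, spacing=20)
        for i in range(8):
            root.add_widget(Button(text=str(i), size_hint=(0.2, 0.2)))
        return root


if __name__ == "__main__":
    ReproApp().run()
```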
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/stacklayout.py`
Content:
```
1 '''
2 Stack Layout
3 ============
4
5 .. only:: html
6
7 .. image:: images/stacklayout.gif
8 :align: right
9
10 .. only:: latex
11
12 .. image:: images/stacklayout.png
13 :align: right
14
15 .. versionadded:: 1.0.5
16
17 :class:`StackLayout` arranges children vertically or horizontally, as many
18 as the layout can fit.
19
20
21 .. warning:
22
23 This is experimental and subject to change as long as this warning notice is
24 present.
25
26 '''
27
28 __all__ = ('StackLayout', )
29
30 from kivy.uix.layout import Layout
31 from kivy.properties import NumericProperty, OptionProperty, \
32 ReferenceListProperty, VariableListProperty
33
34
35 class StackLayout(Layout):
36 '''Stack layout class. See module documentation for more information.
37 '''
38
39 spacing = VariableListProperty([0, 0], length=2)
40 '''Spacing between children: [spacing_horizontal, spacing_vertical].
41
42 spacing also accepts a one argument form [spacing].
43
44 :data:`spacing` is a :class:`~kivy.properties.VariableListProperty`, default to
45 [0, 0].
46 '''
47
48 padding = VariableListProperty([0, 0, 0, 0])
49 '''Padding between layout box and children: [padding_left, padding_top,
50 padding_right, padding_bottom].
51
52 padding also accepts a two argument form [padding_horizontal,
53 padding_vertical] and a one argument form [padding].
54
55 .. versionchanged:: 1.7.0
56
57 Replaced NumericProperty with VariableListProperty.
58
59 :data:`padding` is a :class:`~kivy.properties.VariableListProperty`, default to
60 [0, 0, 0, 0].
61 '''
62
63 orientation = OptionProperty('lr-tb', options=(
64 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt', 'bt-rl'))
65 '''Orientation of the layout.
66
67 :data:`orientation` is an :class:`~kivy.properties.OptionProperty`, default
68 to 'lr-tb'.
69
70 Valid orientations are: 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',
71 'bt-lr', 'rl-bt', 'bt-rl'
72
73 .. versionchanged:: 1.5.0
74
75 :data:`orientation` now correctly handles all valid combinations of
76 'lr','rl','tb','bt'. Before this version only 'lr-tb' and
77 'tb-lr' were supported, and 'tb-lr' was misnamed and placed
78 widgets from bottom to top and from right to left (reversed compared
79 to what was expected).
80
81 .. note::
82
83 lr mean Left to Right.
84 rl mean Right to Left.
85 tb mean Top to Bottom.
86 bt mean Bottom to Top.
87 '''
88
89 minimum_width = NumericProperty(0)
90 '''Minimum width needed to contain all children.
91
92 .. versionadded:: 1.0.8
93
94 :data:`minimum_width` is a :class:`kivy.properties.NumericProperty`, default
95 to 0.
96 '''
97
98 minimum_height = NumericProperty(0)
99 '''Minimum height needed to contain all children.
100
101 .. versionadded:: 1.0.8
102
103 :data:`minimum_height` is a :class:`kivy.properties.NumericProperty`,
104 default to 0.
105 '''
106
107 minimum_size = ReferenceListProperty(minimum_width, minimum_height)
108 '''Minimum size needed to contain all children.
109
110 .. versionadded:: 1.0.8
111
112 :data:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of
113 (:data:`minimum_width`, :data:`minimum_height`) properties.
114 '''
115
116 def __init__(self, **kwargs):
117 super(StackLayout, self).__init__(**kwargs)
118 self.bind(
119 padding=self._trigger_layout,
120 spacing=self._trigger_layout,
121 children=self._trigger_layout,
122 orientation=self._trigger_layout,
123 size=self._trigger_layout,
124 pos=self._trigger_layout)
125
126 def do_layout(self, *largs):
127 # optimize layout by preventing looking at the same attribute in a loop
128 selfpos = self.pos
129 selfsize = self.size
130 orientation = self.orientation.split('-')
131 padding_left = self.padding[0]
132 padding_top = self.padding[1]
133 padding_right = self.padding[2]
134 padding_bottom = self.padding[3]
135
136 padding_x = padding_left + padding_right
137 padding_y = padding_top + padding_bottom
138 spacing_x, spacing_y = self.spacing
139
140 lc = []
141
142 # Determine which direction and in what order to place the widgets
143 posattr = [0] * 2
144 posdelta = [0] * 2
145 posstart = [0] * 2
146 for i in (0, 1):
147 posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))
148 k = posattr[i]
149 if orientation[i] == 'lr':
150 # left to right
151 posdelta[i] = 1
152 posstart[i] = selfpos[k] + padding_left
153 elif orientation[i] == 'bt':
154 # bottom to top
155 posdelta[i] = 1
156 posstart[i] = selfpos[k] + padding_bottom
157 elif orientation[i] == 'rl':
158 # right to left
159 posdelta[i] = -1
160 posstart[i] = selfpos[k] + selfsize[k] - padding_right
161 else:
162 # top to bottom
163 posdelta[i] = -1
164 posstart[i] = selfpos[k] + selfsize[k] - padding_top
165
166 innerattr, outerattr = posattr
167 ustart, vstart = posstart
168 deltau, deltav = posdelta
169 del posattr, posdelta, posstart
170
171 u = ustart # inner loop position variable
172 v = vstart # outer loop position variable
173
174 # space calculation, used for determining when a row or column is full
175
176 if orientation[0] in ('lr', 'rl'):
177 lu = self.size[innerattr] - padding_x
178 sv = padding_y # size in v-direction, for minimum_size property
179 su = padding_x # size in h-direction
180 spacing_u = spacing_x
181 spacing_v = spacing_y
182 else:
183 lu = self.size[innerattr] - padding_y
184 sv = padding_x # size in v-direction, for minimum_size property
185 su = padding_y # size in h-direction
186 spacing_u = spacing_y
187 spacing_v = spacing_x
188
189 # space calculation, row height or column width, for arranging widgets
190 lv = 0
191
192 urev = (deltau < 0)
193 vrev = (deltav < 0)
194 for c in reversed(self.children):
195 # Issue#823: ReferenceListProperty doesn't allow changing
196 # individual properties.
197 # when the above issue is fixed we can remove csize from below and
198 # access c.size[i] directly
199 csize = c.size[:] # we need to update the whole tuple at once.
200 if c.size_hint[0]:
201 # calculate width
202 csize[0] = c.size_hint[0] * (selfsize[0] - padding_x)
203 if c.size_hint[1]:
204 # calculate height
205 csize[1] = c.size_hint[1] * (selfsize[1] - padding_y)
206 c.size = tuple(csize)
207
208 # does the widget fit in the row/column?
209 if lu - c.size[innerattr] >= 0:
210 lc.append(c)
211 lu -= c.size[innerattr] + spacing_u
212 lv = max(lv, c.size[outerattr])
213 continue
214
215 # push the line
216 sv += lv + spacing_v
217 for c2 in lc:
218 if urev:
219 u -= c2.size[innerattr] + spacing_u
220 p = [0, 0] # issue #823
221 p[innerattr] = u
222 p[outerattr] = v
223 if vrev:
224 # v position is actually the top/right side of the widget
225 # when going from high to low coordinate values,
226 # we need to subtract the height/width from the position.
227 p[outerattr] -= c2.size[outerattr]
228 c2.pos = tuple(p) # issue #823
229 if not urev:
230 u += c2.size[innerattr] + spacing_u
231
232 v += deltav * lv
233 v += deltav * spacing_v
234 lc = [c]
235 lv = c.size[outerattr]
236 lu = selfsize[innerattr] - su - c.size[innerattr] - spacing_u
237 u = ustart
238
239 if lc:
240 # push the last (incomplete) line
241 sv += lv + spacing_v
242 for c2 in lc:
243 if urev:
244 u -= c2.size[innerattr] + spacing_u
245 p = [0, 0] # issue #823
246 p[innerattr] = u
247 p[outerattr] = v
248 if vrev:
249 p[outerattr] -= c2.size[outerattr]
250 c2.pos = tuple(p) # issue #823
251 if not urev:
252 u += c2.size[innerattr] + spacing_u
253
254 minsize = self.minimum_size[:] # issue #823
255 minsize[outerattr] = sv
256 self.minimum_size = tuple(minsize)
257
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/uix/stacklayout.py b/kivy/uix/stacklayout.py
--- a/kivy/uix/stacklayout.py
+++ b/kivy/uix/stacklayout.py
@@ -216,7 +216,7 @@
sv += lv + spacing_v
for c2 in lc:
if urev:
- u -= c2.size[innerattr] + spacing_u
+ u -= c2.size[innerattr]
p = [0, 0] # issue #823
p[innerattr] = u
p[outerattr] = v
@@ -226,7 +226,9 @@
# we need to subtract the height/width from the position.
p[outerattr] -= c2.size[outerattr]
c2.pos = tuple(p) # issue #823
- if not urev:
+ if urev:
+ u -= spacing_u
+ else:
u += c2.size[innerattr] + spacing_u
v += deltav * lv
@@ -241,14 +243,16 @@
sv += lv + spacing_v
for c2 in lc:
if urev:
- u -= c2.size[innerattr] + spacing_u
+ u -= c2.size[innerattr]
p = [0, 0] # issue #823
p[innerattr] = u
p[outerattr] = v
if vrev:
p[outerattr] -= c2.size[outerattr]
c2.pos = tuple(p) # issue #823
- if not urev:
+ if urev:
+ u -= spacing_u
+ else:
u += c2.size[innerattr] + spacing_u
minsize = self.minimum_size[:] # issue #823
|
{"golden_diff": "diff --git a/kivy/uix/stacklayout.py b/kivy/uix/stacklayout.py\n--- a/kivy/uix/stacklayout.py\n+++ b/kivy/uix/stacklayout.py\n@@ -216,7 +216,7 @@\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n- u -= c2.size[innerattr] + spacing_u\n+ u -= c2.size[innerattr]\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n@@ -226,7 +226,9 @@\n # we need to subtract the height/width from the position.\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n- if not urev:\n+ if urev:\n+ u -= spacing_u\n+ else:\n u += c2.size[innerattr] + spacing_u\n \n v += deltav * lv\n@@ -241,14 +243,16 @@\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n- u -= c2.size[innerattr] + spacing_u\n+ u -= c2.size[innerattr]\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n if vrev:\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n- if not urev:\n+ if urev:\n+ u -= spacing_u\n+ else:\n u += c2.size[innerattr] + spacing_u\n \n minsize = self.minimum_size[:] # issue #823\n", "issue": "StackLayout spacing added to top padding\nWhen using a StackLayout, the spacing is added to the top padding.\n\nTo see the problem, run kivycatalog and change the spacing property for the StackLayout example to 20.\n\nTested on 1.6.0 and master.\n\n", "before_files": [{"content": "'''\nStack Layout\n============\n\n.. only:: html\n\n .. image:: images/stacklayout.gif\n :align: right\n\n.. only:: latex\n\n .. image:: images/stacklayout.png\n :align: right\n\n.. versionadded:: 1.0.5\n\n:class:`StackLayout` arranges children vertically or horizontally, as many\nas the layout can fit.\n\n\n.. warning:\n\n This is experimental and subject to change as long as this warning notice is\n present.\n\n'''\n\n__all__ = ('StackLayout', )\n\nfrom kivy.uix.layout import Layout\nfrom kivy.properties import NumericProperty, OptionProperty, \\\n ReferenceListProperty, VariableListProperty\n\n\nclass StackLayout(Layout):\n '''Stack layout class. See module documentation for more information.\n '''\n\n spacing = VariableListProperty([0, 0], length=2)\n '''Spacing between children: [spacing_horizontal, spacing_vertical].\n\n spacing also accepts a one argument form [spacing].\n\n :data:`spacing` is a :class:`~kivy.properties.VariableListProperty`, default to\n [0, 0].\n '''\n\n padding = VariableListProperty([0, 0, 0, 0])\n '''Padding between layout box and children: [padding_left, padding_top,\n padding_right, padding_bottom].\n\n padding also accepts a two argument form [padding_horizontal,\n padding_vertical] and a one argument form [padding].\n\n .. versionchanged:: 1.7.0\n\n Replaced NumericProperty with VariableListProperty.\n\n :data:`padding` is a :class:`~kivy.properties.VariableListProperty`, default to\n [0, 0, 0, 0].\n '''\n\n orientation = OptionProperty('lr-tb', options=(\n 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt', 'bt-rl'))\n '''Orientation of the layout.\n\n :data:`orientation` is an :class:`~kivy.properties.OptionProperty`, default\n to 'lr-tb'.\n\n Valid orientations are: 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',\n 'bt-lr', 'rl-bt', 'bt-rl'\n\n .. versionchanged:: 1.5.0\n\n :data:`orientation` now correctly handles all valid combinations of\n 'lr','rl','tb','bt'. Before this version only 'lr-tb' and\n 'tb-lr' were supported, and 'tb-lr' was misnamed and placed\n widgets from bottom to top and from right to left (reversed compared\n to what was expected).\n\n .. 
note::\n\n lr mean Left to Right.\n rl mean Right to Left.\n tb mean Top to Bottom.\n bt mean Bottom to Top.\n '''\n\n minimum_width = NumericProperty(0)\n '''Minimum width needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_width` is a :class:`kivy.properties.NumericProperty`, default\n to 0.\n '''\n\n minimum_height = NumericProperty(0)\n '''Minimum height needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_height` is a :class:`kivy.properties.NumericProperty`,\n default to 0.\n '''\n\n minimum_size = ReferenceListProperty(minimum_width, minimum_height)\n '''Minimum size needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:data:`minimum_width`, :data:`minimum_height`) properties.\n '''\n\n def __init__(self, **kwargs):\n super(StackLayout, self).__init__(**kwargs)\n self.bind(\n padding=self._trigger_layout,\n spacing=self._trigger_layout,\n children=self._trigger_layout,\n orientation=self._trigger_layout,\n size=self._trigger_layout,\n pos=self._trigger_layout)\n\n def do_layout(self, *largs):\n # optimize layout by preventing looking at the same attribute in a loop\n selfpos = self.pos\n selfsize = self.size\n orientation = self.orientation.split('-')\n padding_left = self.padding[0]\n padding_top = self.padding[1]\n padding_right = self.padding[2]\n padding_bottom = self.padding[3]\n\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n spacing_x, spacing_y = self.spacing\n\n lc = []\n\n # Determine which direction and in what order to place the widgets\n posattr = [0] * 2\n posdelta = [0] * 2\n posstart = [0] * 2\n for i in (0, 1):\n posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))\n k = posattr[i]\n if orientation[i] == 'lr':\n # left to right\n posdelta[i] = 1\n posstart[i] = selfpos[k] + padding_left\n elif orientation[i] == 'bt':\n # bottom to top\n posdelta[i] = 1\n posstart[i] = selfpos[k] + padding_bottom\n elif orientation[i] == 'rl':\n # right to left\n posdelta[i] = -1\n posstart[i] = selfpos[k] + selfsize[k] - padding_right\n else:\n # top to bottom\n posdelta[i] = -1\n posstart[i] = selfpos[k] + selfsize[k] - padding_top\n\n innerattr, outerattr = posattr\n ustart, vstart = posstart\n deltau, deltav = posdelta\n del posattr, posdelta, posstart\n\n u = ustart # inner loop position variable\n v = vstart # outer loop position variable\n\n # space calculation, used for determining when a row or column is full\n\n if orientation[0] in ('lr', 'rl'):\n lu = self.size[innerattr] - padding_x\n sv = padding_y # size in v-direction, for minimum_size property\n su = padding_x # size in h-direction\n spacing_u = spacing_x\n spacing_v = spacing_y\n else:\n lu = self.size[innerattr] - padding_y\n sv = padding_x # size in v-direction, for minimum_size property\n su = padding_y # size in h-direction\n spacing_u = spacing_y\n spacing_v = spacing_x\n\n # space calculation, row height or column width, for arranging widgets\n lv = 0\n\n urev = (deltau < 0)\n vrev = (deltav < 0)\n for c in reversed(self.children):\n # Issue#823: ReferenceListProperty doesn't allow changing\n # individual properties.\n # when the above issue is fixed we can remove csize from below and\n # access c.size[i] directly\n csize = c.size[:] # we need to update the whole tuple at once.\n if c.size_hint[0]:\n # calculate width\n csize[0] = c.size_hint[0] * (selfsize[0] - padding_x)\n if c.size_hint[1]:\n # calculate height\n csize[1] = 
c.size_hint[1] * (selfsize[1] - padding_y)\n c.size = tuple(csize)\n\n # does the widget fit in the row/column?\n if lu - c.size[innerattr] >= 0:\n lc.append(c)\n lu -= c.size[innerattr] + spacing_u\n lv = max(lv, c.size[outerattr])\n continue\n\n # push the line\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n u -= c2.size[innerattr] + spacing_u\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n if vrev:\n # v position is actually the top/right side of the widget\n # when going from high to low coordinate values,\n # we need to subtract the height/width from the position.\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n if not urev:\n u += c2.size[innerattr] + spacing_u\n\n v += deltav * lv\n v += deltav * spacing_v\n lc = [c]\n lv = c.size[outerattr]\n lu = selfsize[innerattr] - su - c.size[innerattr] - spacing_u\n u = ustart\n\n if lc:\n # push the last (incomplete) line\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n u -= c2.size[innerattr] + spacing_u\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n if vrev:\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n if not urev:\n u += c2.size[innerattr] + spacing_u\n\n minsize = self.minimum_size[:] # issue #823\n minsize[outerattr] = sv\n self.minimum_size = tuple(minsize)\n", "path": "kivy/uix/stacklayout.py"}], "after_files": [{"content": "'''\nStack Layout\n============\n\n.. only:: html\n\n .. image:: images/stacklayout.gif\n :align: right\n\n.. only:: latex\n\n .. image:: images/stacklayout.png\n :align: right\n\n.. versionadded:: 1.0.5\n\n:class:`StackLayout` arranges children vertically or horizontally, as many\nas the layout can fit.\n\n\n.. warning:\n\n This is experimental and subject to change as long as this warning notice is\n present.\n\n'''\n\n__all__ = ('StackLayout', )\n\nfrom kivy.uix.layout import Layout\nfrom kivy.properties import NumericProperty, OptionProperty, \\\n ReferenceListProperty, VariableListProperty\n\n\nclass StackLayout(Layout):\n '''Stack layout class. See module documentation for more information.\n '''\n\n spacing = VariableListProperty([0, 0], length=2)\n '''Spacing between children: [spacing_horizontal, spacing_vertical].\n\n spacing also accepts a one argument form [spacing].\n\n :data:`spacing` is a :class:`~kivy.properties.VariableListProperty`, default to\n [0, 0].\n '''\n\n padding = VariableListProperty([0, 0, 0, 0])\n '''Padding between layout box and children: [padding_left, padding_top,\n padding_right, padding_bottom].\n\n padding also accepts a two argument form [padding_horizontal,\n padding_vertical] and a one argument form [padding].\n\n .. versionchanged:: 1.7.0\n\n Replaced NumericProperty with VariableListProperty.\n\n :data:`padding` is a :class:`~kivy.properties.VariableListProperty`, default to\n [0, 0, 0, 0].\n '''\n\n orientation = OptionProperty('lr-tb', options=(\n 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt', 'bt-rl'))\n '''Orientation of the layout.\n\n :data:`orientation` is an :class:`~kivy.properties.OptionProperty`, default\n to 'lr-tb'.\n\n Valid orientations are: 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',\n 'bt-lr', 'rl-bt', 'bt-rl'\n\n .. versionchanged:: 1.5.0\n\n :data:`orientation` now correctly handles all valid combinations of\n 'lr','rl','tb','bt'. Before this version only 'lr-tb' and\n 'tb-lr' were supported, and 'tb-lr' was misnamed and placed\n widgets from bottom to top and from right to left (reversed compared\n to what was expected).\n\n .. 
note::\n\n lr mean Left to Right.\n rl mean Right to Left.\n tb mean Top to Bottom.\n bt mean Bottom to Top.\n '''\n\n minimum_width = NumericProperty(0)\n '''Minimum width needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_width` is a :class:`kivy.properties.NumericProperty`, default\n to 0.\n '''\n\n minimum_height = NumericProperty(0)\n '''Minimum height needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_height` is a :class:`kivy.properties.NumericProperty`,\n default to 0.\n '''\n\n minimum_size = ReferenceListProperty(minimum_width, minimum_height)\n '''Minimum size needed to contain all children.\n\n .. versionadded:: 1.0.8\n\n :data:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:data:`minimum_width`, :data:`minimum_height`) properties.\n '''\n\n def __init__(self, **kwargs):\n super(StackLayout, self).__init__(**kwargs)\n self.bind(\n padding=self._trigger_layout,\n spacing=self._trigger_layout,\n children=self._trigger_layout,\n orientation=self._trigger_layout,\n size=self._trigger_layout,\n pos=self._trigger_layout)\n\n def do_layout(self, *largs):\n # optimize layout by preventing looking at the same attribute in a loop\n selfpos = self.pos\n selfsize = self.size\n orientation = self.orientation.split('-')\n padding_left = self.padding[0]\n padding_top = self.padding[1]\n padding_right = self.padding[2]\n padding_bottom = self.padding[3]\n\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n spacing_x, spacing_y = self.spacing\n\n lc = []\n\n # Determine which direction and in what order to place the widgets\n posattr = [0] * 2\n posdelta = [0] * 2\n posstart = [0] * 2\n for i in (0, 1):\n posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))\n k = posattr[i]\n if orientation[i] == 'lr':\n # left to right\n posdelta[i] = 1\n posstart[i] = selfpos[k] + padding_left\n elif orientation[i] == 'bt':\n # bottom to top\n posdelta[i] = 1\n posstart[i] = selfpos[k] + padding_bottom\n elif orientation[i] == 'rl':\n # right to left\n posdelta[i] = -1\n posstart[i] = selfpos[k] + selfsize[k] - padding_right\n else:\n # top to bottom\n posdelta[i] = -1\n posstart[i] = selfpos[k] + selfsize[k] - padding_top\n\n innerattr, outerattr = posattr\n ustart, vstart = posstart\n deltau, deltav = posdelta\n del posattr, posdelta, posstart\n\n u = ustart # inner loop position variable\n v = vstart # outer loop position variable\n\n # space calculation, used for determining when a row or column is full\n\n if orientation[0] in ('lr', 'rl'):\n lu = self.size[innerattr] - padding_x\n sv = padding_y # size in v-direction, for minimum_size property\n su = padding_x # size in h-direction\n spacing_u = spacing_x\n spacing_v = spacing_y\n else:\n lu = self.size[innerattr] - padding_y\n sv = padding_x # size in v-direction, for minimum_size property\n su = padding_y # size in h-direction\n spacing_u = spacing_y\n spacing_v = spacing_x\n\n # space calculation, row height or column width, for arranging widgets\n lv = 0\n\n urev = (deltau < 0)\n vrev = (deltav < 0)\n for c in reversed(self.children):\n # Issue#823: ReferenceListProperty doesn't allow changing\n # individual properties.\n # when the above issue is fixed we can remove csize from below and\n # access c.size[i] directly\n csize = c.size[:] # we need to update the whole tuple at once.\n if c.size_hint[0]:\n # calculate width\n csize[0] = c.size_hint[0] * (selfsize[0] - padding_x)\n if c.size_hint[1]:\n # calculate height\n csize[1] = 
c.size_hint[1] * (selfsize[1] - padding_y)\n c.size = tuple(csize)\n\n # does the widget fit in the row/column?\n if lu - c.size[innerattr] >= 0:\n lc.append(c)\n lu -= c.size[innerattr] + spacing_u\n lv = max(lv, c.size[outerattr])\n continue\n\n # push the line\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n u -= c2.size[innerattr]\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n if vrev:\n # v position is actually the top/right side of the widget\n # when going from high to low coordinate values,\n # we need to subtract the height/width from the position.\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n if urev:\n u -= spacing_u\n else:\n u += c2.size[innerattr] + spacing_u\n\n v += deltav * lv\n v += deltav * spacing_v\n lc = [c]\n lv = c.size[outerattr]\n lu = selfsize[innerattr] - su - c.size[innerattr] - spacing_u\n u = ustart\n\n if lc:\n # push the last (incomplete) line\n sv += lv + spacing_v\n for c2 in lc:\n if urev:\n u -= c2.size[innerattr]\n p = [0, 0] # issue #823\n p[innerattr] = u\n p[outerattr] = v\n if vrev:\n p[outerattr] -= c2.size[outerattr]\n c2.pos = tuple(p) # issue #823\n if urev:\n u -= spacing_u\n else:\n u += c2.size[innerattr] + spacing_u\n\n minsize = self.minimum_size[:] # issue #823\n minsize[outerattr] = sv\n self.minimum_size = tuple(minsize)\n", "path": "kivy/uix/stacklayout.py"}]}
| 3,138 | 429 |
gh_patches_debug_25654
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5348
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecated example
https://github.com/bokeh/bokeh/blob/0.12.3/examples/embed/simple/simple.py
```
Because the ``resources`` argument is no longer needed, it is deprecated and no longer has any effect.
```
The link is also broken:
http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/embed/simple/simple.py`
Content:
```
1 '''This example demonstrates embedding a standalone Bokeh document
2 into a simple Flask application, with a basic HTML web form.
3
4 To view the example, run:
5
6 python simple.py
7
8 in this directory, and navigate to:
9
10 http://localhost:5000
11
12 '''
13 from __future__ import print_function
14
15 import flask
16
17 from bokeh.embed import components
18 from bokeh.plotting import figure
19 from bokeh.resources import INLINE
20 from bokeh.util.string import encode_utf8
21
22 app = flask.Flask(__name__)
23
24 colors = {
25 'Black': '#000000',
26 'Red': '#FF0000',
27 'Green': '#00FF00',
28 'Blue': '#0000FF',
29 }
30
31 def getitem(obj, item, default):
32 if item not in obj:
33 return default
34 else:
35 return obj[item]
36
37 @app.route("/")
38 def polynomial():
39 """ Very simple embedding of a polynomial chart
40
41 """
42
43 # Grab the inputs arguments from the URL
44 # This is automated by the button
45 args = flask.request.args
46
47 # Get all the form arguments in the url with defaults
48 color = colors[getitem(args, 'color', 'Black')]
49 _from = int(getitem(args, '_from', 0))
50 to = int(getitem(args, 'to', 10))
51
52 # Create a polynomial line graph
53 x = list(range(_from, to + 1))
54 fig = figure(title="Polynomial")
55 fig.line(x, [i ** 2 for i in x], color=color, line_width=2)
56
57 # Configure resources to include BokehJS inline in the document.
58 # For more details see:
59 # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
60 js_resources = INLINE.render_js()
61 css_resources = INLINE.render_css()
62
63 # For more details see:
64 # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
65 script, div = components(fig, INLINE)
66 html = flask.render_template(
67 'embed.html',
68 plot_script=script,
69 plot_div=div,
70 js_resources=js_resources,
71 css_resources=css_resources,
72 color=color,
73 _from=_from,
74 to=to
75 )
76 return encode_utf8(html)
77
78 if __name__ == "__main__":
79 print(__doc__)
80 app.run()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/embed/simple/simple.py b/examples/embed/simple/simple.py
--- a/examples/embed/simple/simple.py
+++ b/examples/embed/simple/simple.py
@@ -41,7 +41,6 @@
"""
# Grab the inputs arguments from the URL
- # This is automated by the button
args = flask.request.args
# Get all the form arguments in the url with defaults
@@ -49,20 +48,15 @@
_from = int(getitem(args, '_from', 0))
to = int(getitem(args, 'to', 10))
- # Create a polynomial line graph
+ # Create a polynomial line graph with those arguments
x = list(range(_from, to + 1))
fig = figure(title="Polynomial")
fig.line(x, [i ** 2 for i in x], color=color, line_width=2)
- # Configure resources to include BokehJS inline in the document.
- # For more details see:
- # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
- # For more details see:
- # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
- script, div = components(fig, INLINE)
+ script, div = components(fig)
html = flask.render_template(
'embed.html',
plot_script=script,
|
{"golden_diff": "diff --git a/examples/embed/simple/simple.py b/examples/embed/simple/simple.py\n--- a/examples/embed/simple/simple.py\n+++ b/examples/embed/simple/simple.py\n@@ -41,7 +41,6 @@\n \"\"\"\n \n # Grab the inputs arguments from the URL\n- # This is automated by the button\n args = flask.request.args\n \n # Get all the form arguments in the url with defaults\n@@ -49,20 +48,15 @@\n _from = int(getitem(args, '_from', 0))\n to = int(getitem(args, 'to', 10))\n \n- # Create a polynomial line graph\n+ # Create a polynomial line graph with those arguments\n x = list(range(_from, to + 1))\n fig = figure(title=\"Polynomial\")\n fig.line(x, [i ** 2 for i in x], color=color, line_width=2)\n \n- # Configure resources to include BokehJS inline in the document.\n- # For more details see:\n- # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed\n js_resources = INLINE.render_js()\n css_resources = INLINE.render_css()\n \n- # For more details see:\n- # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components\n- script, div = components(fig, INLINE)\n+ script, div = components(fig)\n html = flask.render_template(\n 'embed.html',\n plot_script=script,\n", "issue": "Depreciated example\nhttps://github.com/bokeh/bokeh/blob/0.12.3/examples/embed/simple/simple.py\n\n```\nBecause the ``resources`` argument is no longer needed, it is deprecated and no longer has any effect.\n```\n\nThe link is also broken:\nhttp://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components\n\n", "before_files": [{"content": "'''This example demonstrates embedding a standalone Bokeh document\ninto a simple Flask application, with a basic HTML web form.\n\nTo view the example, run:\n\n python simple.py\n\nin this directory, and navigate to:\n\n http://localhost:5000\n\n'''\nfrom __future__ import print_function\n\nimport flask\n\nfrom bokeh.embed import components\nfrom bokeh.plotting import figure\nfrom bokeh.resources import INLINE\nfrom bokeh.util.string import encode_utf8\n\napp = flask.Flask(__name__)\n\ncolors = {\n 'Black': '#000000',\n 'Red': '#FF0000',\n 'Green': '#00FF00',\n 'Blue': '#0000FF',\n}\n\ndef getitem(obj, item, default):\n if item not in obj:\n return default\n else:\n return obj[item]\n\[email protected](\"/\")\ndef polynomial():\n \"\"\" Very simple embedding of a polynomial chart\n\n \"\"\"\n\n # Grab the inputs arguments from the URL\n # This is automated by the button\n args = flask.request.args\n\n # Get all the form arguments in the url with defaults\n color = colors[getitem(args, 'color', 'Black')]\n _from = int(getitem(args, '_from', 0))\n to = int(getitem(args, 'to', 10))\n\n # Create a polynomial line graph\n x = list(range(_from, to + 1))\n fig = figure(title=\"Polynomial\")\n fig.line(x, [i ** 2 for i in x], color=color, line_width=2)\n\n # Configure resources to include BokehJS inline in the document.\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed\n js_resources = INLINE.render_js()\n css_resources = INLINE.render_css()\n\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components\n script, div = components(fig, INLINE)\n html = flask.render_template(\n 'embed.html',\n plot_script=script,\n plot_div=div,\n js_resources=js_resources,\n css_resources=css_resources,\n color=color,\n _from=_from,\n to=to\n )\n return encode_utf8(html)\n\nif __name__ == \"__main__\":\n print(__doc__)\n app.run()\n", "path": 
"examples/embed/simple/simple.py"}], "after_files": [{"content": "'''This example demonstrates embedding a standalone Bokeh document\ninto a simple Flask application, with a basic HTML web form.\n\nTo view the example, run:\n\n python simple.py\n\nin this directory, and navigate to:\n\n http://localhost:5000\n\n'''\nfrom __future__ import print_function\n\nimport flask\n\nfrom bokeh.embed import components\nfrom bokeh.plotting import figure\nfrom bokeh.resources import INLINE\nfrom bokeh.util.string import encode_utf8\n\napp = flask.Flask(__name__)\n\ncolors = {\n 'Black': '#000000',\n 'Red': '#FF0000',\n 'Green': '#00FF00',\n 'Blue': '#0000FF',\n}\n\ndef getitem(obj, item, default):\n if item not in obj:\n return default\n else:\n return obj[item]\n\[email protected](\"/\")\ndef polynomial():\n \"\"\" Very simple embedding of a polynomial chart\n\n \"\"\"\n\n # Grab the inputs arguments from the URL\n args = flask.request.args\n\n # Get all the form arguments in the url with defaults\n color = colors[getitem(args, 'color', 'Black')]\n _from = int(getitem(args, '_from', 0))\n to = int(getitem(args, 'to', 10))\n\n # Create a polynomial line graph with those arguments\n x = list(range(_from, to + 1))\n fig = figure(title=\"Polynomial\")\n fig.line(x, [i ** 2 for i in x], color=color, line_width=2)\n\n js_resources = INLINE.render_js()\n css_resources = INLINE.render_css()\n\n script, div = components(fig)\n html = flask.render_template(\n 'embed.html',\n plot_script=script,\n plot_div=div,\n js_resources=js_resources,\n css_resources=css_resources,\n color=color,\n _from=_from,\n to=to\n )\n return encode_utf8(html)\n\nif __name__ == \"__main__\":\n print(__doc__)\n app.run()\n", "path": "examples/embed/simple/simple.py"}]}
| 1,025 | 331 |
gh_patches_debug_3980
|
rasdani/github-patches
|
git_diff
|
data-for-change__anyway-291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve cluster accuracy
Cluster aggregates markers in `in_cluster` is using box instead of a circle parameter calculation which I think may cause duplications and inaccuracy
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `static/pymapcluster.py`
Content:
```
1 ##
2 import globalmaptiles as globaltiles
3 from math import cos, sin, atan2, sqrt
4 import time
5 ##
6
7 def center_geolocation(geolocations):
8 """
9 Provide a relatively accurate center lat, lon returned as a list pair, given
10 a list of list pairs.
11 ex: in: geolocations = ((lat1,lon1), (lat2,lon2),)
12 out: (center_lat, center_lon)
13 """
14 x = 0
15 y = 0
16 z = 0
17
18 for lat, lon in geolocations:
19 lat = float(lat)
20 lon = float(lon)
21 x += cos(lat) * cos(lon)
22 y += cos(lat) * sin(lon)
23 z += sin(lat)
24
25 x = float(x / len(geolocations))
26 y = float(y / len(geolocations))
27 z = float(z / len(geolocations))
28
29 return (atan2(y, x), atan2(z, sqrt(x * x + y * y)))
30
31 def latlng_to_zoompixels(mercator, lat, lng, zoom):
32 mx, my = mercator.LatLonToMeters(lat, lng)
33 pix = mercator.MetersToPixels(mx, my, zoom)
34 return pix
35
36 def in_cluster(center, radius, point):
37 return (point[0] >= center[0] - radius) and (point[0] <= center[0] + radius) \
38 and (point[1] >= center[1] - radius) and (point[1] <= center[1] + radius)
39
40 def cluster_markers(mercator, latlngs, zoom, gridsize=50):
41 """
42 Args:
43 mercator: instance of GlobalMercator()
44 latlngs: list of (lat,lng) tuple
45 zoom: current zoom level
46 gridsize: cluster radius (in pixels in current zoom level)
47 Returns:
48 centers: list of indices in latlngs of points used as centers
49 clusters: list of same length as latlngs giving assigning each point to
50 a cluster
51 """
52 start_time = time.time()
53 centers = []
54 clusters = []
55 sizes = []
56 latlngs = map(lambda latlng: latlng.serialize(), latlngs)
57 for i, latlng in enumerate(latlngs):
58 lat = latlng['latitude']
59 lng = latlng['longitude']
60 point_pix = latlng_to_zoompixels(mercator, lat, lng, zoom)
61 assigned = False
62 for cidx, c in enumerate(centers):
63 center = latlngs[c]
64 center = latlng_to_zoompixels(mercator, center['latitude'], center['longitude'], zoom)
65 if in_cluster(center, gridsize, point_pix):
66 # Assign point to cluster
67 clusters.append(cidx)
68 sizes[cidx] += 1
69 assigned = True
70 break
71 if not assigned:
72 # Create new cluster for point
73 #TODO center_geolocation the center!
74 centers.append(i)
75 sizes.append(1)
76 clusters.append(len(centers) - 1)
77
78 print('time for cluster_markers: ' + str(time.time() - start_time))
79 return centers, clusters, sizes
80
81 def create_clusters_centers(markers, zoom, radius):
82 mercator = globaltiles.GlobalMercator()
83 centers, clusters, sizes = cluster_markers(mercator, markers, zoom, radius)
84 centers_markers = [markers[i] for i in centers]
85 return centers_markers, clusters, sizes
86
87 def get_cluster_json(clust_marker, clust_size):
88 return {
89 'longitude': clust_marker.longitude,
90 'latitude': clust_marker.latitude,
91 'size': clust_size
92 }
93
94 def get_cluster_size(index, clusters):
95 from collections import Counter
96 #TODO: don't call Counter for every cluster in the array
97 return Counter(clusters)[index]
98
99 def generate_clusters_json(markers, zoom, radius=50):
100 centers, clusters, sizes = create_clusters_centers(markers, zoom, radius)
101 json_clusts=[]
102
103 for i, point in enumerate(centers):
104 json_clusts.append(get_cluster_json(point, sizes[i]))
105
106 return {
107 'clusters': json_clusts
108 }
109
110 ##
111 if __name__ == '__main__':
112 ##
113 mercator = globaltiles.GlobalMercator()
114 latlngs = [(28.43, 8), (28.43, 8), (28.44, 8), (35, 8)]
115 centers, clusters = cluster_markers(mercator, latlngs, 21)
116 ##
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/static/pymapcluster.py b/static/pymapcluster.py
--- a/static/pymapcluster.py
+++ b/static/pymapcluster.py
@@ -34,8 +34,7 @@
return pix
def in_cluster(center, radius, point):
- return (point[0] >= center[0] - radius) and (point[0] <= center[0] + radius) \
- and (point[1] >= center[1] - radius) and (point[1] <= center[1] + radius)
+ return sqrt((point[0] - center[0])**2 + (point[1] - center[1])**2) <= radius
def cluster_markers(mercator, latlngs, zoom, gridsize=50):
"""
|
{"golden_diff": "diff --git a/static/pymapcluster.py b/static/pymapcluster.py\n--- a/static/pymapcluster.py\n+++ b/static/pymapcluster.py\n@@ -34,8 +34,7 @@\n return pix\n \n def in_cluster(center, radius, point):\n- return (point[0] >= center[0] - radius) and (point[0] <= center[0] + radius) \\\n- and (point[1] >= center[1] - radius) and (point[1] <= center[1] + radius)\n+ return sqrt((point[0] - center[0])**2 + (point[1] - center[1])**2) <= radius\n \n def cluster_markers(mercator, latlngs, zoom, gridsize=50):\n \"\"\"\n", "issue": "Improve cluster accuracy\nCluster aggregates markers in `in_cluster` is using box instead of a circle parameter calculation which I think may cause duplications and inaccuracy\n\n", "before_files": [{"content": "##\nimport globalmaptiles as globaltiles\nfrom math import cos, sin, atan2, sqrt\nimport time\n##\n \ndef center_geolocation(geolocations):\n \"\"\"\n Provide a relatively accurate center lat, lon returned as a list pair, given\n a list of list pairs.\n ex: in: geolocations = ((lat1,lon1), (lat2,lon2),)\n out: (center_lat, center_lon)\n \"\"\"\n x = 0\n y = 0\n z = 0\n \n for lat, lon in geolocations:\n lat = float(lat)\n lon = float(lon)\n x += cos(lat) * cos(lon)\n y += cos(lat) * sin(lon)\n z += sin(lat)\n \n x = float(x / len(geolocations))\n y = float(y / len(geolocations))\n z = float(z / len(geolocations))\n \n return (atan2(y, x), atan2(z, sqrt(x * x + y * y)))\n\ndef latlng_to_zoompixels(mercator, lat, lng, zoom):\n mx, my = mercator.LatLonToMeters(lat, lng)\n pix = mercator.MetersToPixels(mx, my, zoom)\n return pix\n\ndef in_cluster(center, radius, point):\n return (point[0] >= center[0] - radius) and (point[0] <= center[0] + radius) \\\n and (point[1] >= center[1] - radius) and (point[1] <= center[1] + radius)\n\ndef cluster_markers(mercator, latlngs, zoom, gridsize=50):\n \"\"\"\n Args:\n mercator: instance of GlobalMercator()\n latlngs: list of (lat,lng) tuple\n zoom: current zoom level\n gridsize: cluster radius (in pixels in current zoom level)\n Returns:\n centers: list of indices in latlngs of points used as centers\n clusters: list of same length as latlngs giving assigning each point to\n a cluster\n \"\"\"\n start_time = time.time()\n centers = []\n clusters = []\n sizes = []\n latlngs = map(lambda latlng: latlng.serialize(), latlngs)\n for i, latlng in enumerate(latlngs):\n lat = latlng['latitude']\n lng = latlng['longitude']\n point_pix = latlng_to_zoompixels(mercator, lat, lng, zoom)\n assigned = False\n for cidx, c in enumerate(centers):\n center = latlngs[c]\n center = latlng_to_zoompixels(mercator, center['latitude'], center['longitude'], zoom)\n if in_cluster(center, gridsize, point_pix):\n # Assign point to cluster\n clusters.append(cidx)\n sizes[cidx] += 1\n assigned = True\n break\n if not assigned:\n # Create new cluster for point\n #TODO center_geolocation the center!\n centers.append(i)\n sizes.append(1)\n clusters.append(len(centers) - 1)\n\n print('time for cluster_markers: ' + str(time.time() - start_time))\n return centers, clusters, sizes\n\ndef create_clusters_centers(markers, zoom, radius):\n mercator = globaltiles.GlobalMercator()\n centers, clusters, sizes = cluster_markers(mercator, markers, zoom, radius)\n centers_markers = [markers[i] for i in centers]\n return centers_markers, clusters, sizes\n\ndef get_cluster_json(clust_marker, clust_size):\n return {\n 'longitude': clust_marker.longitude,\n 'latitude': clust_marker.latitude,\n 'size': clust_size\n }\n\ndef get_cluster_size(index, clusters):\n from 
collections import Counter\n #TODO: don't call Counter for every cluster in the array\n return Counter(clusters)[index]\n\ndef generate_clusters_json(markers, zoom, radius=50):\n centers, clusters, sizes = create_clusters_centers(markers, zoom, radius)\n json_clusts=[]\n\n for i, point in enumerate(centers):\n json_clusts.append(get_cluster_json(point, sizes[i]))\n\n return {\n 'clusters': json_clusts\n }\n\n##\nif __name__ == '__main__':\n ##\n mercator = globaltiles.GlobalMercator()\n latlngs = [(28.43, 8), (28.43, 8), (28.44, 8), (35, 8)]\n centers, clusters = cluster_markers(mercator, latlngs, 21)\n ##", "path": "static/pymapcluster.py"}], "after_files": [{"content": "##\nimport globalmaptiles as globaltiles\nfrom math import cos, sin, atan2, sqrt\nimport time\n##\n \ndef center_geolocation(geolocations):\n \"\"\"\n Provide a relatively accurate center lat, lon returned as a list pair, given\n a list of list pairs.\n ex: in: geolocations = ((lat1,lon1), (lat2,lon2),)\n out: (center_lat, center_lon)\n \"\"\"\n x = 0\n y = 0\n z = 0\n \n for lat, lon in geolocations:\n lat = float(lat)\n lon = float(lon)\n x += cos(lat) * cos(lon)\n y += cos(lat) * sin(lon)\n z += sin(lat)\n \n x = float(x / len(geolocations))\n y = float(y / len(geolocations))\n z = float(z / len(geolocations))\n \n return (atan2(y, x), atan2(z, sqrt(x * x + y * y)))\n\ndef latlng_to_zoompixels(mercator, lat, lng, zoom):\n mx, my = mercator.LatLonToMeters(lat, lng)\n pix = mercator.MetersToPixels(mx, my, zoom)\n return pix\n\ndef in_cluster(center, radius, point):\n return sqrt((point[0] - center[0])**2 + (point[1] - center[1])**2) <= radius\n\ndef cluster_markers(mercator, latlngs, zoom, gridsize=50):\n \"\"\"\n Args:\n mercator: instance of GlobalMercator()\n latlngs: list of (lat,lng) tuple\n zoom: current zoom level\n gridsize: cluster radius (in pixels in current zoom level)\n Returns:\n centers: list of indices in latlngs of points used as centers\n clusters: list of same length as latlngs giving assigning each point to\n a cluster\n \"\"\"\n start_time = time.time()\n centers = []\n clusters = []\n sizes = []\n latlngs = map(lambda latlng: latlng.serialize(), latlngs)\n for i, latlng in enumerate(latlngs):\n lat = latlng['latitude']\n lng = latlng['longitude']\n point_pix = latlng_to_zoompixels(mercator, lat, lng, zoom)\n assigned = False\n for cidx, c in enumerate(centers):\n center = latlngs[c]\n center = latlng_to_zoompixels(mercator, center['latitude'], center['longitude'], zoom)\n if in_cluster(center, gridsize, point_pix):\n # Assign point to cluster\n clusters.append(cidx)\n sizes[cidx] += 1\n assigned = True\n break\n if not assigned:\n # Create new cluster for point\n #TODO center_geolocation the center!\n centers.append(i)\n sizes.append(1)\n clusters.append(len(centers) - 1)\n\n print('time for cluster_markers: ' + str(time.time() - start_time))\n return centers, clusters, sizes\n\ndef create_clusters_centers(markers, zoom, radius):\n mercator = globaltiles.GlobalMercator()\n centers, clusters, sizes = cluster_markers(mercator, markers, zoom, radius)\n centers_markers = [markers[i] for i in centers]\n return centers_markers, clusters, sizes\n\ndef get_cluster_json(clust_marker, clust_size):\n return {\n 'longitude': clust_marker.longitude,\n 'latitude': clust_marker.latitude,\n 'size': clust_size\n }\n\ndef get_cluster_size(index, clusters):\n from collections import Counter\n #TODO: don't call Counter for every cluster in the array\n return Counter(clusters)[index]\n\ndef 
generate_clusters_json(markers, zoom, radius=50):\n centers, clusters, sizes = create_clusters_centers(markers, zoom, radius)\n json_clusts=[]\n\n for i, point in enumerate(centers):\n json_clusts.append(get_cluster_json(point, sizes[i]))\n\n return {\n 'clusters': json_clusts\n }\n\n##\nif __name__ == '__main__':\n ##\n mercator = globaltiles.GlobalMercator()\n latlngs = [(28.43, 8), (28.43, 8), (28.44, 8), (35, 8)]\n centers, clusters = cluster_markers(mercator, latlngs, 21)\n ##", "path": "static/pymapcluster.py"}]}
| 1,541 | 174 |
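An aside on the pymapcluster record above: the practical difference between the original box test and the patched circle test is easiest to see with a point near a corner of the bounding box. The snippet below is only an illustrative sketch and is not part of the dataset record; the function names and the sample coordinates are ad hoc assumptions chosen to show a point that the box check accepts but the radius check rejects.

```python
from math import sqrt


def in_cluster_box(center, radius, point):
    # Original behaviour: accept anything inside a square of half-width `radius`.
    return (center[0] - radius <= point[0] <= center[0] + radius) and \
           (center[1] - radius <= point[1] <= center[1] + radius)


def in_cluster_circle(center, radius, point):
    # Patched behaviour: accept only points within Euclidean distance `radius`.
    return sqrt((point[0] - center[0]) ** 2 + (point[1] - center[1]) ** 2) <= radius


center, radius = (0, 0), 50
corner_point = (45, 45)  # near a corner of the 100x100 pixel box

print(in_cluster_box(center, radius, corner_point))     # True
print(in_cluster_circle(center, radius, corner_point))  # False (distance ~63.6 > 50)
```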
gh_patches_debug_29806
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-4802
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery tags include a raw dictionary that is difficult to use in APM
<!--
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
-->
### Summary of problem
When using celery, context is automatically added to spans as tags from various argument calls. One of the tags for celery is `delivery_info`, which is a dict that provides valuable routing data. This data is difficult to use currently since we can't create facets or filters in APM with the raw dictionary as a string. (Image below)
I expect the dict values should be added as separate tag data, such that `celery.delivery_info.exchange`, `celery.delivery_info.priority`, etc are all separate tags.
<img width="727" alt="image" src="https://user-images.githubusercontent.com/1430167/206885304-9b20bdc5-097a-4e60-ad4e-8b29063092ef.png">
### Which version of dd-trace-py are you using?
1.4.0
### Which version of pip are you using?
22.3
### Which libraries and their versions are you using?
<details>
<summary>`pip freeze`</summary>
celery==5.2.2
</details>
### How can we reproduce your problem?
Run celery with ddtrace with celery integration enabled and observe the resulting tags on the `celery.run` span.
### What is the result that you get?
`celery.run` span has a tag:value pair roughly matching the following (dict is a string repr) `celery.delivery_info: "{'exchange':'', 'routing_key':'queue_name', ...}"`
### What is the result that you expected?
`celery.run` span should have tag:value pairs for each child key in the dictionary
`celery.delivery_info.exchange:''`
`celery.delivery_info.routing_key:'queue_name'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/celery/utils.py`
Content:
```
1 from typing import Any
2 from typing import Dict
3 from weakref import WeakValueDictionary
4
5 from ddtrace.span import Span
6
7 from .constants import CTX_KEY
8
9
10 TAG_KEYS = frozenset(
11 [
12 ("compression", "celery.compression"),
13 ("correlation_id", "celery.correlation_id"),
14 ("countdown", "celery.countdown"),
15 ("delivery_info", "celery.delivery_info"),
16 ("eta", "celery.eta"),
17 ("exchange", "celery.exchange"),
18 ("expires", "celery.expires"),
19 ("hostname", "celery.hostname"),
20 ("id", "celery.id"),
21 ("priority", "celery.priority"),
22 ("queue", "celery.queue"),
23 ("reply_to", "celery.reply_to"),
24 ("retries", "celery.retries"),
25 ("routing_key", "celery.routing_key"),
26 ("serializer", "celery.serializer"),
27 ("timelimit", "celery.timelimit"),
28 # Celery 4.0 uses `origin` instead of `hostname`; this change preserves
29 # the same name for the tag despite Celery version
30 ("origin", "celery.hostname"),
31 ("state", "celery.state"),
32 ]
33 )
34
35
36 def set_tags_from_context(span, context):
37 # type: (Span, Dict[str, Any]) -> None
38 """Helper to extract meta values from a Celery Context"""
39
40 for key, tag_name in TAG_KEYS:
41 value = context.get(key)
42
43 # Skip this key if it is not set
44 if value is None or value == "":
45 continue
46
47 # Skip `timelimit` if it is not set (its default/unset value is a
48 # tuple or a list of `None` values
49 if key == "timelimit" and all(_ is None for _ in value):
50 continue
51
52 # Skip `retries` if its value is `0`
53 if key == "retries" and value == 0:
54 continue
55
56 span.set_tag(tag_name, value)
57
58
59 def attach_span(task, task_id, span, is_publish=False):
60 """Helper to propagate a `Span` for the given `Task` instance. This
61 function uses a `WeakValueDictionary` that stores a Datadog Span using
62 the `(task_id, is_publish)` as a key. This is useful when information must be
63 propagated from one Celery signal to another.
64
65 DEV: We use (task_id, is_publish) for the key to ensure that publishing a
66 task from within another task does not cause any conflicts.
67
68 This mostly happens when either a task fails and a retry policy is in place,
69 or when a task is manually retried (e.g. `task.retry()`), we end up trying
70 to publish a task with the same id as the task currently running.
71
72 Previously publishing the new task would overwrite the existing `celery.run` span
73 in the `weak_dict` causing that span to be forgotten and never finished.
74
75 NOTE: We cannot test for this well yet, because we do not run a celery worker,
76 and cannot run `task.apply_async()`
77 """
78 weak_dict = getattr(task, CTX_KEY, None)
79 if weak_dict is None:
80 weak_dict = WeakValueDictionary()
81 setattr(task, CTX_KEY, weak_dict)
82
83 weak_dict[(task_id, is_publish)] = span
84
85
86 def detach_span(task, task_id, is_publish=False):
87 """Helper to remove a `Span` in a Celery task when it's propagated.
88 This function handles tasks where the `Span` is not attached.
89 """
90 weak_dict = getattr(task, CTX_KEY, None)
91 if weak_dict is None:
92 return
93
94 # DEV: See note in `attach_span` for key info
95 try:
96 del weak_dict[(task_id, is_publish)]
97 except KeyError:
98 pass
99
100
101 def retrieve_span(task, task_id, is_publish=False):
102 """Helper to retrieve an active `Span` stored in a `Task`
103 instance
104 """
105 weak_dict = getattr(task, CTX_KEY, None)
106 if weak_dict is None:
107 return
108 else:
109 # DEV: See note in `attach_span` for key info
110 return weak_dict.get((task_id, is_publish))
111
112
113 def retrieve_task_id(context):
114 """Helper to retrieve the `Task` identifier from the message `body`.
115 This helper supports Protocol Version 1 and 2. The Protocol is well
116 detailed in the official documentation:
117 http://docs.celeryproject.org/en/latest/internals/protocol.html
118 """
119 headers = context.get("headers")
120 body = context.get("body")
121 if headers:
122 # Protocol Version 2 (default from Celery 4.0)
123 return headers.get("id")
124 else:
125 # Protocol Version 1
126 return body.get("id")
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/celery/utils.py b/ddtrace/contrib/celery/utils.py
--- a/ddtrace/contrib/celery/utils.py
+++ b/ddtrace/contrib/celery/utils.py
@@ -2,6 +2,7 @@
from typing import Dict
from weakref import WeakValueDictionary
+from ddtrace.contrib.trace_utils import set_flattened_tags
from ddtrace.span import Span
from .constants import CTX_KEY
@@ -33,27 +34,37 @@
)
+def should_skip_context_value(key, value):
+ # type: (str, Any) -> bool
+ # Skip this key if it is not set
+ if value is None or value == "":
+ return True
+
+ # Skip `timelimit` if it is not set (its default/unset value is a
+ # tuple or a list of `None` values
+ if key == "timelimit" and all(_ is None for _ in value):
+ return True
+
+ # Skip `retries` if its value is `0`
+ if key == "retries" and value == 0:
+ return True
+
+ return False
+
+
def set_tags_from_context(span, context):
# type: (Span, Dict[str, Any]) -> None
"""Helper to extract meta values from a Celery Context"""
+ context_tags = []
for key, tag_name in TAG_KEYS:
value = context.get(key)
-
- # Skip this key if it is not set
- if value is None or value == "":
- continue
-
- # Skip `timelimit` if it is not set (its default/unset value is a
- # tuple or a list of `None` values
- if key == "timelimit" and all(_ is None for _ in value):
+ if should_skip_context_value(key, value):
continue
- # Skip `retries` if its value is `0`
- if key == "retries" and value == 0:
- continue
+ context_tags.append((tag_name, value))
- span.set_tag(tag_name, value)
+ set_flattened_tags(span, context_tags)
def attach_span(task, task_id, span, is_publish=False):
|
{"golden_diff": "diff --git a/ddtrace/contrib/celery/utils.py b/ddtrace/contrib/celery/utils.py\n--- a/ddtrace/contrib/celery/utils.py\n+++ b/ddtrace/contrib/celery/utils.py\n@@ -2,6 +2,7 @@\n from typing import Dict\n from weakref import WeakValueDictionary\n \n+from ddtrace.contrib.trace_utils import set_flattened_tags\n from ddtrace.span import Span\n \n from .constants import CTX_KEY\n@@ -33,27 +34,37 @@\n )\n \n \n+def should_skip_context_value(key, value):\n+ # type: (str, Any) -> bool\n+ # Skip this key if it is not set\n+ if value is None or value == \"\":\n+ return True\n+\n+ # Skip `timelimit` if it is not set (its default/unset value is a\n+ # tuple or a list of `None` values\n+ if key == \"timelimit\" and all(_ is None for _ in value):\n+ return True\n+\n+ # Skip `retries` if its value is `0`\n+ if key == \"retries\" and value == 0:\n+ return True\n+\n+ return False\n+\n+\n def set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n \n+ context_tags = []\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n-\n- # Skip this key if it is not set\n- if value is None or value == \"\":\n- continue\n-\n- # Skip `timelimit` if it is not set (its default/unset value is a\n- # tuple or a list of `None` values\n- if key == \"timelimit\" and all(_ is None for _ in value):\n+ if should_skip_context_value(key, value):\n continue\n \n- # Skip `retries` if its value is `0`\n- if key == \"retries\" and value == 0:\n- continue\n+ context_tags.append((tag_name, value))\n \n- span.set_tag(tag_name, value)\n+ set_flattened_tags(span, context_tags)\n \n \n def attach_span(task, task_id, span, is_publish=False):\n", "issue": "Celery tags include a raw dictionary that is difficult to use in APM\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\nWhen using celery, context is automatically added to spans as tags from various argument calls. One of the tags for celery is `delivery_info`, which is a dict that provides valuable routing data. This data is difficult to use currently since we can't create facets or filters in APM with the raw dictionary as a string. 
(Image below)\r\n\r\nI expect the dict values should be added as separate tag data, such that `celery.delivery_info.exchange`, `celery.delivery_info.priority`, etc are all separate tags.\r\n\r\n<img width=\"727\" alt=\"image\" src=\"https://user-images.githubusercontent.com/1430167/206885304-9b20bdc5-097a-4e60-ad4e-8b29063092ef.png\">\r\n\r\n\r\n### Which version of dd-trace-py are you using?\r\n1.4.0\r\n\r\n\r\n### Which version of pip are you using?\r\n22.3\r\n\r\n\r\n### Which libraries and their versions are you using?\r\n\r\n<details>\r\n <summary>`pip freeze`</summary>\r\n celery==5.2.2\r\n</details>\r\n\r\n### How can we reproduce your problem?\r\nRun celery with ddtrace with celery integration enabled and observe the resulting tags on the `celery.run` span.\r\n\r\n\r\n### What is the result that you get?\r\n`celery.run` span has a tag:value pair roughly matching the following (dict is a string repr) `celery.delivery_info: \"{'exchange':'', 'routing_key':'queue_name', ...}\"`\r\n\r\n### What is the result that you expected?\r\n`celery.run` span should have tag:value pairs for each child key in the dictionary\r\n`celery.delivery_info.exchange:''`\r\n`celery.delivery_info.routing_key:'queue_name'`\n", "before_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom weakref import WeakValueDictionary\n\nfrom ddtrace.span import Span\n\nfrom .constants import CTX_KEY\n\n\nTAG_KEYS = frozenset(\n [\n (\"compression\", \"celery.compression\"),\n (\"correlation_id\", \"celery.correlation_id\"),\n (\"countdown\", \"celery.countdown\"),\n (\"delivery_info\", \"celery.delivery_info\"),\n (\"eta\", \"celery.eta\"),\n (\"exchange\", \"celery.exchange\"),\n (\"expires\", \"celery.expires\"),\n (\"hostname\", \"celery.hostname\"),\n (\"id\", \"celery.id\"),\n (\"priority\", \"celery.priority\"),\n (\"queue\", \"celery.queue\"),\n (\"reply_to\", \"celery.reply_to\"),\n (\"retries\", \"celery.retries\"),\n (\"routing_key\", \"celery.routing_key\"),\n (\"serializer\", \"celery.serializer\"),\n (\"timelimit\", \"celery.timelimit\"),\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n (\"origin\", \"celery.hostname\"),\n (\"state\", \"celery.state\"),\n ]\n)\n\n\ndef set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n\n # Skip this key if it is not set\n if value is None or value == \"\":\n continue\n\n # Skip `timelimit` if it is not set (its default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and all(_ is None for _ in value):\n continue\n\n # Skip `retries` if its value is `0`\n if key == \"retries\" and value == 0:\n continue\n\n span.set_tag(tag_name, value)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `WeakValueDictionary` that stores a Datadog Span using\n the `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n DEV: We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retried (e.g. 
`task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `weak_dict` causing that span to be forgotten and never finished.\n\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n weak_dict = WeakValueDictionary()\n setattr(task, CTX_KEY, weak_dict)\n\n weak_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n\n # DEV: See note in `attach_span` for key info\n try:\n del weak_dict[(task_id, is_publish)]\n except KeyError:\n pass\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n else:\n # DEV: See note in `attach_span` for key info\n return weak_dict.get((task_id, is_publish))\n\n\ndef retrieve_task_id(context):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = context.get(\"headers\")\n body = context.get(\"body\")\n if headers:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n else:\n # Protocol Version 1\n return body.get(\"id\")\n", "path": "ddtrace/contrib/celery/utils.py"}], "after_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom weakref import WeakValueDictionary\n\nfrom ddtrace.contrib.trace_utils import set_flattened_tags\nfrom ddtrace.span import Span\n\nfrom .constants import CTX_KEY\n\n\nTAG_KEYS = frozenset(\n [\n (\"compression\", \"celery.compression\"),\n (\"correlation_id\", \"celery.correlation_id\"),\n (\"countdown\", \"celery.countdown\"),\n (\"delivery_info\", \"celery.delivery_info\"),\n (\"eta\", \"celery.eta\"),\n (\"exchange\", \"celery.exchange\"),\n (\"expires\", \"celery.expires\"),\n (\"hostname\", \"celery.hostname\"),\n (\"id\", \"celery.id\"),\n (\"priority\", \"celery.priority\"),\n (\"queue\", \"celery.queue\"),\n (\"reply_to\", \"celery.reply_to\"),\n (\"retries\", \"celery.retries\"),\n (\"routing_key\", \"celery.routing_key\"),\n (\"serializer\", \"celery.serializer\"),\n (\"timelimit\", \"celery.timelimit\"),\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n (\"origin\", \"celery.hostname\"),\n (\"state\", \"celery.state\"),\n ]\n)\n\n\ndef should_skip_context_value(key, value):\n # type: (str, Any) -> bool\n # Skip this key if it is not set\n if value is None or value == \"\":\n return True\n\n # Skip `timelimit` if it is not set (its default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and all(_ is None for _ in value):\n return True\n\n # Skip `retries` if its value is `0`\n if key == \"retries\" and value == 0:\n return True\n\n return False\n\n\ndef set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values 
from a Celery Context\"\"\"\n\n context_tags = []\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n if should_skip_context_value(key, value):\n continue\n\n context_tags.append((tag_name, value))\n\n set_flattened_tags(span, context_tags)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `WeakValueDictionary` that stores a Datadog Span using\n the `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n DEV: We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retried (e.g. `task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `weak_dict` causing that span to be forgotten and never finished.\n\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n weak_dict = WeakValueDictionary()\n setattr(task, CTX_KEY, weak_dict)\n\n weak_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n\n # DEV: See note in `attach_span` for key info\n try:\n del weak_dict[(task_id, is_publish)]\n except KeyError:\n pass\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n else:\n # DEV: See note in `attach_span` for key info\n return weak_dict.get((task_id, is_publish))\n\n\ndef retrieve_task_id(context):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = context.get(\"headers\")\n body = context.get(\"body\")\n if headers:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n else:\n # Protocol Version 1\n return body.get(\"id\")\n", "path": "ddtrace/contrib/celery/utils.py"}]}
| 2,056 | 514 |
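An aside on the celery record above: the golden diff delegates the expansion of dict-valued tags to `ddtrace.contrib.trace_utils.set_flattened_tags`. The sketch below is not that helper's implementation; it only illustrates the dotted-key expansion the issue asks for, with ad hoc names and a made-up `delivery_info` payload.

```python
def flatten_tags(prefix, value, sep="."):
    # Recursively expand dict values into (dotted_key, leaf_value) pairs.
    if isinstance(value, dict):
        for key, nested in value.items():
            yield from flatten_tags(prefix + sep + str(key), nested, sep)
    else:
        yield prefix, value


delivery_info = {"exchange": "", "routing_key": "queue_name", "priority": 0}
print(dict(flatten_tags("celery.delivery_info", delivery_info)))
# -> {'celery.delivery_info.exchange': '',
#     'celery.delivery_info.routing_key': 'queue_name',
#     'celery.delivery_info.priority': 0}
```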
gh_patches_debug_6729
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1829
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organization view pages result in 500 error
Only on stag. I tested several different orgs.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_search/ckanext/hdx_search/plugin.py`
Content:
```
1 import logging, re
2 import ckan.plugins as plugins
3 import ckan.plugins.toolkit as tk
4 import ckan.lib.plugins as lib_plugins
5
6 def convert_country(q):
7 for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):
8 if re.findall(c['display_name'].lower(),q.lower()):
9 q += ' '+c['name']
10 return q
11
12 class HDXSearchPlugin(plugins.SingletonPlugin):
13 plugins.implements(plugins.IConfigurer, inherit=False)
14 plugins.implements(plugins.IRoutes, inherit=True)
15 plugins.implements(plugins.ITemplateHelpers, inherit=False)
16 plugins.implements(plugins.IPackageController, inherit=True)
17
18 def update_config(self, config):
19 tk.add_template_directory(config, 'templates')
20
21 def get_helpers(self):
22 return {}
23
24 def before_map(self, map):
25 map.connect('search', '/search',
26 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
27 map.connect('simple_search',
28 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
29 return map
30
31 def after_map(self, map):
32 map.connect('search', '/search',
33 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
34 map.connect('simple_search',
35 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
36 return map
37
38 def before_search(self, search_params):
39 search_params['q'] = convert_country(search_params['q'])
40 if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
41 search_params['facet.field'].append('vocab_Topics')
42
43 # If indicator flag is set, search only that type
44 if 'ext_indicator' in search_params['extras']:
45 if int(search_params['extras']['ext_indicator']) == 1:
46 search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'
47 elif int(search_params['extras']['ext_indicator']) == 0:
48 search_params['fq'] = search_params[
49 'fq'] + ' -extras_indicator:1'
50 return search_params
51
52 def after_search(self, search_results, search_params):
53 return search_results
54
55 def before_view(self, pkg_dict):
56 return pkg_dict
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py
+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
@@ -36,7 +36,7 @@
return map
def before_search(self, search_params):
- search_params['q'] = convert_country(search_params['q'])
+ #search_params['q'] = convert_country(search_params['q'])
if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
search_params['facet.field'].append('vocab_Topics')
|
{"golden_diff": "diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n@@ -36,7 +36,7 @@\n return map\n \n def before_search(self, search_params):\n- search_params['q'] = convert_country(search_params['q'])\n+ #search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n", "issue": "Organization view pages result in 500 error\nOnly on stag. I tested several different orgs. \n\n\n\n", "before_files": [{"content": "import logging, re\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\ndef convert_country(q):\n for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n if re.findall(c['display_name'].lower(),q.lower()):\n q += ' '+c['name']\n return q\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}], "after_files": [{"content": "import logging, re\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\ndef convert_country(q):\n for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n if re.findall(c['display_name'].lower(),q.lower()):\n q += ' '+c['name']\n return q\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, 
inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n #search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}]}
| 994 | 168 |
gh_patches_debug_241
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-1807
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding new attribute fails in case new name is the last in the list
### Problem Statement
```bash
$ python scripts/add_attribute.py Permissions triage bool
Traceback (most recent call last):
File "<...>\PyGithub\scripts\add_attribute.py", line 124, in <module>
line = lines[i].rstrip()
IndexError: list index out of range
```
--> Adding a new attribute at the end of the existing list of attributes in class `Permissions` fails.
--> In this case the last attribute name was "push", so "triage" comes last.
https://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L63-L72
### Solution Approach
In case the new attribute name will result in adding it at the end of the list of attributes, then the processing within the script at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L89 was already processing the next source code line which already contains the `_initAttributes` function.
Subsequently at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L122 `inInit` is set to `False`, but only checked again after reading already the next line. This means the following code block will never again notice the place of the `_initAttributes` and fails at the end of the file due to endless loop.
Problem can be fixed by conditionally remembering we already reached the `_initAttributes` function, so replace:
https://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/scripts/add_attribute.py#L122
with
```python
inInit = True if line == " def _initAttributes(self):" else False
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/add_attribute.py`
Content:
```
1 #!/usr/bin/env python
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2013 Vincent Jacques <[email protected]> #
6 # Copyright 2014 Thialfihar <[email protected]> #
7 # Copyright 2014 Vincent Jacques <[email protected]> #
8 # Copyright 2016 Peter Buckley <[email protected]> #
9 # Copyright 2018 sfdye <[email protected]> #
10 # Copyright 2018 bbi-yggy <[email protected]> #
11 # #
12 # This file is part of PyGithub. #
13 # http://pygithub.readthedocs.io/ #
14 # #
15 # PyGithub is free software: you can redistribute it and/or modify it under #
16 # the terms of the GNU Lesser General Public License as published by the Free #
17 # Software Foundation, either version 3 of the License, or (at your option) #
18 # any later version. #
19 # #
20 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
21 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
22 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
23 # details. #
24 # #
25 # You should have received a copy of the GNU Lesser General Public License #
26 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
27 # #
28 ################################################################################
29
30 import os.path
31 import sys
32
33 className, attributeName, attributeType = sys.argv[1:4]
34 if len(sys.argv) > 4:
35 attributeClassType = sys.argv[4]
36 else:
37 attributeClassType = ""
38
39
40 types = {
41 "string": (
42 "string",
43 None,
44 'self._makeStringAttribute(attributes["' + attributeName + '"])',
45 ),
46 "int": (
47 "integer",
48 None,
49 'self._makeIntAttribute(attributes["' + attributeName + '"])',
50 ),
51 "bool": (
52 "bool",
53 None,
54 'self._makeBoolAttribute(attributes["' + attributeName + '"])',
55 ),
56 "datetime": (
57 "datetime.datetime",
58 "(str, unicode)",
59 'self._makeDatetimeAttribute(attributes["' + attributeName + '"])',
60 ),
61 "class": (
62 ":class:`" + attributeClassType + "`",
63 None,
64 "self._makeClassAttribute("
65 + attributeClassType
66 + ', attributes["'
67 + attributeName
68 + '"])',
69 ),
70 }
71
72 attributeDocType, attributeAssertType, attributeValue = types[attributeType]
73
74
75 fileName = os.path.join("github", className + ".py")
76
77 with open(fileName) as f:
78 lines = list(f)
79
80 newLines = []
81
82 i = 0
83
84 added = False
85
86 isCompletable = True
87 isProperty = False
88 while not added:
89 line = lines[i].rstrip()
90 i += 1
91 if line.startswith("class "):
92 if "NonCompletableGithubObject" in line:
93 isCompletable = False
94 elif line == " @property":
95 isProperty = True
96 elif line.startswith(" def "):
97 attrName = line[8:-7]
98 # Properties will be inserted after __repr__, but before any other function.
99 if attrName != "__repr__" and (
100 attrName == "_identity" or attrName > attributeName or not isProperty
101 ):
102 if not isProperty:
103 newLines.append(" @property")
104 newLines.append(" def " + attributeName + "(self):")
105 newLines.append(' """')
106 newLines.append(" :type: " + attributeDocType)
107 newLines.append(' """')
108 if isCompletable:
109 newLines.append(
110 " self._completeIfNotSet(self._" + attributeName + ")"
111 )
112 newLines.append(" return self._" + attributeName + ".value")
113 newLines.append("")
114 if isProperty:
115 newLines.append(" @property")
116 added = True
117 isProperty = False
118 newLines.append(line)
119
120 added = False
121
122 inInit = False
123 while not added:
124 line = lines[i].rstrip()
125 i += 1
126 if line == " def _initAttributes(self):":
127 inInit = True
128 if inInit:
129 if not line or line.endswith(" = github.GithubObject.NotSet"):
130 if line:
131 attrName = line[14:-29]
132 if not line or attrName > attributeName:
133 newLines.append(
134 " self._" + attributeName + " = github.GithubObject.NotSet"
135 )
136 added = True
137 newLines.append(line)
138
139 added = False
140
141 inUse = False
142 while not added:
143 try:
144 line = lines[i].rstrip()
145 except IndexError:
146 line = ""
147 i += 1
148 if line == " def _useAttributes(self, attributes):":
149 inUse = True
150 if inUse:
151 if not line or line.endswith(" in attributes: # pragma no branch"):
152 if line:
153 attrName = line[12:-36]
154 if not line or attrName > attributeName:
155 newLines.append(
156 ' if "'
157 + attributeName
158 + '" in attributes: # pragma no branch'
159 )
160 if attributeAssertType:
161 newLines.append(
162 ' assert attributes["'
163 + attributeName
164 + '"] is None or isinstance(attributes["'
165 + attributeName
166 + '"], '
167 + attributeAssertType
168 + '), attributes["'
169 + attributeName
170 + '"]'
171 )
172 newLines.append(
173 " self._" + attributeName + " = " + attributeValue
174 )
175 added = True
176 newLines.append(line)
177
178
179 while i < len(lines):
180 line = lines[i].rstrip()
181 i += 1
182 newLines.append(line)
183
184 with open(fileName, "w") as f:
185 for line in newLines:
186 f.write(line + "\n")
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/add_attribute.py b/scripts/add_attribute.py
--- a/scripts/add_attribute.py
+++ b/scripts/add_attribute.py
@@ -119,7 +119,7 @@
added = False
-inInit = False
+inInit = line.endswith("def _initAttributes(self):")
while not added:
line = lines[i].rstrip()
i += 1
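For readability, a toy, self-contained reconstruction (not the repository script; the `source` list and its contents are invented for illustration) of why seeding `inInit` from the line already in hand avoids the endless scan:

```python
# Toy model of the two scanning passes in scripts/add_attribute.py.
# Pass 1 stops only after it has already read the next "def" line, which,
# when the new attribute sorts last, is "_initAttributes" itself.
source = [
    "    def push(self):",
    "",
    "    def _initAttributes(self):",
    "        self._push = github.GithubObject.NotSet",
]

i = 0
line = ""
while not line.lstrip().startswith("def _initAttributes"):
    line = source[i]
    i += 1

# The buggy pass 2 started with inInit = False and waited to see the marker
# line again, but it was consumed above, so the scan ran off the end of the
# file. The fix seeds the flag from the line pass 1 left behind:
inInit = line.endswith("def _initAttributes(self):")
assert inInit
```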
|
{"golden_diff": "diff --git a/scripts/add_attribute.py b/scripts/add_attribute.py\n--- a/scripts/add_attribute.py\n+++ b/scripts/add_attribute.py\n@@ -119,7 +119,7 @@\n \n added = False\n \n-inInit = False\n+inInit = line.endswith(\"def _initAttributes(self):\")\n while not added:\n line = lines[i].rstrip()\n i += 1\n", "issue": "Adding new attribute fails in case new name is the last in the list\n### Problem Statement\r\n\r\n```bash\r\n$ python scripts/add_attribute.py Permissions triage bool\r\nTraceback (most recent call last):\r\n File \"<...>\\PyGithub\\scripts\\add_attribute.py\", line 124, in <module>\r\n line = lines[i].rstrip()\r\nIndexError: list index out of range\r\n```\r\n--> Adding a new attribute at the end of the existing list of attributes in class `Permissions` fails.\r\n--> In this case the last attribute name was \"push\", so \"triage\" comes last.\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L63-L72\r\n\r\n### Solution Approach\r\n\r\nIn case the new attribute name will result in adding it at the end of the list of attributes, then the processing within the script at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L89 was already processing the next source code line which already contains the `_initAttributes` function.\r\nSubsequently at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L122 `inInit` is set to `False`, but only checked again after reading already the next line. This means the following code block will never again notice the place of the `_initAttributes` and fails at the end of the file due to endless loop.\r\n\r\nProblem can be fixed by conditionally remembering we already reached the `_initAttributes` function, so replace:\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/scripts/add_attribute.py#L122\r\n\r\nwith\r\n\r\n```python\r\ninInit = True if line == \" def _initAttributes(self):\" else False\r\n```\nAdding new attribute fails in case new name is the last in the list\n### Problem Statement\r\n\r\n```bash\r\n$ python scripts/add_attribute.py Permissions triage bool\r\nTraceback (most recent call last):\r\n File \"<...>\\PyGithub\\scripts\\add_attribute.py\", line 124, in <module>\r\n line = lines[i].rstrip()\r\nIndexError: list index out of range\r\n```\r\n--> Adding a new attribute at the end of the existing list of attributes in class `Permissions` fails.\r\n--> In this case the last attribute name was \"push\", so \"triage\" comes last.\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L63-L72\r\n\r\n### Solution Approach\r\n\r\nIn case the new attribute name will result in adding it at the end of the list of attributes, then the processing within the script at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L89 was already processing the next source code line which already contains the `_initAttributes` function.\r\nSubsequently at https://github.com/PyGithub/PyGithub/blob/master/scripts/add_attribute.py#L122 `inInit` is set to `False`, but only checked again after reading already the next line. 
This means the following code block will never again notice the place of the `_initAttributes` and fails at the end of the file due to endless loop.\r\n\r\nProblem can be fixed by conditionally remembering we already reached the `_initAttributes` function, so replace:\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/scripts/add_attribute.py#L122\r\n\r\nwith\r\n\r\n```python\r\ninInit = True if line == \" def _initAttributes(self):\" else False\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# Copyright 2018 bbi-yggy <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport os.path\nimport sys\n\nclassName, attributeName, attributeType = sys.argv[1:4]\nif len(sys.argv) > 4:\n attributeClassType = sys.argv[4]\nelse:\n attributeClassType = \"\"\n\n\ntypes = {\n \"string\": (\n \"string\",\n None,\n 'self._makeStringAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"int\": (\n \"integer\",\n None,\n 'self._makeIntAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"bool\": (\n \"bool\",\n None,\n 'self._makeBoolAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"datetime\": (\n \"datetime.datetime\",\n \"(str, unicode)\",\n 'self._makeDatetimeAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"class\": (\n \":class:`\" + attributeClassType + \"`\",\n None,\n \"self._makeClassAttribute(\"\n + attributeClassType\n + ', attributes[\"'\n + attributeName\n + '\"])',\n ),\n}\n\nattributeDocType, attributeAssertType, attributeValue = types[attributeType]\n\n\nfileName = os.path.join(\"github\", className + \".py\")\n\nwith open(fileName) as f:\n lines = list(f)\n\nnewLines = []\n\ni = 0\n\nadded = False\n\nisCompletable = True\nisProperty = False\nwhile not added:\n line = lines[i].rstrip()\n i += 1\n if line.startswith(\"class \"):\n if \"NonCompletableGithubObject\" in line:\n isCompletable = False\n elif line == \" @property\":\n isProperty = True\n elif line.startswith(\" def \"):\n attrName = line[8:-7]\n # Properties will be inserted after __repr__, but before any other function.\n if attrName != \"__repr__\" and (\n attrName == \"_identity\" or attrName > attributeName or not isProperty\n ):\n if not isProperty:\n newLines.append(\" @property\")\n newLines.append(\" def \" + attributeName + \"(self):\")\n newLines.append(' \"\"\"')\n newLines.append(\" :type: 
\" + attributeDocType)\n newLines.append(' \"\"\"')\n if isCompletable:\n newLines.append(\n \" self._completeIfNotSet(self._\" + attributeName + \")\"\n )\n newLines.append(\" return self._\" + attributeName + \".value\")\n newLines.append(\"\")\n if isProperty:\n newLines.append(\" @property\")\n added = True\n isProperty = False\n newLines.append(line)\n\nadded = False\n\ninInit = False\nwhile not added:\n line = lines[i].rstrip()\n i += 1\n if line == \" def _initAttributes(self):\":\n inInit = True\n if inInit:\n if not line or line.endswith(\" = github.GithubObject.NotSet\"):\n if line:\n attrName = line[14:-29]\n if not line or attrName > attributeName:\n newLines.append(\n \" self._\" + attributeName + \" = github.GithubObject.NotSet\"\n )\n added = True\n newLines.append(line)\n\nadded = False\n\ninUse = False\nwhile not added:\n try:\n line = lines[i].rstrip()\n except IndexError:\n line = \"\"\n i += 1\n if line == \" def _useAttributes(self, attributes):\":\n inUse = True\n if inUse:\n if not line or line.endswith(\" in attributes: # pragma no branch\"):\n if line:\n attrName = line[12:-36]\n if not line or attrName > attributeName:\n newLines.append(\n ' if \"'\n + attributeName\n + '\" in attributes: # pragma no branch'\n )\n if attributeAssertType:\n newLines.append(\n ' assert attributes[\"'\n + attributeName\n + '\"] is None or isinstance(attributes[\"'\n + attributeName\n + '\"], '\n + attributeAssertType\n + '), attributes[\"'\n + attributeName\n + '\"]'\n )\n newLines.append(\n \" self._\" + attributeName + \" = \" + attributeValue\n )\n added = True\n newLines.append(line)\n\n\nwhile i < len(lines):\n line = lines[i].rstrip()\n i += 1\n newLines.append(line)\n\nwith open(fileName, \"w\") as f:\n for line in newLines:\n f.write(line + \"\\n\")\n", "path": "scripts/add_attribute.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# Copyright 2018 bbi-yggy <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport os.path\nimport sys\n\nclassName, attributeName, attributeType = sys.argv[1:4]\nif len(sys.argv) > 4:\n attributeClassType = sys.argv[4]\nelse:\n attributeClassType = \"\"\n\n\ntypes = {\n \"string\": (\n \"string\",\n None,\n 'self._makeStringAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"int\": (\n \"integer\",\n None,\n 'self._makeIntAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"bool\": (\n \"bool\",\n None,\n 'self._makeBoolAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"datetime\": (\n \"datetime.datetime\",\n \"(str, unicode)\",\n 'self._makeDatetimeAttribute(attributes[\"' + attributeName + '\"])',\n ),\n \"class\": (\n \":class:`\" + attributeClassType + \"`\",\n None,\n \"self._makeClassAttribute(\"\n + attributeClassType\n + ', attributes[\"'\n + attributeName\n + '\"])',\n ),\n}\n\nattributeDocType, attributeAssertType, attributeValue = types[attributeType]\n\n\nfileName = os.path.join(\"github\", className + \".py\")\n\nwith open(fileName) as f:\n lines = list(f)\n\nnewLines = []\n\ni = 0\n\nadded = False\n\nisCompletable = True\nisProperty = False\nwhile not added:\n line = lines[i].rstrip()\n i += 1\n if line.startswith(\"class \"):\n if \"NonCompletableGithubObject\" in line:\n isCompletable = False\n elif line == \" @property\":\n isProperty = True\n elif line.startswith(\" def \"):\n attrName = line[8:-7]\n # Properties will be inserted after __repr__, but before any other function.\n if attrName != \"__repr__\" and (\n attrName == \"_identity\" or attrName > attributeName or not isProperty\n ):\n if not isProperty:\n newLines.append(\" @property\")\n newLines.append(\" def \" + attributeName + \"(self):\")\n newLines.append(' \"\"\"')\n newLines.append(\" :type: \" + attributeDocType)\n newLines.append(' \"\"\"')\n if isCompletable:\n newLines.append(\n \" self._completeIfNotSet(self._\" + attributeName + \")\"\n )\n newLines.append(\" return self._\" + attributeName + \".value\")\n newLines.append(\"\")\n if isProperty:\n newLines.append(\" @property\")\n added = True\n isProperty = False\n newLines.append(line)\n\nadded = False\n\ninInit = line.endswith(\"def _initAttributes(self):\")\nwhile not added:\n line = lines[i].rstrip()\n i += 1\n if line == \" def _initAttributes(self):\":\n inInit = True\n if inInit:\n if not line or line.endswith(\" = github.GithubObject.NotSet\"):\n if line:\n attrName = line[14:-29]\n if not line or attrName > attributeName:\n newLines.append(\n \" self._\" + attributeName + \" = github.GithubObject.NotSet\"\n )\n added = True\n newLines.append(line)\n\nadded = False\n\ninUse = False\nwhile not added:\n try:\n line = lines[i].rstrip()\n except IndexError:\n line = \"\"\n i += 1\n if line == \" def _useAttributes(self, attributes):\":\n inUse = True\n if inUse:\n if not line or line.endswith(\" in attributes: # pragma no branch\"):\n if line:\n attrName = line[12:-36]\n if not line or attrName > attributeName:\n newLines.append(\n ' if \"'\n + attributeName\n + '\" in attributes: # pragma no branch'\n )\n if attributeAssertType:\n newLines.append(\n ' assert attributes[\"'\n + attributeName\n + '\"] is None or isinstance(attributes[\"'\n + attributeName\n + '\"], '\n + attributeAssertType\n + '), attributes[\"'\n + attributeName\n + '\"]'\n )\n newLines.append(\n \" self._\" + attributeName + \" = \" + attributeValue\n )\n added = True\n newLines.append(line)\n\n\nwhile i < len(lines):\n line = 
lines[i].rstrip()\n i += 1\n newLines.append(line)\n\nwith open(fileName, \"w\") as f:\n for line in newLines:\n f.write(line + \"\\n\")\n", "path": "scripts/add_attribute.py"}]}
| 2,918 | 84 |
gh_patches_debug_19736
|
rasdani/github-patches
|
git_diff
|
Cog-Creators__Red-DiscordBot-3649
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dev cog is missing `__name__` in its environment
# Other bugs
#### What were you trying to do?
I tried to run this code with `[p]eval`:
```py
from typing import TypeVar
T = TypeVar("T")
```
#### What were you expecting to happen?
I expected it to run successfully.
#### What actually happened?
I got error:
```
Traceback (most recent call last):
File "/home/ubuntu/red-venv/lib/python3.8/site-packages/redbot/core/dev_commands.py", line 192, in _eval
result = await func()
File "<string>", line 3, in func
File "<string>", line 2, in <module>
File "/usr/lib/python3.8/typing.py", line 603, in __init__
def_mod = sys._getframe(1).f_globals['__name__'] # for pickling
KeyError: '__name__'
```
#### How can we reproduce this issue?
Run the snippet above with `[p]eval` command.
---
Personally, I would just assign `__main__` to `__name__` variable, but I'm open to other options.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/core/dev_commands.py`
Content:
```
1 import ast
2 import asyncio
3 import aiohttp
4 import inspect
5 import io
6 import textwrap
7 import traceback
8 import types
9 import re
10 from contextlib import redirect_stdout
11 from copy import copy
12
13 import discord
14
15 from . import checks, commands
16 from .i18n import Translator
17 from .utils.chat_formatting import box, pagify
18 from .utils.predicates import MessagePredicate
19
20 """
21 Notice:
22
23 95% of the below code came from R.Danny which can be found here:
24
25 https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
26 """
27
28 _ = Translator("Dev", __file__)
29
30 START_CODE_BLOCK_RE = re.compile(r"^((```py)(?=\s)|(```))")
31
32
33 class Dev(commands.Cog):
34 """Various development focused utilities."""
35
36 def __init__(self):
37 super().__init__()
38 self._last_result = None
39 self.sessions = set()
40
41 @staticmethod
42 def async_compile(source, filename, mode):
43 return compile(source, filename, mode, flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, optimize=0)
44
45 @staticmethod
46 async def maybe_await(coro):
47 for i in range(2):
48 if inspect.isawaitable(coro):
49 coro = await coro
50 else:
51 return coro
52 return coro
53
54 @staticmethod
55 def cleanup_code(content):
56 """Automatically removes code blocks from the code."""
57 # remove ```py\n```
58 if content.startswith("```") and content.endswith("```"):
59 return START_CODE_BLOCK_RE.sub("", content)[:-3]
60
61 # remove `foo`
62 return content.strip("` \n")
63
64 @staticmethod
65 def get_syntax_error(e):
66 """Format a syntax error to send to the user.
67
68 Returns a string representation of the error formatted as a codeblock.
69 """
70 if e.text is None:
71 return box("{0.__class__.__name__}: {0}".format(e), lang="py")
72 return box(
73 "{0.text}\n{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__), lang="py"
74 )
75
76 @staticmethod
77 def get_pages(msg: str):
78 """Pagify the given message for output to the user."""
79 return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
80
81 @staticmethod
82 def sanitize_output(ctx: commands.Context, input_: str) -> str:
83 """Hides the bot's token from a string."""
84 token = ctx.bot.http.token
85 return re.sub(re.escape(token), "[EXPUNGED]", input_, re.I)
86
87 @commands.command()
88 @checks.is_owner()
89 async def debug(self, ctx, *, code):
90 """Evaluate a statement of python code.
91
92 The bot will always respond with the return value of the code.
93 If the return value of the code is a coroutine, it will be awaited,
94 and the result of that will be the bot's response.
95
96 Note: Only one statement may be evaluated. Using certain restricted
97 keywords, e.g. yield, will result in a syntax error. For multiple
98 lines or asynchronous code, see [p]repl or [p]eval.
99
100 Environment Variables:
101 ctx - command invokation context
102 bot - bot object
103 channel - the current channel object
104 author - command author's member object
105 message - the command's message object
106 discord - discord.py library
107 commands - redbot.core.commands
108 _ - The result of the last dev command.
109 """
110 env = {
111 "bot": ctx.bot,
112 "ctx": ctx,
113 "channel": ctx.channel,
114 "author": ctx.author,
115 "guild": ctx.guild,
116 "message": ctx.message,
117 "asyncio": asyncio,
118 "aiohttp": aiohttp,
119 "discord": discord,
120 "commands": commands,
121 "_": self._last_result,
122 }
123
124 code = self.cleanup_code(code)
125
126 try:
127 compiled = self.async_compile(code, "<string>", "eval")
128 result = await self.maybe_await(eval(compiled, env))
129 except SyntaxError as e:
130 await ctx.send(self.get_syntax_error(e))
131 return
132 except Exception as e:
133 await ctx.send(box("{}: {!s}".format(type(e).__name__, e), lang="py"))
134 return
135
136 self._last_result = result
137 result = self.sanitize_output(ctx, str(result))
138
139 await ctx.send_interactive(self.get_pages(result), box_lang="py")
140
141 @commands.command(name="eval")
142 @checks.is_owner()
143 async def _eval(self, ctx, *, body: str):
144 """Execute asynchronous code.
145
146 This command wraps code into the body of an async function and then
147 calls and awaits it. The bot will respond with anything printed to
148 stdout, as well as the return value of the function.
149
150 The code can be within a codeblock, inline code or neither, as long
151 as they are not mixed and they are formatted correctly.
152
153 Environment Variables:
154 ctx - command invokation context
155 bot - bot object
156 channel - the current channel object
157 author - command author's member object
158 message - the command's message object
159 discord - discord.py library
160 commands - redbot.core.commands
161 _ - The result of the last dev command.
162 """
163 env = {
164 "bot": ctx.bot,
165 "ctx": ctx,
166 "channel": ctx.channel,
167 "author": ctx.author,
168 "guild": ctx.guild,
169 "message": ctx.message,
170 "asyncio": asyncio,
171 "aiohttp": aiohttp,
172 "discord": discord,
173 "commands": commands,
174 "_": self._last_result,
175 }
176
177 body = self.cleanup_code(body)
178 stdout = io.StringIO()
179
180 to_compile = "async def func():\n%s" % textwrap.indent(body, " ")
181
182 try:
183 compiled = self.async_compile(to_compile, "<string>", "exec")
184 exec(compiled, env)
185 except SyntaxError as e:
186 return await ctx.send(self.get_syntax_error(e))
187
188 func = env["func"]
189 result = None
190 try:
191 with redirect_stdout(stdout):
192 result = await func()
193 except:
194 printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
195 else:
196 printed = stdout.getvalue()
197 await ctx.tick()
198
199 if result is not None:
200 self._last_result = result
201 msg = "{}{}".format(printed, result)
202 else:
203 msg = printed
204 msg = self.sanitize_output(ctx, msg)
205
206 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
207
208 @commands.command()
209 @checks.is_owner()
210 async def repl(self, ctx):
211 """Open an interactive REPL.
212
213 The REPL will only recognise code as messages which start with a
214 backtick. This includes codeblocks, and as such multiple lines can be
215 evaluated.
216 """
217 variables = {
218 "ctx": ctx,
219 "bot": ctx.bot,
220 "message": ctx.message,
221 "guild": ctx.guild,
222 "channel": ctx.channel,
223 "author": ctx.author,
224 "asyncio": asyncio,
225 "_": None,
226 "__builtins__": __builtins__,
227 }
228
229 if ctx.channel.id in self.sessions:
230 await ctx.send(
231 _("Already running a REPL session in this channel. Exit it with `quit`.")
232 )
233 return
234
235 self.sessions.add(ctx.channel.id)
236 await ctx.send(_("Enter code to execute or evaluate. `exit()` or `quit` to exit."))
237
238 while True:
239 response = await ctx.bot.wait_for("message", check=MessagePredicate.regex(r"^`", ctx))
240
241 cleaned = self.cleanup_code(response.content)
242
243 if cleaned in ("quit", "exit", "exit()"):
244 await ctx.send(_("Exiting."))
245 self.sessions.remove(ctx.channel.id)
246 return
247
248 executor = None
249 if cleaned.count("\n") == 0:
250 # single statement, potentially 'eval'
251 try:
252 code = self.async_compile(cleaned, "<repl session>", "eval")
253 except SyntaxError:
254 pass
255 else:
256 executor = eval
257
258 if executor is None:
259 try:
260 code = self.async_compile(cleaned, "<repl session>", "exec")
261 except SyntaxError as e:
262 await ctx.send(self.get_syntax_error(e))
263 continue
264
265 variables["message"] = response
266
267 stdout = io.StringIO()
268
269 msg = ""
270
271 try:
272 with redirect_stdout(stdout):
273 if executor is None:
274 result = types.FunctionType(code, variables)()
275 else:
276 result = executor(code, variables)
277 result = await self.maybe_await(result)
278 except:
279 value = stdout.getvalue()
280 msg = "{}{}".format(value, traceback.format_exc())
281 else:
282 value = stdout.getvalue()
283 if result is not None:
284 msg = "{}{}".format(value, result)
285 variables["_"] = result
286 elif value:
287 msg = "{}".format(value)
288
289 msg = self.sanitize_output(ctx, msg)
290
291 try:
292 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
293 except discord.Forbidden:
294 pass
295 except discord.HTTPException as e:
296 await ctx.send(_("Unexpected error: `{}`").format(e))
297
298 @commands.command()
299 @checks.is_owner()
300 async def mock(self, ctx, user: discord.Member, *, command):
301 """Mock another user invoking a command.
302
303 The prefix must not be entered.
304 """
305 msg = copy(ctx.message)
306 msg.author = user
307 msg.content = ctx.prefix + command
308
309 ctx.bot.dispatch("message", msg)
310
311 @commands.command(name="mockmsg")
312 @checks.is_owner()
313 async def mock_msg(self, ctx, user: discord.Member, *, content: str):
314 """Dispatch a message event as if it were sent by a different user.
315
316 Only reads the raw content of the message. Attachments, embeds etc. are
317 ignored.
318 """
319 old_author = ctx.author
320 old_content = ctx.message.content
321 ctx.message.author = user
322 ctx.message.content = content
323
324 ctx.bot.dispatch("message", ctx.message)
325
326 # If we change the author and content back too quickly,
327 # the bot won't process the mocked message in time.
328 await asyncio.sleep(2)
329 ctx.message.author = old_author
330 ctx.message.content = old_content
331
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py
--- a/redbot/core/dev_commands.py
+++ b/redbot/core/dev_commands.py
@@ -119,6 +119,7 @@
"discord": discord,
"commands": commands,
"_": self._last_result,
+ "__name__": "__main__",
}
code = self.cleanup_code(code)
@@ -172,6 +173,7 @@
"discord": discord,
"commands": commands,
"_": self._last_result,
+ "__name__": "__main__",
}
body = self.cleanup_code(body)
@@ -224,6 +226,7 @@
"asyncio": asyncio,
"_": None,
"__builtins__": __builtins__,
+ "__name__": "__main__",
}
if ctx.channel.id in self.sessions:
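As a sanity check, a minimal sketch (not Red-DiscordBot code) of why the added `"__name__"` key matters when a snippet is compiled and exec'd against a bare globals dict; whether the original failure reproduces depends on the Python version (the traceback in the issue is from 3.8):

```python
code = compile("from typing import TypeVar\nT = TypeVar('T')", "<string>", "exec")

# Without "__name__", TypeVar's lookup of sys._getframe(1).f_globals['__name__']
# can raise KeyError, as in the reported traceback.
env = {"__name__": "__main__"}  # mirrors the key added to the env dicts above
exec(code, env)
print(env["T"])  # ~T
```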
|
{"golden_diff": "diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py\n--- a/redbot/core/dev_commands.py\n+++ b/redbot/core/dev_commands.py\n@@ -119,6 +119,7 @@\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n+ \"__name__\": \"__main__\",\n }\n \n code = self.cleanup_code(code)\n@@ -172,6 +173,7 @@\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n+ \"__name__\": \"__main__\",\n }\n \n body = self.cleanup_code(body)\n@@ -224,6 +226,7 @@\n \"asyncio\": asyncio,\n \"_\": None,\n \"__builtins__\": __builtins__,\n+ \"__name__\": \"__main__\",\n }\n \n if ctx.channel.id in self.sessions:\n", "issue": "Dev cog is missing `__name__` in its environment\n# Other bugs\r\n\r\n#### What were you trying to do?\r\n\r\nI tried to run this code with `[p]eval`:\r\n```py\r\nfrom typing import TypeVar\r\nT = TypeVar(\"T\")\r\n```\r\n\r\n#### What were you expecting to happen?\r\n\r\nI expected it to run successfully.\r\n\r\n#### What actually happened?\r\n\r\nI got error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/red-venv/lib/python3.8/site-packages/redbot/core/dev_commands.py\", line 192, in _eval\r\n result = await func()\r\n File \"<string>\", line 3, in func\r\n File \"<string>\", line 2, in <module>\r\n File \"/usr/lib/python3.8/typing.py\", line 603, in __init__\r\n def_mod = sys._getframe(1).f_globals['__name__'] # for pickling\r\nKeyError: '__name__'\r\n```\r\n\r\n#### How can we reproduce this issue?\r\n\r\nRun the snippet above with `[p]eval` command.\r\n\r\n---\r\n\r\nPersonally, I would just assign `__main__` to `__name__` variable, but I'm open to other options.\r\n\n", "before_files": [{"content": "import ast\nimport asyncio\nimport aiohttp\nimport inspect\nimport io\nimport textwrap\nimport traceback\nimport types\nimport re\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\n\nfrom . 
import checks, commands\nfrom .i18n import Translator\nfrom .utils.chat_formatting import box, pagify\nfrom .utils.predicates import MessagePredicate\n\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = Translator(\"Dev\", __file__)\n\nSTART_CODE_BLOCK_RE = re.compile(r\"^((```py)(?=\\s)|(```))\")\n\n\nclass Dev(commands.Cog):\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n super().__init__()\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def async_compile(source, filename, mode):\n return compile(source, filename, mode, flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, optimize=0)\n\n @staticmethod\n async def maybe_await(coro):\n for i in range(2):\n if inspect.isawaitable(coro):\n coro = await coro\n else:\n return coro\n return coro\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith(\"```\") and content.endswith(\"```\"):\n return START_CODE_BLOCK_RE.sub(\"\", content)[:-3]\n\n # remove `foo`\n return content.strip(\"` \\n\")\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box(\"{0.__class__.__name__}: {0}\".format(e), lang=\"py\")\n return box(\n \"{0.text}\\n{1:>{0.offset}}\\n{2}: {0}\".format(e, \"^\", type(e).__name__), lang=\"py\"\n )\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n return re.sub(re.escape(token), \"[EXPUNGED]\", input_, re.I)\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using certain restricted\n keywords, e.g. yield, will result in a syntax error. 
For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"asyncio\": asyncio,\n \"aiohttp\": aiohttp,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n }\n\n code = self.cleanup_code(code)\n\n try:\n compiled = self.async_compile(code, \"<string>\", \"eval\")\n result = await self.maybe_await(eval(compiled, env))\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(box(\"{}: {!s}\".format(type(e).__name__, e), lang=\"py\"))\n return\n\n self._last_result = result\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name=\"eval\")\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"asyncio\": asyncio,\n \"aiohttp\": aiohttp,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = \"async def func():\\n%s\" % textwrap.indent(body, \" \")\n\n try:\n compiled = self.async_compile(to_compile, \"<string>\", \"exec\")\n exec(compiled, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env[\"func\"]\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. 
This includes codeblocks, and as such multiple lines can be\n evaluated.\n \"\"\"\n variables = {\n \"ctx\": ctx,\n \"bot\": ctx.bot,\n \"message\": ctx.message,\n \"guild\": ctx.guild,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"asyncio\": asyncio,\n \"_\": None,\n \"__builtins__\": __builtins__,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(\n _(\"Already running a REPL session in this channel. Exit it with `quit`.\")\n )\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_(\"Enter code to execute or evaluate. `exit()` or `quit` to exit.\"))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=MessagePredicate.regex(r\"^`\", ctx))\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in (\"quit\", \"exit\", \"exit()\"):\n await ctx.send(_(\"Exiting.\"))\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = None\n if cleaned.count(\"\\n\") == 0:\n # single statement, potentially 'eval'\n try:\n code = self.async_compile(cleaned, \"<repl session>\", \"eval\")\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is None:\n try:\n code = self.async_compile(cleaned, \"<repl session>\", \"exec\")\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables[\"message\"] = response\n\n stdout = io.StringIO()\n\n msg = \"\"\n\n try:\n with redirect_stdout(stdout):\n if executor is None:\n result = types.FunctionType(code, variables)()\n else:\n result = executor(code, variables)\n result = await self.maybe_await(result)\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables[\"_\"] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_(\"Unexpected error: `{}`\").format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch(\"message\", msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}], "after_files": [{"content": "import ast\nimport asyncio\nimport aiohttp\nimport inspect\nimport io\nimport textwrap\nimport traceback\nimport types\nimport re\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\n\nfrom . 
import checks, commands\nfrom .i18n import Translator\nfrom .utils.chat_formatting import box, pagify\nfrom .utils.predicates import MessagePredicate\n\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = Translator(\"Dev\", __file__)\n\nSTART_CODE_BLOCK_RE = re.compile(r\"^((```py)(?=\\s)|(```))\")\n\n\nclass Dev(commands.Cog):\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n super().__init__()\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def async_compile(source, filename, mode):\n return compile(source, filename, mode, flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, optimize=0)\n\n @staticmethod\n async def maybe_await(coro):\n for i in range(2):\n if inspect.isawaitable(coro):\n coro = await coro\n else:\n return coro\n return coro\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith(\"```\") and content.endswith(\"```\"):\n return START_CODE_BLOCK_RE.sub(\"\", content)[:-3]\n\n # remove `foo`\n return content.strip(\"` \\n\")\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box(\"{0.__class__.__name__}: {0}\".format(e), lang=\"py\")\n return box(\n \"{0.text}\\n{1:>{0.offset}}\\n{2}: {0}\".format(e, \"^\", type(e).__name__), lang=\"py\"\n )\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n return re.sub(re.escape(token), \"[EXPUNGED]\", input_, re.I)\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using certain restricted\n keywords, e.g. yield, will result in a syntax error. 
For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"asyncio\": asyncio,\n \"aiohttp\": aiohttp,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n \"__name__\": \"__main__\",\n }\n\n code = self.cleanup_code(code)\n\n try:\n compiled = self.async_compile(code, \"<string>\", \"eval\")\n result = await self.maybe_await(eval(compiled, env))\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(box(\"{}: {!s}\".format(type(e).__name__, e), lang=\"py\"))\n return\n\n self._last_result = result\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name=\"eval\")\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"asyncio\": asyncio,\n \"aiohttp\": aiohttp,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n \"__name__\": \"__main__\",\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = \"async def func():\\n%s\" % textwrap.indent(body, \" \")\n\n try:\n compiled = self.async_compile(to_compile, \"<string>\", \"exec\")\n exec(compiled, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env[\"func\"]\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. 
This includes codeblocks, and as such multiple lines can be\n evaluated.\n \"\"\"\n variables = {\n \"ctx\": ctx,\n \"bot\": ctx.bot,\n \"message\": ctx.message,\n \"guild\": ctx.guild,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"asyncio\": asyncio,\n \"_\": None,\n \"__builtins__\": __builtins__,\n \"__name__\": \"__main__\",\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(\n _(\"Already running a REPL session in this channel. Exit it with `quit`.\")\n )\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_(\"Enter code to execute or evaluate. `exit()` or `quit` to exit.\"))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=MessagePredicate.regex(r\"^`\", ctx))\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in (\"quit\", \"exit\", \"exit()\"):\n await ctx.send(_(\"Exiting.\"))\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = None\n if cleaned.count(\"\\n\") == 0:\n # single statement, potentially 'eval'\n try:\n code = self.async_compile(cleaned, \"<repl session>\", \"eval\")\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is None:\n try:\n code = self.async_compile(cleaned, \"<repl session>\", \"exec\")\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables[\"message\"] = response\n\n stdout = io.StringIO()\n\n msg = \"\"\n\n try:\n with redirect_stdout(stdout):\n if executor is None:\n result = types.FunctionType(code, variables)()\n else:\n result = executor(code, variables)\n result = await self.maybe_await(result)\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables[\"_\"] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_(\"Unexpected error: `{}`\").format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch(\"message\", msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}]}
| 3,726 | 204 |
gh_patches_debug_12807
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1086
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_119 - DynamoDB table encryption
**Describe the bug**
In general DynamoDB tables are encrypted by default and this can't be turned off, you can change it to use a KMS key of your choice. Therefore the check description is incorrect.
Further infos can be found in the API documentation https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_SSESpecification.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class DynamoDBTablesEncrypted(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure DynamoDB Tables are encrypted"
8 id = "CKV_AWS_119"
9 supported_resources = ['aws_dynamodb_table']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "server_side_encryption/[0]/enabled"
15
16
17 check = DynamoDBTablesEncrypted()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py b/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py
--- a/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py
+++ b/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py
@@ -4,10 +4,10 @@
class DynamoDBTablesEncrypted(BaseResourceValueCheck):
def __init__(self):
- name = "Ensure DynamoDB Tables are encrypted"
+ name = "Ensure DynamoDB Tables are encrypted using KMS"
id = "CKV_AWS_119"
- supported_resources = ['aws_dynamodb_table']
- categories = [CheckCategories.NETWORKING]
+ supported_resources = ["aws_dynamodb_table"]
+ categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
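For context, a hedged sketch of the underlying AWS API described in the SSESpecification documentation linked in the issue; this is not checkov code, and the region, table name and key alias are placeholders:

```python
import boto3

dynamodb = boto3.client("dynamodb", region_name="eu-west-1")  # placeholder region

dynamodb.create_table(
    TableName="example-table",  # placeholder
    AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
    KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
    BillingMode="PAY_PER_REQUEST",
    # Leaving SSESpecification out still gives an encrypted table (AWS-owned
    # key); the renamed check is about opting into a customer-managed KMS key.
    SSESpecification={
        "Enabled": True,
        "SSEType": "KMS",
        "KMSMasterKeyId": "alias/example-cmk",  # placeholder
    },
)
```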
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py b/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py\n--- a/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py\n+++ b/checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py\n@@ -4,10 +4,10 @@\n \n class DynamoDBTablesEncrypted(BaseResourceValueCheck):\n def __init__(self):\n- name = \"Ensure DynamoDB Tables are encrypted\"\n+ name = \"Ensure DynamoDB Tables are encrypted using KMS\"\n id = \"CKV_AWS_119\"\n- supported_resources = ['aws_dynamodb_table']\n- categories = [CheckCategories.NETWORKING]\n+ supported_resources = [\"aws_dynamodb_table\"]\n+ categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n", "issue": "CKV_AWS_119 - DynamoDB table encryption\n**Describe the bug**\r\nIn general DynamoDB tables are encrypted by default and this can't be turned off, you can change it to use a KMS key of your choice. Therefore the check description is incorrect.\r\n\r\nFurther infos can be found in the API documentation https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_SSESpecification.html\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DynamoDBTablesEncrypted(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure DynamoDB Tables are encrypted\"\n id = \"CKV_AWS_119\"\n supported_resources = ['aws_dynamodb_table']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"server_side_encryption/[0]/enabled\"\n\n\ncheck = DynamoDBTablesEncrypted()\n", "path": "checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DynamoDBTablesEncrypted(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure DynamoDB Tables are encrypted using KMS\"\n id = \"CKV_AWS_119\"\n supported_resources = [\"aws_dynamodb_table\"]\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"server_side_encryption/[0]/enabled\"\n\n\ncheck = DynamoDBTablesEncrypted()\n", "path": "checkov/terraform/checks/resource/aws/DynamoDBTablesEncrypted.py"}]}
| 527 | 217 |
rasdani/github-patches
|
git_diff
|
python-pillow__Pillow-6973
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot identify .fits file
### What did you do?
Tried using pillow for opening/handling a .fits file for training a machine learning model. According to the documentation opening/reading fits files should be enabled? Or am I misunderstanding how a fits file should be opened?
From Issue [4054](https://github.com/python-pillow/Pillow/issues/4054)/ PR 6056
> I've created PR https://github.com/python-pillow/Pillow/pull/6056 to resolve this. If that is merged, you should no longer have to worry about register_handler(), but can instead just Image.open("sample.fits").
### What did you expect to happen?
Not receiving a "cannot identify error" while using Image.open. Expected the function to work as with other supported file formats. The .fits files in question are not corrupted, and can be opened as normal with other software.
### What happened?
```python
from PIL import Image
with Image.open('example.fits') as im:
im.verify()
```
```
---------------------------------------------------------------------------
UnidentifiedImageError Traceback (most recent call last)
Cell In [38], line 2
1 from PIL import FitsImagePlugin, ImageFile
----> 2 with Image.open('example.fits') as im:
3 im.verify()
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\PIL\Image.py:3186, in open(fp, mode, formats)
3184 for message in accept_warnings:
3185 warnings.warn(message)
-> 3186 raise UnidentifiedImageError(
3187 "cannot identify image file %r" % (filename if filename else fp)
3188 )
UnidentifiedImageError: cannot identify image file 'example.fits'
```
### What are your OS, Python and Pillow versions?
* OS: windows 10
* Python: 3.10
* Pillow: 9.3.0
<!--
Please include **code** that reproduces the issue and whenever possible, an **image** that demonstrates the issue. Please upload images to GitHub, not to third-party file hosting sites. If necessary, add the image to a zip or tar archive.
The best reproductions are self-contained scripts with minimal dependencies. If you are using a framework such as Plone, Django, or Buildout, try to replicate the issue just using Pillow.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/FitsImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # FITS file handling
6 #
7 # Copyright (c) 1998-2003 by Fredrik Lundh
8 #
9 # See the README file for information on usage and redistribution.
10 #
11
12 import math
13
14 from . import Image, ImageFile
15
16
17 def _accept(prefix):
18 return prefix[:6] == b"SIMPLE"
19
20
21 class FitsImageFile(ImageFile.ImageFile):
22 format = "FITS"
23 format_description = "FITS"
24
25 def _open(self):
26 headers = {}
27 while True:
28 header = self.fp.read(80)
29 if not header:
30 msg = "Truncated FITS file"
31 raise OSError(msg)
32 keyword = header[:8].strip()
33 if keyword == b"END":
34 break
35 value = header[8:].strip()
36 if value.startswith(b"="):
37 value = value[1:].strip()
38 if not headers and (not _accept(keyword) or value != b"T"):
39 msg = "Not a FITS file"
40 raise SyntaxError(msg)
41 headers[keyword] = value
42
43 naxis = int(headers[b"NAXIS"])
44 if naxis == 0:
45 msg = "No image data"
46 raise ValueError(msg)
47 elif naxis == 1:
48 self._size = 1, int(headers[b"NAXIS1"])
49 else:
50 self._size = int(headers[b"NAXIS1"]), int(headers[b"NAXIS2"])
51
52 number_of_bits = int(headers[b"BITPIX"])
53 if number_of_bits == 8:
54 self.mode = "L"
55 elif number_of_bits == 16:
56 self.mode = "I"
57 # rawmode = "I;16S"
58 elif number_of_bits == 32:
59 self.mode = "I"
60 elif number_of_bits in (-32, -64):
61 self.mode = "F"
62 # rawmode = "F" if number_of_bits == -32 else "F;64F"
63
64 offset = math.ceil(self.fp.tell() / 2880) * 2880
65 self.tile = [("raw", (0, 0) + self.size, offset, (self.mode, 0, -1))]
66
67
68 # --------------------------------------------------------------------
69 # Registry
70
71 Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
72
73 Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/PIL/FitsImagePlugin.py b/src/PIL/FitsImagePlugin.py
--- a/src/PIL/FitsImagePlugin.py
+++ b/src/PIL/FitsImagePlugin.py
@@ -32,7 +32,7 @@
keyword = header[:8].strip()
if keyword == b"END":
break
- value = header[8:].strip()
+ value = header[8:].split(b"/")[0].strip()
if value.startswith(b"="):
value = value[1:].strip()
if not headers and (not _accept(keyword) or value != b"T"):
|
{"golden_diff": "diff --git a/src/PIL/FitsImagePlugin.py b/src/PIL/FitsImagePlugin.py\n--- a/src/PIL/FitsImagePlugin.py\n+++ b/src/PIL/FitsImagePlugin.py\n@@ -32,7 +32,7 @@\n keyword = header[:8].strip()\n if keyword == b\"END\":\n break\n- value = header[8:].strip()\n+ value = header[8:].split(b\"/\")[0].strip()\n if value.startswith(b\"=\"):\n value = value[1:].strip()\n if not headers and (not _accept(keyword) or value != b\"T\"):\n", "issue": "Cannot identify .fits file\n### What did you do?\r\nTried using pillow for opening/handling a .fits file for training a machine learning model. According to the documentation opening/reading fits files should be enabled? Or am I misunderstanding how a fits file should be opened? \r\n\r\n\r\nFrom Issue [4054](https://github.com/python-pillow/Pillow/issues/4054)/ PR 6056\r\n\r\n> I've created PR https://github.com/python-pillow/Pillow/pull/6056 to resolve this. If that is merged, you should no longer have to worry about register_handler(), but can instead just Image.open(\"sample.fits\").\r\n\r\n\r\n### What did you expect to happen?\r\nNot recieving a \"cannot identify error\" while using Image.open. Expected the function to work as with other supported file formats. The .fits files in question are not corrupted, and can be opened as normal with other software. \r\n\r\n### What happened?\r\n```python\r\nfrom PIL import Image\r\nwith Image.open('example.fits') as im:\r\n im.verify()\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nUnidentifiedImageError Traceback (most recent call last)\r\nCell In [38], line 2\r\n 1 from PIL import FitsImagePlugin, ImageFile\r\n----> 2 with Image.open('example.fits') as im:\r\n 3 im.verify()\r\n\r\nFile ~\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\PIL\\Image.py:3186, in open(fp, mode, formats)\r\n 3184 for message in accept_warnings:\r\n 3185 warnings.warn(message)\r\n-> 3186 raise UnidentifiedImageError(\r\n 3187 \"cannot identify image file %r\" % (filename if filename else fp)\r\n 3188 )\r\n\r\nUnidentifiedImageError: cannot identify image file 'example.fits'\r\n```\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: windows 10\r\n* Python: 3.10\r\n* Pillow: 9.3.0\r\n\r\n<!--\r\nPlease include **code** that reproduces the issue and whenever possible, an **image** that demonstrates the issue. Please upload images to GitHub, not to third-party file hosting sites. If necessary, add the image to a zip or tar archive.\r\n\r\nThe best reproductions are self-contained scripts with minimal dependencies. If you are using a framework such as Plone, Django, or Buildout, try to replicate the issue just using Pillow.\r\n-->\r\n\r\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# FITS file handling\n#\n# Copyright (c) 1998-2003 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport math\n\nfrom . 
import Image, ImageFile\n\n\ndef _accept(prefix):\n return prefix[:6] == b\"SIMPLE\"\n\n\nclass FitsImageFile(ImageFile.ImageFile):\n format = \"FITS\"\n format_description = \"FITS\"\n\n def _open(self):\n headers = {}\n while True:\n header = self.fp.read(80)\n if not header:\n msg = \"Truncated FITS file\"\n raise OSError(msg)\n keyword = header[:8].strip()\n if keyword == b\"END\":\n break\n value = header[8:].strip()\n if value.startswith(b\"=\"):\n value = value[1:].strip()\n if not headers and (not _accept(keyword) or value != b\"T\"):\n msg = \"Not a FITS file\"\n raise SyntaxError(msg)\n headers[keyword] = value\n\n naxis = int(headers[b\"NAXIS\"])\n if naxis == 0:\n msg = \"No image data\"\n raise ValueError(msg)\n elif naxis == 1:\n self._size = 1, int(headers[b\"NAXIS1\"])\n else:\n self._size = int(headers[b\"NAXIS1\"]), int(headers[b\"NAXIS2\"])\n\n number_of_bits = int(headers[b\"BITPIX\"])\n if number_of_bits == 8:\n self.mode = \"L\"\n elif number_of_bits == 16:\n self.mode = \"I\"\n # rawmode = \"I;16S\"\n elif number_of_bits == 32:\n self.mode = \"I\"\n elif number_of_bits in (-32, -64):\n self.mode = \"F\"\n # rawmode = \"F\" if number_of_bits == -32 else \"F;64F\"\n\n offset = math.ceil(self.fp.tell() / 2880) * 2880\n self.tile = [(\"raw\", (0, 0) + self.size, offset, (self.mode, 0, -1))]\n\n\n# --------------------------------------------------------------------\n# Registry\n\nImage.register_open(FitsImageFile.format, FitsImageFile, _accept)\n\nImage.register_extensions(FitsImageFile.format, [\".fit\", \".fits\"])\n", "path": "src/PIL/FitsImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# FITS file handling\n#\n# Copyright (c) 1998-2003 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport math\n\nfrom . import Image, ImageFile\n\n\ndef _accept(prefix):\n return prefix[:6] == b\"SIMPLE\"\n\n\nclass FitsImageFile(ImageFile.ImageFile):\n format = \"FITS\"\n format_description = \"FITS\"\n\n def _open(self):\n headers = {}\n while True:\n header = self.fp.read(80)\n if not header:\n msg = \"Truncated FITS file\"\n raise OSError(msg)\n keyword = header[:8].strip()\n if keyword == b\"END\":\n break\n value = header[8:].split(b\"/\")[0].strip()\n if value.startswith(b\"=\"):\n value = value[1:].strip()\n if not headers and (not _accept(keyword) or value != b\"T\"):\n msg = \"Not a FITS file\"\n raise SyntaxError(msg)\n headers[keyword] = value\n\n naxis = int(headers[b\"NAXIS\"])\n if naxis == 0:\n msg = \"No image data\"\n raise ValueError(msg)\n elif naxis == 1:\n self._size = 1, int(headers[b\"NAXIS1\"])\n else:\n self._size = int(headers[b\"NAXIS1\"]), int(headers[b\"NAXIS2\"])\n\n number_of_bits = int(headers[b\"BITPIX\"])\n if number_of_bits == 8:\n self.mode = \"L\"\n elif number_of_bits == 16:\n self.mode = \"I\"\n # rawmode = \"I;16S\"\n elif number_of_bits == 32:\n self.mode = \"I\"\n elif number_of_bits in (-32, -64):\n self.mode = \"F\"\n # rawmode = \"F\" if number_of_bits == -32 else \"F;64F\"\n\n offset = math.ceil(self.fp.tell() / 2880) * 2880\n self.tile = [(\"raw\", (0, 0) + self.size, offset, (self.mode, 0, -1))]\n\n\n# --------------------------------------------------------------------\n# Registry\n\nImage.register_open(FitsImageFile.format, FitsImageFile, _accept)\n\nImage.register_extensions(FitsImageFile.format, [\".fit\", \".fits\"])\n", "path": "src/PIL/FitsImagePlugin.py"}]}
| 1,501 | 136 |
gh_patches_debug_15674
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-10230
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled python exception with 0.62 on windows (0.61 ok)
**Describe the bug**
When running meson 0.62 on win32 and a project using `dependency()` (ex glib):
Unhandled python exception
ModuleNotFoundError: No module named 'mesonbuild.dependencies.data'
```
Traceback (most recent call last):
File "mesonbuild\mesonmain.py", line 151, in run
File "mesonbuild\msetup.py", line 301, in run
File "mesonbuild\msetup.py", line 185, in generate
File "mesonbuild\msetup.py", line 229, in _generate
File "mesonbuild\interpreter\interpreter.py", line 2698, in run
File "mesonbuild\interpreterbase\interpreterbase.py", line 149, in run
File "mesonbuild\interpreterbase\interpreterbase.py", line 174, in evaluate_codeblock
File "mesonbuild\interpreterbase\interpreterbase.py", line 167, in evaluate_codeblock
File "mesonbuild\interpreterbase\interpreterbase.py", line 182, in evaluate_statement
File "mesonbuild\interpreterbase\interpreterbase.py", line 567, in assignment
File "mesonbuild\interpreterbase\interpreterbase.py", line 180, in evaluate_statement
File "mesonbuild\interpreterbase\interpreterbase.py", line 455, in function_call
File "mesonbuild\interpreterbase\decorators.py", line 768, in wrapped
File "mesonbuild\interpreterbase\decorators.py", line 768, in wrapped
File "mesonbuild\interpreterbase\decorators.py", line 768, in wrapped
[Previous line repeated 5 more times]
File "mesonbuild\interpreterbase\decorators.py", line 109, in wrapped
File "mesonbuild\interpreterbase\decorators.py", line 127, in wrapped
File "mesonbuild\interpreterbase\decorators.py", line 277, in wrapper
File "mesonbuild\interpreter\interpreter.py", line 1620, in func_dependency
File "mesonbuild\interpreter\dependencyfallbacks.py", line 352, in lookup
File "mesonbuild\interpreter\dependencyfallbacks.py", line 93, in _do_dependency
File "mesonbuild\dependencies\detect.py", line 112, in find_external_dependency
File "mesonbuild\dependencies\cmake.py", line 135, in __init__
File "mesonbuild\dependencies\cmake.py", line 183, in _get_cmake_info
File "mesonbuild\dependencies\cmake.py", line 614, in _call_cmake
File "mesonbuild\dependencies\cmake.py", line 585, in _setup_cmake_dir
File "importlib\resources.py", line 103, in read_text
File "importlib\resources.py", line 82, in open_text
File "importlib\resources.py", line 43, in open_binary
File "importlib\_common.py", line 66, in get_package
File "importlib\_common.py", line 57, in resolve
File "importlib\__init__.py", line 126, in import_module
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1004, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'mesonbuild.dependencies.data'
```
**To Reproduce**
project('foo')
pcre = dependency('libpcre')
**system parameters**
meson 0.62 (MSI) on windev VM (https://developer.microsoft.com/en-us/windows/downloads/virtual-machines/)
works as expected on 0.61
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packaging/hook-mesonbuild.py`
Content:
```
1 #!hint/python3
2
3 """
4 PyInstaller hook to make mesonbuild include everything it needs to.
5 """
6
7 import os
8 from glob import glob
9
10 hiddenimports = []
11
12 def get_all_modules_from_dir(dirname):
13 '''
14 Get all modules required for Meson itself from directories.
15 '''
16 modname = os.path.basename(dirname)
17 modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join(dirname, '*'))]
18 modules = ['mesonbuild.' + modname + '.' + x for x in modules if not x.startswith('_')]
19 return modules
20
21 hiddenimports += get_all_modules_from_dir('mesonbuild/modules')
22 hiddenimports += get_all_modules_from_dir('mesonbuild/scripts')
23
24 # Python packagers want to be minimal and only copy the things
25 # that they can see being used. They are blind to many things.
26 hiddenimports += [
27 # we run distutils as a subprocess via INTROSPECT_COMMAND.
28 'distutils.archive_util',
29 'distutils.cmd',
30 'distutils.config',
31 'distutils.core',
32 'distutils.debug',
33 'distutils.dep_util',
34 'distutils.dir_util',
35 'distutils.dist',
36 'distutils.errors',
37 'distutils.extension',
38 'distutils.fancy_getopt',
39 'distutils.file_util',
40 'distutils.spawn',
41 'distutils.util',
42 'distutils.version',
43 'distutils.command.build_ext',
44 'distutils.command.build',
45 'distutils.command.install',
46
47 # needed for gtk's find_program() scripts
48 'filecmp',
49 ]
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/packaging/hook-mesonbuild.py b/packaging/hook-mesonbuild.py
--- a/packaging/hook-mesonbuild.py
+++ b/packaging/hook-mesonbuild.py
@@ -7,6 +7,9 @@
import os
from glob import glob
+from PyInstaller.utils.hooks import collect_data_files
+
+datas = []
hiddenimports = []
def get_all_modules_from_dir(dirname):
@@ -18,6 +21,10 @@
modules = ['mesonbuild.' + modname + '.' + x for x in modules if not x.startswith('_')]
return modules
+datas += collect_data_files('mesonbuild.scripts')
+datas += collect_data_files('mesonbuild.cmake.data')
+datas += collect_data_files('mesonbuild.dependencies.data')
+
hiddenimports += get_all_modules_from_dir('mesonbuild/modules')
hiddenimports += get_all_modules_from_dir('mesonbuild/scripts')
|
{"golden_diff": "diff --git a/packaging/hook-mesonbuild.py b/packaging/hook-mesonbuild.py\n--- a/packaging/hook-mesonbuild.py\n+++ b/packaging/hook-mesonbuild.py\n@@ -7,6 +7,9 @@\n import os\n from glob import glob\n \n+from PyInstaller.utils.hooks import collect_data_files\n+\n+datas = []\n hiddenimports = []\n \n def get_all_modules_from_dir(dirname):\n@@ -18,6 +21,10 @@\n modules = ['mesonbuild.' + modname + '.' + x for x in modules if not x.startswith('_')]\n return modules\n \n+datas += collect_data_files('mesonbuild.scripts')\n+datas += collect_data_files('mesonbuild.cmake.data')\n+datas += collect_data_files('mesonbuild.dependencies.data')\n+\n hiddenimports += get_all_modules_from_dir('mesonbuild/modules')\n hiddenimports += get_all_modules_from_dir('mesonbuild/scripts')\n", "issue": "Unhandled python exception with 0.62 on windows (0.61 ok)\n**Describe the bug**\r\nWhen running meson 0.62 on win32 and a project using `dependency()` (ex glib):\r\n\r\nUnhandled python exception\r\nModuleNotFoundError: No module named 'mesonbuild.dependencies.data'\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"mesonbuild\\mesonmain.py\", line 151, in run\r\n File \"mesonbuild\\msetup.py\", line 301, in run\r\n File \"mesonbuild\\msetup.py\", line 185, in generate\r\n File \"mesonbuild\\msetup.py\", line 229, in _generate\r\n File \"mesonbuild\\interpreter\\interpreter.py\", line 2698, in run\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 149, in run\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 174, in evaluate_codeblock\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 167, in evaluate_codeblock\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 182, in evaluate_statement\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 567, in assignment\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 180, in evaluate_statement\r\n File \"mesonbuild\\interpreterbase\\interpreterbase.py\", line 455, in function_call\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 768, in wrapped\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 768, in wrapped\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 768, in wrapped\r\n [Previous line repeated 5 more times]\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 109, in wrapped\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 127, in wrapped\r\n File \"mesonbuild\\interpreterbase\\decorators.py\", line 277, in wrapper\r\n File \"mesonbuild\\interpreter\\interpreter.py\", line 1620, in func_dependency\r\n File \"mesonbuild\\interpreter\\dependencyfallbacks.py\", line 352, in lookup\r\n File \"mesonbuild\\interpreter\\dependencyfallbacks.py\", line 93, in _do_dependency\r\n File \"mesonbuild\\dependencies\\detect.py\", line 112, in find_external_dependency\r\n File \"mesonbuild\\dependencies\\cmake.py\", line 135, in __init__\r\n File \"mesonbuild\\dependencies\\cmake.py\", line 183, in _get_cmake_info\r\n File \"mesonbuild\\dependencies\\cmake.py\", line 614, in _call_cmake\r\n File \"mesonbuild\\dependencies\\cmake.py\", line 585, in _setup_cmake_dir\r\n File \"importlib\\resources.py\", line 103, in read_text\r\n File \"importlib\\resources.py\", line 82, in open_text\r\n File \"importlib\\resources.py\", line 43, in open_binary\r\n File \"importlib\\_common.py\", line 66, in get_package\r\n File \"importlib\\_common.py\", line 57, in resolve\r\n File \"importlib\\__init__.py\", line 126, in 
import_module\r\n File \"<frozen importlib._bootstrap>\", line 1050, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1027, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 1004, in _find_and_load_unlocked\r\nModuleNotFoundError: No module named 'mesonbuild.dependencies.data'\r\n```\r\n\r\n**To Reproduce**\r\nproject('foo')\r\npcre = dependency('libpcre')\r\n\r\n**system parameters**\r\nmeson 0.62 (MSI) on windev VM (https://developer.microsoft.com/en-us/windows/downloads/virtual-machines/)\r\nworks as expected on 0.61\n", "before_files": [{"content": "#!hint/python3\n\n\"\"\"\nPyInstaller hook to make mesonbuild include everything it needs to.\n\"\"\"\n\nimport os\nfrom glob import glob\n\nhiddenimports = []\n\ndef get_all_modules_from_dir(dirname):\n '''\n Get all modules required for Meson itself from directories.\n '''\n modname = os.path.basename(dirname)\n modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join(dirname, '*'))]\n modules = ['mesonbuild.' + modname + '.' + x for x in modules if not x.startswith('_')]\n return modules\n\nhiddenimports += get_all_modules_from_dir('mesonbuild/modules')\nhiddenimports += get_all_modules_from_dir('mesonbuild/scripts')\n\n# Python packagers want to be minimal and only copy the things\n# that they can see being used. They are blind to many things.\nhiddenimports += [\n # we run distutils as a subprocess via INTROSPECT_COMMAND.\n 'distutils.archive_util',\n 'distutils.cmd',\n 'distutils.config',\n 'distutils.core',\n 'distutils.debug',\n 'distutils.dep_util',\n 'distutils.dir_util',\n 'distutils.dist',\n 'distutils.errors',\n 'distutils.extension',\n 'distutils.fancy_getopt',\n 'distutils.file_util',\n 'distutils.spawn',\n 'distutils.util',\n 'distutils.version',\n 'distutils.command.build_ext',\n 'distutils.command.build',\n 'distutils.command.install',\n\n # needed for gtk's find_program() scripts\n 'filecmp',\n]\n", "path": "packaging/hook-mesonbuild.py"}], "after_files": [{"content": "#!hint/python3\n\n\"\"\"\nPyInstaller hook to make mesonbuild include everything it needs to.\n\"\"\"\n\nimport os\nfrom glob import glob\n\nfrom PyInstaller.utils.hooks import collect_data_files\n\ndatas = []\nhiddenimports = []\n\ndef get_all_modules_from_dir(dirname):\n '''\n Get all modules required for Meson itself from directories.\n '''\n modname = os.path.basename(dirname)\n modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join(dirname, '*'))]\n modules = ['mesonbuild.' + modname + '.' + x for x in modules if not x.startswith('_')]\n return modules\n\ndatas += collect_data_files('mesonbuild.scripts')\ndatas += collect_data_files('mesonbuild.cmake.data')\ndatas += collect_data_files('mesonbuild.dependencies.data')\n\nhiddenimports += get_all_modules_from_dir('mesonbuild/modules')\nhiddenimports += get_all_modules_from_dir('mesonbuild/scripts')\n\n# Python packagers want to be minimal and only copy the things\n# that they can see being used. 
They are blind to many things.\nhiddenimports += [\n # we run distutils as a subprocess via INTROSPECT_COMMAND.\n 'distutils.archive_util',\n 'distutils.cmd',\n 'distutils.config',\n 'distutils.core',\n 'distutils.debug',\n 'distutils.dep_util',\n 'distutils.dir_util',\n 'distutils.dist',\n 'distutils.errors',\n 'distutils.extension',\n 'distutils.fancy_getopt',\n 'distutils.file_util',\n 'distutils.spawn',\n 'distutils.util',\n 'distutils.version',\n 'distutils.command.build_ext',\n 'distutils.command.build',\n 'distutils.command.install',\n\n # needed for gtk's find_program() scripts\n 'filecmp',\n]\n", "path": "packaging/hook-mesonbuild.py"}]}
| 1,637 | 207 |
gh_patches_debug_240
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-3013
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cython_pyximport reload broken in python3
python3.3 notebook, tested in 0.13.1 but the code looks the same in HEAD:
%%cython_pyximport foo
def f(x):
return 4.0*x
execute twice and you get
```
/usr/lib/python3/dist-packages/IPython/extensions/cythonmagic.py in cython_pyximport(self, line, cell)
99 if module_name in self._reloads:
100 module = self._reloads[module_name]
--> 101 reload(module)
102 else:
103 __import__(module_name)
NameError: global name 'reload' is not defined
```
imp.reload should be used here
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/extensions/cythonmagic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 =====================
4 Cython related magics
5 =====================
6
7 Usage
8 =====
9
10 ``%%cython``
11
12 {CYTHON_DOC}
13
14 ``%%cython_inline``
15
16 {CYTHON_INLINE_DOC}
17
18 ``%%cython_pyximport``
19
20 {CYTHON_PYXIMPORT_DOC}
21
22 Author:
23 * Brian Granger
24
25 Parts of this code were taken from Cython.inline.
26 """
27 #-----------------------------------------------------------------------------
28 # Copyright (C) 2010-2011, IPython Development Team.
29 #
30 # Distributed under the terms of the Modified BSD License.
31 #
32 # The full license is in the file COPYING.txt, distributed with this software.
33 #-----------------------------------------------------------------------------
34
35 from __future__ import print_function
36
37 import imp
38 import io
39 import os
40 import re
41 import sys
42 import time
43
44 try:
45 import hashlib
46 except ImportError:
47 import md5 as hashlib
48
49 from distutils.core import Distribution, Extension
50 from distutils.command.build_ext import build_ext
51
52 from IPython.core import display
53 from IPython.core import magic_arguments
54 from IPython.core.magic import Magics, magics_class, cell_magic
55 from IPython.testing.skipdoctest import skip_doctest
56 from IPython.utils import py3compat
57
58 import Cython
59 from Cython.Compiler.Errors import CompileError
60 from Cython.Build.Dependencies import cythonize
61
62
63 @magics_class
64 class CythonMagics(Magics):
65
66 def __init__(self, shell):
67 super(CythonMagics,self).__init__(shell)
68 self._reloads = {}
69 self._code_cache = {}
70
71 def _import_all(self, module):
72 for k,v in module.__dict__.items():
73 if not k.startswith('__'):
74 self.shell.push({k:v})
75
76 @cell_magic
77 def cython_inline(self, line, cell):
78 """Compile and run a Cython code cell using Cython.inline.
79
80 This magic simply passes the body of the cell to Cython.inline
81 and returns the result. If the variables `a` and `b` are defined
82 in the user's namespace, here is a simple example that returns
83 their sum::
84
85 %%cython_inline
86 return a+b
87
88 For most purposes, we recommend the usage of the `%%cython` magic.
89 """
90 locs = self.shell.user_global_ns
91 globs = self.shell.user_ns
92 return Cython.inline(cell, locals=locs, globals=globs)
93
94 @cell_magic
95 def cython_pyximport(self, line, cell):
96 """Compile and import a Cython code cell using pyximport.
97
98 The contents of the cell are written to a `.pyx` file in the current
99 working directory, which is then imported using `pyximport`. This
100 magic requires a module name to be passed::
101
102 %%cython_pyximport modulename
103 def f(x):
104 return 2.0*x
105
106 The compiled module is then imported and all of its symbols are
107 injected into the user's namespace. For most purposes, we recommend
108 the usage of the `%%cython` magic.
109 """
110 module_name = line.strip()
111 if not module_name:
112 raise ValueError('module name must be given')
113 fname = module_name + '.pyx'
114 with io.open(fname, 'w', encoding='utf-8') as f:
115 f.write(cell)
116 if 'pyximport' not in sys.modules:
117 import pyximport
118 pyximport.install(reload_support=True)
119 if module_name in self._reloads:
120 module = self._reloads[module_name]
121 reload(module)
122 else:
123 __import__(module_name)
124 module = sys.modules[module_name]
125 self._reloads[module_name] = module
126 self._import_all(module)
127
128 @magic_arguments.magic_arguments()
129 @magic_arguments.argument(
130 '-c', '--compile-args', action='append', default=[],
131 help="Extra flags to pass to compiler via the `extra_compile_args` "
132 "Extension flag (can be specified multiple times)."
133 )
134 @magic_arguments.argument(
135 '--link-args', action='append', default=[],
136 help="Extra flags to pass to linker via the `extra_link_args` "
137 "Extension flag (can be specified multiple times)."
138 )
139 @magic_arguments.argument(
140 '-l', '--lib', action='append', default=[],
141 help="Add a library to link the extension against (can be specified "
142 "multiple times)."
143 )
144 @magic_arguments.argument(
145 '-L', dest='library_dirs', metavar='dir', action='append', default=[],
146 help="Add a path to the list of libary directories (can be specified "
147 "multiple times)."
148 )
149 @magic_arguments.argument(
150 '-I', '--include', action='append', default=[],
151 help="Add a path to the list of include directories (can be specified "
152 "multiple times)."
153 )
154 @magic_arguments.argument(
155 '-+', '--cplus', action='store_true', default=False,
156 help="Output a C++ rather than C file."
157 )
158 @magic_arguments.argument(
159 '-f', '--force', action='store_true', default=False,
160 help="Force the compilation of a new module, even if the source has been "
161 "previously compiled."
162 )
163 @magic_arguments.argument(
164 '-a', '--annotate', action='store_true', default=False,
165 help="Produce a colorized HTML version of the source."
166 )
167 @cell_magic
168 def cython(self, line, cell):
169 """Compile and import everything from a Cython code cell.
170
171 The contents of the cell are written to a `.pyx` file in the
172 directory `IPYTHONDIR/cython` using a filename with the hash of the
173 code. This file is then cythonized and compiled. The resulting module
174 is imported and all of its symbols are injected into the user's
175 namespace. The usage is similar to that of `%%cython_pyximport` but
176 you don't have to pass a module name::
177
178 %%cython
179 def f(x):
180 return 2.0*x
181
182 To compile OpenMP codes, pass the required `--compile-args`
183 and `--link-args`. For example with gcc::
184
185 %%cython --compile-args=-fopenmp --link-args=-fopenmp
186 ...
187 """
188 args = magic_arguments.parse_argstring(self.cython, line)
189 code = cell if cell.endswith('\n') else cell+'\n'
190 lib_dir = os.path.join(self.shell.ipython_dir, 'cython')
191 quiet = True
192 key = code, sys.version_info, sys.executable, Cython.__version__
193
194 if not os.path.exists(lib_dir):
195 os.makedirs(lib_dir)
196
197 if args.force:
198 # Force a new module name by adding the current time to the
199 # key which is hashed to determine the module name.
200 key += time.time(),
201
202 module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
203 module_path = os.path.join(lib_dir, module_name + self.so_ext)
204
205 have_module = os.path.isfile(module_path)
206 need_cythonize = not have_module
207
208 if args.annotate:
209 html_file = os.path.join(lib_dir, module_name + '.html')
210 if not os.path.isfile(html_file):
211 need_cythonize = True
212
213 if need_cythonize:
214 c_include_dirs = args.include
215 if 'numpy' in code:
216 import numpy
217 c_include_dirs.append(numpy.get_include())
218 pyx_file = os.path.join(lib_dir, module_name + '.pyx')
219 pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
220 with io.open(pyx_file, 'w', encoding='utf-8') as f:
221 f.write(code)
222 extension = Extension(
223 name = module_name,
224 sources = [pyx_file],
225 include_dirs = c_include_dirs,
226 library_dirs = args.library_dirs,
227 extra_compile_args = args.compile_args,
228 extra_link_args = args.link_args,
229 libraries = args.lib,
230 language = 'c++' if args.cplus else 'c',
231 )
232 build_extension = self._get_build_extension()
233 try:
234 opts = dict(
235 quiet=quiet,
236 annotate = args.annotate,
237 force = True,
238 )
239 build_extension.extensions = cythonize([extension], **opts)
240 except CompileError:
241 return
242
243 if not have_module:
244 build_extension.build_temp = os.path.dirname(pyx_file)
245 build_extension.build_lib = lib_dir
246 build_extension.run()
247 self._code_cache[key] = module_name
248
249 module = imp.load_dynamic(module_name, module_path)
250 self._import_all(module)
251
252 if args.annotate:
253 try:
254 with io.open(html_file, encoding='utf-8') as f:
255 annotated_html = f.read()
256 except IOError as e:
257 # File could not be opened. Most likely the user has a version
258 # of Cython before 0.15.1 (when `cythonize` learned the
259 # `force` keyword argument) and has already compiled this
260 # exact source without annotation.
261 print('Cython completed successfully but the annotated '
262 'source could not be read.', file=sys.stderr)
263 print(e, file=sys.stderr)
264 else:
265 return display.HTML(self.clean_annotated_html(annotated_html))
266
267 @property
268 def so_ext(self):
269 """The extension suffix for compiled modules."""
270 try:
271 return self._so_ext
272 except AttributeError:
273 self._so_ext = self._get_build_extension().get_ext_filename('')
274 return self._so_ext
275
276 def _get_build_extension(self):
277 dist = Distribution()
278 config_files = dist.find_config_files()
279 try:
280 config_files.remove('setup.cfg')
281 except ValueError:
282 pass
283 dist.parse_config_files(config_files)
284 build_extension = build_ext(dist)
285 build_extension.finalize_options()
286 return build_extension
287
288 @staticmethod
289 def clean_annotated_html(html):
290 """Clean up the annotated HTML source.
291
292 Strips the link to the generated C or C++ file, which we do not
293 present to the user.
294 """
295 r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
296 html = '\n'.join(l for l in html.splitlines() if not r.match(l))
297 return html
298
299 __doc__ = __doc__.format(
300 CYTHON_DOC = ' '*8 + CythonMagics.cython.__doc__,
301 CYTHON_INLINE_DOC = ' '*8 + CythonMagics.cython_inline.__doc__,
302 CYTHON_PYXIMPORT_DOC = ' '*8 + CythonMagics.cython_pyximport.__doc__,
303 )
304
305 def load_ipython_extension(ip):
306 """Load the extension in IPython."""
307 ip.register_magics(CythonMagics)
308
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py
--- a/IPython/extensions/cythonmagic.py
+++ b/IPython/extensions/cythonmagic.py
@@ -41,6 +41,11 @@
import sys
import time
+try:
+ reload
+except NameError: # Python 3
+ from imp import reload
+
try:
import hashlib
except ImportError:
|
{"golden_diff": "diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py\n--- a/IPython/extensions/cythonmagic.py\n+++ b/IPython/extensions/cythonmagic.py\n@@ -41,6 +41,11 @@\n import sys\n import time\n \n+try:\n+ reload\n+except NameError: # Python 3\n+ from imp import reload\n+\n try:\n import hashlib\n except ImportError:\n", "issue": "cython_pyximport reload broken in python3\npython3.3 notebook, tested in 0.13.1 but the code looks the same in HEAD:\n\n%%cython_pyximport foo\ndef f(x):\n return 4.0*x\n\nexecute twice and you get \n\n```\n/usr/lib/python3/dist-packages/IPython/extensions/cythonmagic.py in cython_pyximport(self, line, cell)\n 99 if module_name in self._reloads:\n 100 module = self._reloads[module_name]\n--> 101 reload(module)\n 102 else:\n 103 __import__(module_name)\n\nNameError: global name 'reload' is not defined\n```\n\nimp.reload should be used here\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n=====================\nCython related magics\n=====================\n\nUsage\n=====\n\n``%%cython``\n\n{CYTHON_DOC}\n\n``%%cython_inline``\n\n{CYTHON_INLINE_DOC}\n\n``%%cython_pyximport``\n\n{CYTHON_PYXIMPORT_DOC}\n\nAuthor:\n* Brian Granger\n\nParts of this code were taken from Cython.inline.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport imp\nimport io\nimport os\nimport re\nimport sys\nimport time\n\ntry:\n import hashlib\nexcept ImportError:\n import md5 as hashlib\n\nfrom distutils.core import Distribution, Extension\nfrom distutils.command.build_ext import build_ext\n\nfrom IPython.core import display\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, cell_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils import py3compat\n\nimport Cython\nfrom Cython.Compiler.Errors import CompileError\nfrom Cython.Build.Dependencies import cythonize\n\n\n@magics_class\nclass CythonMagics(Magics):\n\n def __init__(self, shell):\n super(CythonMagics,self).__init__(shell)\n self._reloads = {}\n self._code_cache = {}\n\n def _import_all(self, module):\n for k,v in module.__dict__.items():\n if not k.startswith('__'):\n self.shell.push({k:v})\n\n @cell_magic\n def cython_inline(self, line, cell):\n \"\"\"Compile and run a Cython code cell using Cython.inline.\n\n This magic simply passes the body of the cell to Cython.inline\n and returns the result. If the variables `a` and `b` are defined\n in the user's namespace, here is a simple example that returns\n their sum::\n\n %%cython_inline\n return a+b\n\n For most purposes, we recommend the usage of the `%%cython` magic.\n \"\"\"\n locs = self.shell.user_global_ns\n globs = self.shell.user_ns\n return Cython.inline(cell, locals=locs, globals=globs)\n\n @cell_magic\n def cython_pyximport(self, line, cell):\n \"\"\"Compile and import a Cython code cell using pyximport.\n\n The contents of the cell are written to a `.pyx` file in the current\n working directory, which is then imported using `pyximport`. 
This\n magic requires a module name to be passed::\n\n %%cython_pyximport modulename\n def f(x):\n return 2.0*x\n\n The compiled module is then imported and all of its symbols are\n injected into the user's namespace. For most purposes, we recommend\n the usage of the `%%cython` magic.\n \"\"\"\n module_name = line.strip()\n if not module_name:\n raise ValueError('module name must be given')\n fname = module_name + '.pyx'\n with io.open(fname, 'w', encoding='utf-8') as f:\n f.write(cell)\n if 'pyximport' not in sys.modules:\n import pyximport\n pyximport.install(reload_support=True)\n if module_name in self._reloads:\n module = self._reloads[module_name]\n reload(module)\n else:\n __import__(module_name)\n module = sys.modules[module_name]\n self._reloads[module_name] = module\n self._import_all(module)\n\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '-c', '--compile-args', action='append', default=[],\n help=\"Extra flags to pass to compiler via the `extra_compile_args` \"\n \"Extension flag (can be specified multiple times).\"\n )\n @magic_arguments.argument(\n '--link-args', action='append', default=[],\n help=\"Extra flags to pass to linker via the `extra_link_args` \"\n \"Extension flag (can be specified multiple times).\"\n )\n @magic_arguments.argument(\n '-l', '--lib', action='append', default=[],\n help=\"Add a library to link the extension against (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-L', dest='library_dirs', metavar='dir', action='append', default=[],\n help=\"Add a path to the list of libary directories (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-I', '--include', action='append', default=[],\n help=\"Add a path to the list of include directories (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-+', '--cplus', action='store_true', default=False,\n help=\"Output a C++ rather than C file.\"\n )\n @magic_arguments.argument(\n '-f', '--force', action='store_true', default=False,\n help=\"Force the compilation of a new module, even if the source has been \"\n \"previously compiled.\"\n )\n @magic_arguments.argument(\n '-a', '--annotate', action='store_true', default=False,\n help=\"Produce a colorized HTML version of the source.\"\n )\n @cell_magic\n def cython(self, line, cell):\n \"\"\"Compile and import everything from a Cython code cell.\n\n The contents of the cell are written to a `.pyx` file in the\n directory `IPYTHONDIR/cython` using a filename with the hash of the\n code. This file is then cythonized and compiled. The resulting module\n is imported and all of its symbols are injected into the user's\n namespace. The usage is similar to that of `%%cython_pyximport` but\n you don't have to pass a module name::\n\n %%cython\n def f(x):\n return 2.0*x\n\n To compile OpenMP codes, pass the required `--compile-args`\n and `--link-args`. 
For example with gcc::\n\n %%cython --compile-args=-fopenmp --link-args=-fopenmp\n ...\n \"\"\"\n args = magic_arguments.parse_argstring(self.cython, line)\n code = cell if cell.endswith('\\n') else cell+'\\n'\n lib_dir = os.path.join(self.shell.ipython_dir, 'cython')\n quiet = True\n key = code, sys.version_info, sys.executable, Cython.__version__\n\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n\n if args.force:\n # Force a new module name by adding the current time to the\n # key which is hashed to determine the module name.\n key += time.time(),\n\n module_name = \"_cython_magic_\" + hashlib.md5(str(key).encode('utf-8')).hexdigest()\n module_path = os.path.join(lib_dir, module_name + self.so_ext)\n\n have_module = os.path.isfile(module_path)\n need_cythonize = not have_module\n\n if args.annotate:\n html_file = os.path.join(lib_dir, module_name + '.html')\n if not os.path.isfile(html_file):\n need_cythonize = True\n\n if need_cythonize:\n c_include_dirs = args.include\n if 'numpy' in code:\n import numpy\n c_include_dirs.append(numpy.get_include())\n pyx_file = os.path.join(lib_dir, module_name + '.pyx')\n pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())\n with io.open(pyx_file, 'w', encoding='utf-8') as f:\n f.write(code)\n extension = Extension(\n name = module_name,\n sources = [pyx_file],\n include_dirs = c_include_dirs,\n library_dirs = args.library_dirs,\n extra_compile_args = args.compile_args,\n extra_link_args = args.link_args,\n libraries = args.lib,\n language = 'c++' if args.cplus else 'c',\n )\n build_extension = self._get_build_extension()\n try:\n opts = dict(\n quiet=quiet,\n annotate = args.annotate,\n force = True,\n )\n build_extension.extensions = cythonize([extension], **opts)\n except CompileError:\n return\n\n if not have_module:\n build_extension.build_temp = os.path.dirname(pyx_file)\n build_extension.build_lib = lib_dir\n build_extension.run()\n self._code_cache[key] = module_name\n\n module = imp.load_dynamic(module_name, module_path)\n self._import_all(module)\n\n if args.annotate:\n try:\n with io.open(html_file, encoding='utf-8') as f:\n annotated_html = f.read()\n except IOError as e:\n # File could not be opened. 
Most likely the user has a version\n # of Cython before 0.15.1 (when `cythonize` learned the\n # `force` keyword argument) and has already compiled this\n # exact source without annotation.\n print('Cython completed successfully but the annotated '\n 'source could not be read.', file=sys.stderr)\n print(e, file=sys.stderr)\n else:\n return display.HTML(self.clean_annotated_html(annotated_html))\n\n @property\n def so_ext(self):\n \"\"\"The extension suffix for compiled modules.\"\"\"\n try:\n return self._so_ext\n except AttributeError:\n self._so_ext = self._get_build_extension().get_ext_filename('')\n return self._so_ext\n\n def _get_build_extension(self):\n dist = Distribution()\n config_files = dist.find_config_files()\n try:\n config_files.remove('setup.cfg')\n except ValueError:\n pass\n dist.parse_config_files(config_files)\n build_extension = build_ext(dist)\n build_extension.finalize_options()\n return build_extension\n\n @staticmethod\n def clean_annotated_html(html):\n \"\"\"Clean up the annotated HTML source.\n\n Strips the link to the generated C or C++ file, which we do not\n present to the user.\n \"\"\"\n r = re.compile('<p>Raw output: <a href=\"(.*)\">(.*)</a>')\n html = '\\n'.join(l for l in html.splitlines() if not r.match(l))\n return html\n\n__doc__ = __doc__.format(\n CYTHON_DOC = ' '*8 + CythonMagics.cython.__doc__,\n CYTHON_INLINE_DOC = ' '*8 + CythonMagics.cython_inline.__doc__,\n CYTHON_PYXIMPORT_DOC = ' '*8 + CythonMagics.cython_pyximport.__doc__,\n)\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n ip.register_magics(CythonMagics)\n", "path": "IPython/extensions/cythonmagic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n=====================\nCython related magics\n=====================\n\nUsage\n=====\n\n``%%cython``\n\n{CYTHON_DOC}\n\n``%%cython_inline``\n\n{CYTHON_INLINE_DOC}\n\n``%%cython_pyximport``\n\n{CYTHON_PYXIMPORT_DOC}\n\nAuthor:\n* Brian Granger\n\nParts of this code were taken from Cython.inline.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport imp\nimport io\nimport os\nimport re\nimport sys\nimport time\n\ntry:\n reload\nexcept NameError: # Python 3\n from imp import reload\n\ntry:\n import hashlib\nexcept ImportError:\n import md5 as hashlib\n\nfrom distutils.core import Distribution, Extension\nfrom distutils.command.build_ext import build_ext\n\nfrom IPython.core import display\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, cell_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils import py3compat\n\nimport Cython\nfrom Cython.Compiler.Errors import CompileError\nfrom Cython.Build.Dependencies import cythonize\n\n\n@magics_class\nclass CythonMagics(Magics):\n\n def __init__(self, shell):\n super(CythonMagics,self).__init__(shell)\n self._reloads = {}\n self._code_cache = {}\n\n def _import_all(self, module):\n for k,v in module.__dict__.items():\n if not k.startswith('__'):\n self.shell.push({k:v})\n\n @cell_magic\n def cython_inline(self, line, cell):\n \"\"\"Compile and run a Cython code cell using Cython.inline.\n\n This magic simply passes the body 
of the cell to Cython.inline\n and returns the result. If the variables `a` and `b` are defined\n in the user's namespace, here is a simple example that returns\n their sum::\n\n %%cython_inline\n return a+b\n\n For most purposes, we recommend the usage of the `%%cython` magic.\n \"\"\"\n locs = self.shell.user_global_ns\n globs = self.shell.user_ns\n return Cython.inline(cell, locals=locs, globals=globs)\n\n @cell_magic\n def cython_pyximport(self, line, cell):\n \"\"\"Compile and import a Cython code cell using pyximport.\n\n The contents of the cell are written to a `.pyx` file in the current\n working directory, which is then imported using `pyximport`. This\n magic requires a module name to be passed::\n\n %%cython_pyximport modulename\n def f(x):\n return 2.0*x\n\n The compiled module is then imported and all of its symbols are\n injected into the user's namespace. For most purposes, we recommend\n the usage of the `%%cython` magic.\n \"\"\"\n module_name = line.strip()\n if not module_name:\n raise ValueError('module name must be given')\n fname = module_name + '.pyx'\n with io.open(fname, 'w', encoding='utf-8') as f:\n f.write(cell)\n if 'pyximport' not in sys.modules:\n import pyximport\n pyximport.install(reload_support=True)\n if module_name in self._reloads:\n module = self._reloads[module_name]\n reload(module)\n else:\n __import__(module_name)\n module = sys.modules[module_name]\n self._reloads[module_name] = module\n self._import_all(module)\n\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '-c', '--compile-args', action='append', default=[],\n help=\"Extra flags to pass to compiler via the `extra_compile_args` \"\n \"Extension flag (can be specified multiple times).\"\n )\n @magic_arguments.argument(\n '--link-args', action='append', default=[],\n help=\"Extra flags to pass to linker via the `extra_link_args` \"\n \"Extension flag (can be specified multiple times).\"\n )\n @magic_arguments.argument(\n '-l', '--lib', action='append', default=[],\n help=\"Add a library to link the extension against (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-L', dest='library_dirs', metavar='dir', action='append', default=[],\n help=\"Add a path to the list of libary directories (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-I', '--include', action='append', default=[],\n help=\"Add a path to the list of include directories (can be specified \"\n \"multiple times).\"\n )\n @magic_arguments.argument(\n '-+', '--cplus', action='store_true', default=False,\n help=\"Output a C++ rather than C file.\"\n )\n @magic_arguments.argument(\n '-f', '--force', action='store_true', default=False,\n help=\"Force the compilation of a new module, even if the source has been \"\n \"previously compiled.\"\n )\n @magic_arguments.argument(\n '-a', '--annotate', action='store_true', default=False,\n help=\"Produce a colorized HTML version of the source.\"\n )\n @cell_magic\n def cython(self, line, cell):\n \"\"\"Compile and import everything from a Cython code cell.\n\n The contents of the cell are written to a `.pyx` file in the\n directory `IPYTHONDIR/cython` using a filename with the hash of the\n code. This file is then cythonized and compiled. The resulting module\n is imported and all of its symbols are injected into the user's\n namespace. 
The usage is similar to that of `%%cython_pyximport` but\n you don't have to pass a module name::\n\n %%cython\n def f(x):\n return 2.0*x\n\n To compile OpenMP codes, pass the required `--compile-args`\n and `--link-args`. For example with gcc::\n\n %%cython --compile-args=-fopenmp --link-args=-fopenmp\n ...\n \"\"\"\n args = magic_arguments.parse_argstring(self.cython, line)\n code = cell if cell.endswith('\\n') else cell+'\\n'\n lib_dir = os.path.join(self.shell.ipython_dir, 'cython')\n quiet = True\n key = code, sys.version_info, sys.executable, Cython.__version__\n\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n\n if args.force:\n # Force a new module name by adding the current time to the\n # key which is hashed to determine the module name.\n key += time.time(),\n\n module_name = \"_cython_magic_\" + hashlib.md5(str(key).encode('utf-8')).hexdigest()\n module_path = os.path.join(lib_dir, module_name + self.so_ext)\n\n have_module = os.path.isfile(module_path)\n need_cythonize = not have_module\n\n if args.annotate:\n html_file = os.path.join(lib_dir, module_name + '.html')\n if not os.path.isfile(html_file):\n need_cythonize = True\n\n if need_cythonize:\n c_include_dirs = args.include\n if 'numpy' in code:\n import numpy\n c_include_dirs.append(numpy.get_include())\n pyx_file = os.path.join(lib_dir, module_name + '.pyx')\n pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())\n with io.open(pyx_file, 'w', encoding='utf-8') as f:\n f.write(code)\n extension = Extension(\n name = module_name,\n sources = [pyx_file],\n include_dirs = c_include_dirs,\n library_dirs = args.library_dirs,\n extra_compile_args = args.compile_args,\n extra_link_args = args.link_args,\n libraries = args.lib,\n language = 'c++' if args.cplus else 'c',\n )\n build_extension = self._get_build_extension()\n try:\n opts = dict(\n quiet=quiet,\n annotate = args.annotate,\n force = True,\n )\n build_extension.extensions = cythonize([extension], **opts)\n except CompileError:\n return\n\n if not have_module:\n build_extension.build_temp = os.path.dirname(pyx_file)\n build_extension.build_lib = lib_dir\n build_extension.run()\n self._code_cache[key] = module_name\n\n module = imp.load_dynamic(module_name, module_path)\n self._import_all(module)\n\n if args.annotate:\n try:\n with io.open(html_file, encoding='utf-8') as f:\n annotated_html = f.read()\n except IOError as e:\n # File could not be opened. 
Most likely the user has a version\n # of Cython before 0.15.1 (when `cythonize` learned the\n # `force` keyword argument) and has already compiled this\n # exact source without annotation.\n print('Cython completed successfully but the annotated '\n 'source could not be read.', file=sys.stderr)\n print(e, file=sys.stderr)\n else:\n return display.HTML(self.clean_annotated_html(annotated_html))\n\n @property\n def so_ext(self):\n \"\"\"The extension suffix for compiled modules.\"\"\"\n try:\n return self._so_ext\n except AttributeError:\n self._so_ext = self._get_build_extension().get_ext_filename('')\n return self._so_ext\n\n def _get_build_extension(self):\n dist = Distribution()\n config_files = dist.find_config_files()\n try:\n config_files.remove('setup.cfg')\n except ValueError:\n pass\n dist.parse_config_files(config_files)\n build_extension = build_ext(dist)\n build_extension.finalize_options()\n return build_extension\n\n @staticmethod\n def clean_annotated_html(html):\n \"\"\"Clean up the annotated HTML source.\n\n Strips the link to the generated C or C++ file, which we do not\n present to the user.\n \"\"\"\n r = re.compile('<p>Raw output: <a href=\"(.*)\">(.*)</a>')\n html = '\\n'.join(l for l in html.splitlines() if not r.match(l))\n return html\n\n__doc__ = __doc__.format(\n CYTHON_DOC = ' '*8 + CythonMagics.cython.__doc__,\n CYTHON_INLINE_DOC = ' '*8 + CythonMagics.cython_inline.__doc__,\n CYTHON_PYXIMPORT_DOC = ' '*8 + CythonMagics.cython_pyximport.__doc__,\n)\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n ip.register_magics(CythonMagics)\n", "path": "IPython/extensions/cythonmagic.py"}]}
| 3,644 | 94 |
gh_patches_debug_19687
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-1625
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No module named 'parlai_internal'
https://parl.ai/projects/wizard_of_wikipedia/
When running ```python projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py``` I get the following error:
```
Traceback (most recent call last):
File "projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py", line 48, in <module>
eval_model(parser)
File "/home/ml/jwang301/Development/ParlAI/parlai/scripts/eval_model.py", line 68, in eval_model
agent = create_agent(opt, requireModelExists=True)
File "/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py", line 554, in create_agent
model = load_agent_module(opt)
File "/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py", line 407, in load_agent_module
model_class = get_agent_module(new_opt['model'])
File "/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py", line 516, in get_agent_module
my_module = importlib.import_module(module_name)
File "/home/ml/jwang301/anaconda2/envs/ParlAI/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 941, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 941, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 941, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'parlai_internal'
```
I'm assuming this is accidental since the wiki is public.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 from parlai.core.params import ParlaiParser
7 from parlai.scripts.eval_model import eval_model
8 from parlai.zoo.wizard_of_wikipedia\
9 .full_dialogue_retrieval_model import download
10 from projects.wizard_of_wikipedia.wizard_transformer_ranker\
11 .wizard_transformer_ranker import WizardTransformerRankerAgent
12
13 """Evaluate pre-trained retrieval model on the full Wizard Dialogue task.
14
15 NOTE: Metrics here differ slightly to those reported in the paper as a result
16 of code changes.
17
18 Results on seen test set:
19 Hits@1/100: 86.7
20
21 Results on unseen test set (run with flag
22 `-t wizard_of_wikipedia:WizardDialogKnowledge:topic_split`):
23 Hits@1/100: 68.96
24 """
25
26 if __name__ == '__main__':
27 parser = ParlaiParser(add_model_args=True)
28 parser.add_argument('-n', '--num-examples', default=100000000)
29 parser.add_argument('-d', '--display-examples', type='bool', default=False)
30 parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
31 WizardTransformerRankerAgent.add_cmdline_args(parser)
32 parser.set_defaults(
33 task='wizard_of_wikipedia',
34 model='projects:wizard_of_wikipedia:wizard_transformer_ranker',
35 model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',
36 datatype='test',
37 n_heads=6,
38 ffn_size=1200,
39 embeddings_scale=False,
40 delimiter=' __SOC__ ',
41 n_positions=1000,
42 legacy=True
43 )
44
45 opt = parser.parse_args()
46 download(opt['datapath']) # download pretrained retrieval model
47
48 eval_model(parser)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py b/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py
--- a/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py
+++ b/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py
@@ -29,7 +29,7 @@
parser.add_argument('-d', '--display-examples', type='bool', default=False)
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
WizardTransformerRankerAgent.add_cmdline_args(parser)
- parser.set_defaults(
+ parser.set_params(
task='wizard_of_wikipedia',
model='projects:wizard_of_wikipedia:wizard_transformer_ranker',
model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',
@@ -45,4 +45,4 @@
opt = parser.parse_args()
download(opt['datapath']) # download pretrained retrieval model
- eval_model(parser)
+ eval_model(opt)
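
As a minimal sketch of why this patch resolves the `parlai_internal` import error — assuming, as the traceback suggests, that the pretrained model file stores a `model` option pointing at a private `parlai_internal` agent, and that `ParlaiParser.set_params` (unlike `set_defaults`) registers its values as overrides of options loaded from the model file:

```python
from parlai.core.params import ParlaiParser
from parlai.scripts.eval_model import eval_model

parser = ParlaiParser(add_model_args=True)
# set_params is assumed to force these values to take precedence over whatever
# options were saved with the pretrained model, so the public 'projects:...'
# agent is loaded instead of the private parlai_internal one.
parser.set_params(
    model='projects:wizard_of_wikipedia:wizard_transformer_ranker',
    model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',
)
opt = parser.parse_args()
eval_model(opt)  # eval_model consumes the parsed opt dict, not the parser object
```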
|
{"golden_diff": "diff --git a/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py b/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py\n--- a/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py\n+++ b/projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py\n@@ -29,7 +29,7 @@\n parser.add_argument('-d', '--display-examples', type='bool', default=False)\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n WizardTransformerRankerAgent.add_cmdline_args(parser)\n- parser.set_defaults(\n+ parser.set_params(\n task='wizard_of_wikipedia',\n model='projects:wizard_of_wikipedia:wizard_transformer_ranker',\n model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',\n@@ -45,4 +45,4 @@\n opt = parser.parse_args()\n download(opt['datapath']) # download pretrained retrieval model\n \n- eval_model(parser)\n+ eval_model(opt)\n", "issue": "No module named 'parlai_internal'\nhttps://parl.ai/projects/wizard_of_wikipedia/\r\n\r\nWhen running ```python projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py``` I get the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py\", line 48, in <module>\r\n eval_model(parser)\r\n File \"/home/ml/jwang301/Development/ParlAI/parlai/scripts/eval_model.py\", line 68, in eval_model\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py\", line 554, in create_agent\r\n model = load_agent_module(opt)\r\n File \"/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py\", line 407, in load_agent_module\r\n model_class = get_agent_module(new_opt['model'])\r\n File \"/home/ml/jwang301/Development/ParlAI/parlai/core/agents.py\", line 516, in get_agent_module\r\n my_module = importlib.import_module(module_name)\r\n File \"/home/ml/jwang301/anaconda2/envs/ParlAI/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 941, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 941, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 941, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 953, in _find_and_load_unlocked\r\nModuleNotFoundError: No module named 'parlai_internal'\r\n```\r\n\r\nI'm assuming this is accidental since the wiki is public. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom parlai.core.params import ParlaiParser\nfrom parlai.scripts.eval_model import eval_model\nfrom parlai.zoo.wizard_of_wikipedia\\\n .full_dialogue_retrieval_model import download\nfrom projects.wizard_of_wikipedia.wizard_transformer_ranker\\\n .wizard_transformer_ranker import WizardTransformerRankerAgent\n\n\"\"\"Evaluate pre-trained retrieval model on the full Wizard Dialogue task.\n\nNOTE: Metrics here differ slightly to those reported in the paper as a result\nof code changes.\n\nResults on seen test set:\nHits@1/100: 86.7\n\nResults on unseen test set (run with flag\n`-t wizard_of_wikipedia:WizardDialogKnowledge:topic_split`):\nHits@1/100: 68.96\n\"\"\"\n\nif __name__ == '__main__':\n parser = ParlaiParser(add_model_args=True)\n parser.add_argument('-n', '--num-examples', default=100000000)\n parser.add_argument('-d', '--display-examples', type='bool', default=False)\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n WizardTransformerRankerAgent.add_cmdline_args(parser)\n parser.set_defaults(\n task='wizard_of_wikipedia',\n model='projects:wizard_of_wikipedia:wizard_transformer_ranker',\n model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',\n datatype='test',\n n_heads=6,\n ffn_size=1200,\n embeddings_scale=False,\n delimiter=' __SOC__ ',\n n_positions=1000,\n legacy=True\n )\n\n opt = parser.parse_args()\n download(opt['datapath']) # download pretrained retrieval model\n\n eval_model(parser)\n", "path": "projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom parlai.core.params import ParlaiParser\nfrom parlai.scripts.eval_model import eval_model\nfrom parlai.zoo.wizard_of_wikipedia\\\n .full_dialogue_retrieval_model import download\nfrom projects.wizard_of_wikipedia.wizard_transformer_ranker\\\n .wizard_transformer_ranker import WizardTransformerRankerAgent\n\n\"\"\"Evaluate pre-trained retrieval model on the full Wizard Dialogue task.\n\nNOTE: Metrics here differ slightly to those reported in the paper as a result\nof code changes.\n\nResults on seen test set:\nHits@1/100: 86.7\n\nResults on unseen test set (run with flag\n`-t wizard_of_wikipedia:WizardDialogKnowledge:topic_split`):\nHits@1/100: 68.96\n\"\"\"\n\nif __name__ == '__main__':\n parser = ParlaiParser(add_model_args=True)\n parser.add_argument('-n', '--num-examples', default=100000000)\n parser.add_argument('-d', '--display-examples', type='bool', default=False)\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)\n WizardTransformerRankerAgent.add_cmdline_args(parser)\n parser.set_params(\n task='wizard_of_wikipedia',\n model='projects:wizard_of_wikipedia:wizard_transformer_ranker',\n model_file='models:wizard_of_wikipedia/full_dialogue_retrieval_model/model',\n datatype='test',\n n_heads=6,\n ffn_size=1200,\n embeddings_scale=False,\n delimiter=' __SOC__ ',\n n_positions=1000,\n legacy=True\n )\n\n opt = parser.parse_args()\n download(opt['datapath']) # download pretrained retrieval model\n\n eval_model(opt)\n", "path": "projects/wizard_of_wikipedia/scripts/eval_retrieval_model.py"}]}
| 1,490 | 234 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.