problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_9123 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aarch64.linux.pushstr_array is broken
There's also a missing colon after the `if` statement, and it seems this was never finished since there's no `%endif`.
https://github.com/Gallopsled/pwntools/blob/813749493288fd7330b7b44b81a7f2f7a8a02dac/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm#L76-L79
```
>>> shellcraft.pushstr_array(['hello', 'world'])
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)
248 if self.filesystem_checks:
--> 249 return self._check(uri, self._collection[uri])
250 else:
KeyError: 'aarch64/pushstr_array.asm'
During handling of the above exception, another exception occurred:
CompileException Traceback (most recent call last)
<ipython-input-2-b064486e8883> in <module>
----> 1 shellcraft.pushstr_array(['hello', 'world'])
~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)
84 for m in self._context_modules():
85 try:
---> 86 return getattr(m, key)
87 except AttributeError:
88 pass
~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)
78 # This function lazy-loads the shellcodes
79 if key in self._shellcodes:
---> 80 real = internal.make_function(key, self._shellcodes[key], self._dir)
81 setattr(self, key, real)
82 return real
~/pwntools/pwnlib/shellcraft/internal.py in make_function(funcname, filename, directory)
112 import inspect
113 path = os.path.join(directory, filename)
--> 114 template = lookup_template(path)
115
116 args, varargs, keywords, defaults = inspect.getargspec(template.module.render_body)
~/pwntools/pwnlib/shellcraft/internal.py in lookup_template(filename)
80
81 if filename not in loaded:
---> 82 loaded[filename] = lookup.get_template(filename)
83
84 return loaded[filename]
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)
258 srcfile = posixpath.normpath(posixpath.join(dir_, u))
259 if os.path.isfile(srcfile):
--> 260 return self._load(srcfile, uri)
261 else:
262 raise exceptions.TopLevelLookupException(
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in _load(self, filename, uri)
320 else:
321 module_filename = None
--> 322 self._collection[uri] = template = Template(
323 uri=uri,
324 filename=posixpath.normpath(filename),
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in __init__(self, text, filename, uri, format_exceptions, error_handler, lookup, output_encoding, encoding_errors, module_directory, cache_args, cache_impl, cache_enabled, cache_type, cache_dir, cache_url, module_filename, input_encoding, disable_unicode, module_writer, bytestring_passthrough, default_filters, buffer_filters, strict_undefined, imports, future_imports, enable_loop, preprocessor, lexer_cls, include_error_handler)
346 else:
347 path = None
--> 348 module = self._compile_from_file(path, filename)
349 else:
350 raise exceptions.RuntimeException(
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_from_file(self, path, filename)
428 # in memory
429 data = util.read_file(filename)
--> 430 code, module = _compile_text(self, data, filename)
431 self._source = None
432 self._code = code
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_text(template, text, filename)
731 def _compile_text(template, text, filename):
732 identifier = template.module_id
--> 733 source, lexer = _compile(
734 template,
735 text,
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile(template, text, filename, generate_magic_comment)
710 preprocessor=template.preprocessor,
711 )
--> 712 node = lexer.parse()
713 source = codegen.compile(
714 node,
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in parse(self)
262 if self.match_expression():
263 continue
--> 264 if self.match_control_line():
265 continue
266 if self.match_comment():
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in match_control_line(self)
474 **self.exception_kwargs
475 )
--> 476 self.append_node(parsetree.ControlLine, keyword, isend, text)
477 else:
478 self.append_node(parsetree.Comment, text)
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in append_node(self, nodecls, *args, **kwargs)
150 kwargs.setdefault("pos", self.matched_charpos)
151 kwargs["filename"] = self.filename
--> 152 node = nodecls(*args, **kwargs)
153 if len(self.tag):
154 self.tag[-1].nodes.append(node)
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/parsetree.py in __init__(self, keyword, isend, text, **kwargs)
91 self._undeclared_identifiers = []
92 else:
---> 93 code = ast.PythonFragment(text, **self.exception_kwargs)
94 self._declared_identifiers = code.declared_identifiers
95 self._undeclared_identifiers = code.undeclared_identifiers
~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/ast.py in __init__(self, code, **exception_kwargs)
87 m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S)
88 if not m:
---> 89 raise exceptions.CompileException(
90 "Fragment '%s' is not a partial control statement" % code,
91 **exception_kwargs
CompileException: Fragment 'if len(array[-1] != 'sp')' is not a partial control statement in file '/Users/heapcrash/pwntools/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm' at line: 76 char: 1
```
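For illustration, here is a minimal sketch of how Mako rejects that control line. It assumes only that the `mako` package is installed; the fragment text is copied from the `CompileException` in the traceback above, and the snippet is not part of the repository files shown below.
```python
from mako.template import Template
from mako import exceptions

# The control line is missing its trailing colon, so Mako's lexer cannot
# parse it as a partial control statement.
broken = "% if len(array[-1] != 'sp')\n% endif\n"

try:
    Template(broken)
except exceptions.CompileException as exc:
    # e.g. Fragment 'if len(array[-1] != 'sp')' is not a partial control statement
    print(exc)
```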
</issue>
<code>
[start of pwnlib/version.py]
1 __version__ = '4.1.5'
2
[end of pwnlib/version.py]
[start of setup.py]
1 #!/usr/bin/env python
2 from __future__ import print_function
3
4 import glob
5 import os
6 import platform
7 import subprocess
8 import sys
9 import traceback
10 from distutils.command.install import INSTALL_SCHEMES
11 from distutils.sysconfig import get_python_inc
12 from distutils.util import convert_path
13
14 from setuptools import find_packages
15 from setuptools import setup
16
17 # Get all template files
18 templates = []
19 for dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):
20 for f in filenames:
21 templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))
22
23 # This makes pwntools-LICENSE.txt appear with the package folders
24 for scheme in INSTALL_SCHEMES.values():
25 scheme['data'] = scheme['purelib']
26
27 console_scripts = ['pwn=pwnlib.commandline.main:main']
28
29 # Find all of the ancillary console scripts
30 # We have a magic flag --include-all-scripts
31 flag = '--only-use-pwn-command'
32 if flag in sys.argv:
33 sys.argv.remove(flag)
34 else:
35 flag = False
36
37 for filename in glob.glob('pwnlib/commandline/*'):
38 filename = os.path.basename(filename)
39 filename, ext = os.path.splitext(filename)
40
41 if ext != '.py' or '__init__' in filename:
42 continue
43
44 script = '%s=pwnlib.commandline.common:main' % filename
45 if not flag:
46 console_scripts.append(script)
47
48 install_requires = ['paramiko>=1.15.2',
49 'mako>=1.0.0',
50 'pyelftools>=0.2.4',
51 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160
52 'ropgadget>=5.3',
53 'pyserial>=2.7',
54 'requests>=2.0',
55 'pip>=6.0.8',
56 'pygments>=2.0',
57 'pysocks',
58 'python-dateutil',
59 'packaging',
60 'psutil>=3.3.0',
61 'intervaltree>=3.0',
62 'sortedcontainers',
63 'unicorn>=1.0.2rc1,<1.0.2rc4', # see unicorn-engine/unicorn#1100, unicorn-engine/unicorn#1170, Gallopsled/pwntools#1538
64 'six>=1.12.0',
65 ]
66
67 # Check that the user has installed the Python development headers
68 PythonH = os.path.join(get_python_inc(), 'Python.h')
69 if not os.path.exists(PythonH):
70 print("You must install the Python development headers!", file=sys.stderr)
71 print("$ apt-get install python-dev", file=sys.stderr)
72 sys.exit(-1)
73
74 # Convert README.md to reStructuredText for PyPI
75 long_description = ''
76 try:
77 long_description = subprocess.check_output(['pandoc', 'README.md', '--to=rst'], universal_newlines=True)
78 except Exception as e:
79 print("Failed to convert README.md through pandoc, proceeding anyway", file=sys.stderr)
80 traceback.print_exc()
81
82 setup(
83 name = 'pwntools',
84 python_requires = '>=2.7',
85 packages = find_packages(),
86 version = '4.1.5',
87 data_files = [('',
88 glob.glob('*.md') + glob.glob('*.txt')),
89 ],
90 package_data = {
91 'pwnlib': [
92 'data/crcsums.txt',
93 'data/useragents/useragents.txt',
94 'data/binutils/*',
95 'data/includes/*.h',
96 'data/includes/*/*.h',
97 'data/templates/*.mako',
98 ] + templates,
99 },
100 entry_points = {'console_scripts': console_scripts},
101 scripts = glob.glob("bin/*"),
102 description = "Pwntools CTF framework and exploit development library.",
103 long_description = long_description,
104 author = "Gallopsled et al.",
105 author_email = "[email protected]",
106 url = 'https://pwntools.com',
107 download_url = "https://github.com/Gallopsled/pwntools/releases",
108 install_requires = install_requires,
109 license = "Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt",
110 keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',
111 classifiers = [
112 'Development Status :: 5 - Production/Stable',
113 'Environment :: Console',
114 'Intended Audience :: Developers',
115 'Intended Audience :: Science/Research',
116 'Intended Audience :: System Administrators',
117 'License :: OSI Approved :: MIT License',
118 'Natural Language :: English',
119 'Operating System :: POSIX :: Linux',
120 'Programming Language :: Python :: 2.7',
121 'Topic :: Security',
122 'Topic :: Software Development :: Assemblers',
123 'Topic :: Software Development :: Debuggers',
124 'Topic :: Software Development :: Disassemblers',
125 'Topic :: Software Development :: Embedded Systems',
126 'Topic :: Software Development :: Libraries :: Python Modules',
127 'Topic :: System :: System Shells',
128 'Topic :: Utilities',
129 ]
130 )
131
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/version.py b/pwnlib/version.py
--- a/pwnlib/version.py
+++ b/pwnlib/version.py
@@ -1 +1 @@
-__version__ = '4.1.5'
+__version__ = '4.1.6'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,7 @@
name = 'pwntools',
python_requires = '>=2.7',
packages = find_packages(),
- version = '4.1.5',
+ version = '4.1.6',
data_files = [('',
glob.glob('*.md') + glob.glob('*.txt')),
],
| {"golden_diff": "diff --git a/pwnlib/version.py b/pwnlib/version.py\n--- a/pwnlib/version.py\n+++ b/pwnlib/version.py\n@@ -1 +1 @@\n-__version__ = '4.1.5'\n+__version__ = '4.1.6'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -83,7 +83,7 @@\n name = 'pwntools',\n python_requires = '>=2.7',\n packages = find_packages(),\n- version = '4.1.5',\n+ version = '4.1.6',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n", "issue": "aarch64.linux.pushstr_array is broken\nThere's also a missing colon after the `if` statement, and it seems this was never never finished since there's no `%endif`.\r\n\r\nhttps://github.com/Gallopsled/pwntools/blob/813749493288fd7330b7b44b81a7f2f7a8a02dac/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm#L76-L79\r\n\r\n\r\n```\r\n>>> shellcraft.pushstr_array(['hello', 'world'])\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)\r\n 248 if self.filesystem_checks:\r\n--> 249 return self._check(uri, self._collection[uri])\r\n 250 else:\r\n\r\nKeyError: 'aarch64/pushstr_array.asm'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nCompileException Traceback (most recent call last)\r\n<ipython-input-2-b064486e8883> in <module>\r\n----> 1 shellcraft.pushstr_array(['hello', 'world'])\r\n\r\n~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)\r\n 84 for m in self._context_modules():\r\n 85 try:\r\n---> 86 return getattr(m, key)\r\n 87 except AttributeError:\r\n 88 pass\r\n\r\n~/pwntools/pwnlib/shellcraft/__init__.py in __getattr__(self, key)\r\n 78 # This function lazy-loads the shellcodes\r\n 79 if key in self._shellcodes:\r\n---> 80 real = internal.make_function(key, self._shellcodes[key], self._dir)\r\n 81 setattr(self, key, real)\r\n 82 return real\r\n\r\n~/pwntools/pwnlib/shellcraft/internal.py in make_function(funcname, filename, directory)\r\n 112 import inspect\r\n 113 path = os.path.join(directory, filename)\r\n--> 114 template = lookup_template(path)\r\n 115\r\n 116 args, varargs, keywords, defaults = inspect.getargspec(template.module.render_body)\r\n\r\n~/pwntools/pwnlib/shellcraft/internal.py in lookup_template(filename)\r\n 80\r\n 81 if filename not in loaded:\r\n---> 82 loaded[filename] = lookup.get_template(filename)\r\n 83\r\n 84 return loaded[filename]\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in get_template(self, uri)\r\n 258 srcfile = posixpath.normpath(posixpath.join(dir_, u))\r\n 259 if os.path.isfile(srcfile):\r\n--> 260 return self._load(srcfile, uri)\r\n 261 else:\r\n 262 raise exceptions.TopLevelLookupException(\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lookup.py in _load(self, filename, uri)\r\n 320 else:\r\n 321 module_filename = None\r\n--> 322 self._collection[uri] = template = Template(\r\n 323 uri=uri,\r\n 324 filename=posixpath.normpath(filename),\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in __init__(self, text, filename, uri, format_exceptions, error_handler, lookup, output_encoding, encoding_errors, module_directory, cache_args, cache_impl, cache_enabled, cache_type, cache_dir, cache_url, module_filename, input_encoding, disable_unicode, module_writer, bytestring_passthrough, default_filters, buffer_filters, strict_undefined, imports, future_imports, enable_loop, preprocessor, lexer_cls, 
include_error_handler)\r\n 346 else:\r\n 347 path = None\r\n--> 348 module = self._compile_from_file(path, filename)\r\n 349 else:\r\n 350 raise exceptions.RuntimeException(\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_from_file(self, path, filename)\r\n 428 # in memory\r\n 429 data = util.read_file(filename)\r\n--> 430 code, module = _compile_text(self, data, filename)\r\n 431 self._source = None\r\n 432 self._code = code\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile_text(template, text, filename)\r\n 731 def _compile_text(template, text, filename):\r\n 732 identifier = template.module_id\r\n--> 733 source, lexer = _compile(\r\n 734 template,\r\n 735 text,\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/template.py in _compile(template, text, filename, generate_magic_comment)\r\n 710 preprocessor=template.preprocessor,\r\n 711 )\r\n--> 712 node = lexer.parse()\r\n 713 source = codegen.compile(\r\n 714 node,\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in parse(self)\r\n 262 if self.match_expression():\r\n 263 continue\r\n--> 264 if self.match_control_line():\r\n 265 continue\r\n 266 if self.match_comment():\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in match_control_line(self)\r\n 474 **self.exception_kwargs\r\n 475 )\r\n--> 476 self.append_node(parsetree.ControlLine, keyword, isend, text)\r\n 477 else:\r\n 478 self.append_node(parsetree.Comment, text)\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/lexer.py in append_node(self, nodecls, *args, **kwargs)\r\n 150 kwargs.setdefault(\"pos\", self.matched_charpos)\r\n 151 kwargs[\"filename\"] = self.filename\r\n--> 152 node = nodecls(*args, **kwargs)\r\n 153 if len(self.tag):\r\n 154 self.tag[-1].nodes.append(node)\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/parsetree.py in __init__(self, keyword, isend, text, **kwargs)\r\n 91 self._undeclared_identifiers = []\r\n 92 else:\r\n---> 93 code = ast.PythonFragment(text, **self.exception_kwargs)\r\n 94 self._declared_identifiers = code.declared_identifiers\r\n 95 self._undeclared_identifiers = code.undeclared_identifiers\r\n\r\n~/.pyenv/versions/3.8.3/lib/python3.8/site-packages/mako/ast.py in __init__(self, code, **exception_kwargs)\r\n 87 m = re.match(r\"^(\\w+)(?:\\s+(.*?))?:\\s*(#|$)\", code.strip(), re.S)\r\n 88 if not m:\r\n---> 89 raise exceptions.CompileException(\r\n 90 \"Fragment '%s' is not a partial control statement\" % code,\r\n 91 **exception_kwargs\r\n\r\nCompileException: Fragment 'if len(array[-1] != 'sp')' is not a partial control statement in file '/Users/heapcrash/pwntools/pwnlib/shellcraft/templates/aarch64/pushstr_array.asm' at line: 76 char: 1\r\n```\n", "before_files": [{"content": "__version__ = '4.1.5'\n", "path": "pwnlib/version.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport platform\nimport subprocess\nimport sys\nimport traceback\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates'), followlinks=True):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes 
pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\nconsole_scripts = ['pwn=pwnlib.commandline.main:main']\n\n# Find all of the ancillary console scripts\n# We have a magic flag --include-all-scripts\nflag = '--only-use-pwn-command'\nif flag in sys.argv:\n sys.argv.remove(flag)\nelse:\n flag = False\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.common:main' % filename\n if not flag:\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone>=3.0.5rc2', # See Gallopsled/pwntools#971, Gallopsled/pwntools#1160\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'packaging',\n 'psutil>=3.3.0',\n 'intervaltree>=3.0',\n 'sortedcontainers',\n 'unicorn>=1.0.2rc1,<1.0.2rc4', # see unicorn-engine/unicorn#1100, unicorn-engine/unicorn#1170, Gallopsled/pwntools#1538\n 'six>=1.12.0',\n]\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print(\"You must install the Python development headers!\", file=sys.stderr)\n print(\"$ apt-get install python-dev\", file=sys.stderr)\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n long_description = subprocess.check_output(['pandoc', 'README.md', '--to=rst'], universal_newlines=True)\nexcept Exception as e:\n print(\"Failed to convert README.md through pandoc, proceeding anyway\", file=sys.stderr)\n traceback.print_exc()\n\nsetup(\n name = 'pwntools',\n python_requires = '>=2.7',\n packages = find_packages(),\n version = '4.1.5',\n data_files = [('',\n glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n 'data/templates/*.mako',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"[email protected]\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n 
]\n)\n", "path": "setup.py"}]} | 3,837 | 168 |
gh_patches_debug_36784 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-4294 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v23] Renaming a content sends it twice to the RSS feeds
Taking a look at the [opinion posts RSS feed](https://beta.zestedesavoir.com/tribunes/flux/rss/), one can see that the post `Un gros Troll de plus sur Javascript` appears twice.
This is because I first published the post under a different name. On the second publication, the name was changed and it now needlessly shows up twice in the RSS feeds.
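
The duplicate presumably comes from the old `PublishedContent` row that is kept for slug redirection after the rename, while the feed query never filters it out. Below is a hedged sketch of the idea only, reusing names that appear in the code further down (`PublishedContent`, `must_redirect`); the actual fix may be structured differently, and `feed_items` is just an illustrative variable name.
```python
from zds.tutorialv2.models.models_database import PublishedContent

# Illustration only: skip the stale rows kept around for redirection so that a
# renamed post shows up once in the feed, keeping the ordering used elsewhere.
feed_items = (
    PublishedContent.objects
    .prefetch_related('content')
    .prefetch_related('content__authors')
    .filter(must_redirect=False)
    .order_by('-publication_date')
)
```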
</issue>
<code>
[start of zds/tutorialv2/managers.py]
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.db import models
5 from django.db.models import Count, F
6
7 from zds.utils.models import Tag
8 from django.utils.translation import ugettext_lazy as _
9
10
11 class PublishedContentManager(models.Manager):
12 """
13 Custom published content manager.
14 """
15
16 def last_contents_of_a_member_loaded(self, author, _type=None):
17 """
18 Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']
19
20 :param author:
21 :param _type: subtype to filter request
22 :return:
23 :rtype: django.db.models.QuerySet
24 """
25
26 queryset = self.prefetch_related('content') \
27 .prefetch_related('content__authors') \
28 .prefetch_related('content__subcategory') \
29 .filter(content__authors__in=[author]) \
30 .filter(must_redirect=False)
31
32 if _type:
33 queryset = queryset.filter(content_type=_type)
34
35 public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]
36 return public_contents
37
38 def last_tutorials_of_a_member_loaded(self, author):
39 return self.last_contents_of_a_member_loaded(author, _type='TUTORIAL')
40
41 def last_articles_of_a_member_loaded(self, author):
42 return self.last_contents_of_a_member_loaded(author, _type='ARTICLE')
43
44 def last_opinions_of_a_member_loaded(self, author):
45 return self.last_contents_of_a_member_loaded(author, _type='OPINION')
46
47 def get_contents_count(self):
48 """
49 :rtype: int
50 """
51 return self.filter(must_redirect=False) \
52 .count()
53
54 def get_top_tags(self, displayed_types, limit=-1):
55 """
56 Retrieve all most rated tags.
57
58 :param displayed_types:
59 :param limit: if ``-1`` or ``0`` => no limit. Else just takes the provided number of elements.
60 :return:
61 """
62 published = self.filter(
63 must_redirect=False,
64 content__type__in=displayed_types).values('content__tags').distinct()
65 tags_pk = [tag['content__tags'] for tag in published]
66 queryset = Tag.objects\
67 .filter(pk__in=tags_pk, publishablecontent__public_version__isnull=False,
68 publishablecontent__type__in=displayed_types) \
69 .annotate(num_content=Count('publishablecontent')) \
70 .order_by('-num_content', 'title')
71 if limit > 0:
72 queryset = queryset[:limit]
73 return queryset
74
75 def transfer_paternity(self, unsubscribed_user, replacement_author):
76 """
77 erase or transfer the paternity of all published content owned by a user.
78 if a content has more than one author, the unregistering author just leave its redaction\
79 else just mark ``replacement_author`` as the new author
80
81 """
82 for published in self.filter(authors__in=[unsubscribed_user]):
83 if published.authors.count() == 1:
84 published.authors.add(replacement_author)
85 published.authors.remove(unsubscribed_user)
86 published.save()
87
88
89 class PublishableContentManager(models.Manager):
90 """..."""
91
92 def transfer_paternity(self, unregistered_user, replacement_author, gallery_class):
93 """
94 Erases or transfers the paternity of all publishable content owned by a user. \
95 If a content has more than one author, the unregistering author simply leaves its author list, \
96 otherwise their published content are sent to ``replacement_author``, \
97 unpublished content are deleted and their beta topics closed.
98
99 :param unregistered_user: the user to be unregistered
100 :param replacement_author: the new author
101 :param gallery_class: the class to link tutorial with gallery (perhaps overkill :p)
102 """
103 for content in self.filter(authors__in=[unregistered_user]):
104 # we delete content only if not published with only one author
105 if not content.in_public() and content.authors.count() == 1:
106 if content.in_beta() and content.beta_topic:
107 beta_topic = content.beta_topic
108 beta_topic.is_locked = True
109 beta_topic.save()
110 first_post = beta_topic.first_post()
111 first_post.update_content(_(u"# Le tutoriel présenté par ce topic n'existe plus."))
112 first_post.save()
113 content.delete()
114 else:
115 if content.authors.count() == 1:
116 content.authors.add(replacement_author)
117 external_gallery = gallery_class()
118 external_gallery.user = replacement_author
119 external_gallery.gallery = content.gallery
120 external_gallery.mode = 'W'
121 external_gallery.save()
122 gallery_class.objects.filter(user=unregistered_user).filter(gallery=content.gallery).delete()
123
124 content.authors.remove(unregistered_user)
125 # we add a sentence to the content's introduction stating it was written by a former member.
126 versioned = content.load_version()
127 title = versioned.title
128 introduction = _(u'[[i]]\n|Ce contenu a été rédigé par {} qui a quitté le site.\n\n')\
129 .format(unregistered_user.username) + versioned.get_introduction()
130 conclusion = versioned.get_conclusion()
131 sha = versioned.repo_update(title, introduction, conclusion,
132 commit_message='Author unsubscribed',
133 do_commit=True, update_slug=True)
134 content.sha_draft = sha
135 content.save()
136
137 def get_last_tutorials(self):
138 """
139 This depends on settings.ZDS_APP['tutorial']['home_number'] parameter
140
141 :return: lit of last published content
142 :rtype: list
143 """
144 home_number = settings.ZDS_APP['tutorial']['home_number']
145 all_contents = self.filter(type='TUTORIAL') \
146 .filter(public_version__isnull=False) \
147 .prefetch_related('authors') \
148 .prefetch_related('authors__profile') \
149 .select_related('last_note') \
150 .select_related('public_version') \
151 .prefetch_related('subcategory') \
152 .prefetch_related('tags') \
153 .order_by('-public_version__publication_date')[:home_number]
154 published = []
155 for content in all_contents:
156 content.public_version.content = content
157 published.append(content.public_version)
158 return published
159
160 def get_last_articles(self):
161 """
162 ..attention:
163 this one uses a raw subquery for historical reasons. It will hopefully be replaced one day by an
164 ORM primitive.
165
166 :return: list of last articles expanded with 'count_note' property that prefetches number of comments
167 :rtype: list
168 """
169 sub_query = 'SELECT COUNT(*) FROM {} WHERE {}={}'.format(
170 'tutorialv2_contentreaction',
171 'tutorialv2_contentreaction.related_content_id',
172 'tutorialv2_publishedcontent.content_pk',
173 )
174 home_number = settings.ZDS_APP['article']['home_number']
175 all_contents = self.filter(type='ARTICLE') \
176 .filter(public_version__isnull=False) \
177 .prefetch_related('authors') \
178 .prefetch_related('authors__profile') \
179 .select_related('last_note') \
180 .select_related('public_version') \
181 .prefetch_related('subcategory') \
182 .prefetch_related('tags') \
183 .extra(select={'count_note': sub_query}) \
184 .order_by('-public_version__publication_date')[:home_number]
185 published = []
186 for content in all_contents:
187 content.public_version.content = content
188 published.append(content.public_version)
189 return published
190
191 def get_last_opinions(self):
192 """
193 This depends on settings.ZDS_APP['opinions']['home_number'] parameter.
194
195 :return: list of last opinions
196 :rtype: list
197 """
198 home_number = settings.ZDS_APP['opinions']['home_number']
199 all_contents = self.filter(type='OPINION') \
200 .filter(public_version__isnull=False, sha_picked=F('sha_public')) \
201 .prefetch_related('authors') \
202 .prefetch_related('authors__profile') \
203 .select_related('last_note') \
204 .select_related('public_version') \
205 .prefetch_related('subcategory') \
206 .prefetch_related('tags') \
207 .order_by('-picked_date')[:home_number]
208 published = []
209 for content in all_contents:
210 content.public_version.content = content
211 published.append(content.public_version)
212 return published
213
[end of zds/tutorialv2/managers.py]
[start of zds/tutorialv2/feeds.py]
1 # coding: utf-8
2
3 from django.contrib.syndication.views import Feed
4 from django.conf import settings
5
6 from django.utils.feedgenerator import Atom1Feed
7
8 from zds.tutorialv2.models.models_database import PublishedContent
9 from zds.settings import ZDS_APP
10
11
12 class LastContentFeedRSS(Feed):
13 """
14 RSS feed for any type of content.
15 """
16 title = u'Contenus sur {}'.format(settings.ZDS_APP['site']['litteral_name'])
17 description = u'Les derniers contenus parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])
18 link = ''
19 content_type = None
20
21 def items(self):
22 """
23 :return: The last (typically 5) contents (sorted by publication date).
24 If `self.type` is not `None`, the contents will only be of this type.
25 """
26 contents = PublishedContent.objects\
27 .prefetch_related('content')\
28 .prefetch_related('content__authors')
29
30 if self.content_type is not None:
31 contents = contents.filter(content_type=self.content_type)
32
33 return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]
34
35 def item_title(self, item):
36 return item.content.title
37
38 def item_pubdate(self, item):
39 return item.publication_date
40
41 def item_description(self, item):
42 return item.content.description
43
44 def item_author_name(self, item):
45 authors_list = item.content.authors.all()
46 authors = []
47 for authors_obj in authors_list:
48 authors.append(authors_obj.username)
49 authors = ', '.join(authors)
50 return authors
51
52 def item_link(self, item):
53 return item.get_absolute_url_online()
54
55
56 class LastContentFeedATOM(LastContentFeedRSS):
57 feed_type = Atom1Feed
58 subtitle = LastContentFeedRSS.description
59
60
61 class LastTutorialsFeedRSS(LastContentFeedRSS):
62 """
63 Redefinition of `LastContentFeedRSS` for tutorials only
64 """
65 content_type = 'TUTORIAL'
66 link = '/tutoriels/'
67 title = u'Tutoriels sur {}'.format(settings.ZDS_APP['site']['litteral_name'])
68 description = u'Les derniers tutoriels parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])
69
70
71 class LastTutorialsFeedATOM(LastTutorialsFeedRSS):
72 feed_type = Atom1Feed
73 subtitle = LastTutorialsFeedRSS.description
74
75
76 class LastArticlesFeedRSS(LastContentFeedRSS):
77 """
78 Redefinition of `LastContentFeedRSS` for articles only
79 """
80 content_type = 'ARTICLE'
81 link = '/articles/'
82 title = u'Articles sur {}'.format(settings.ZDS_APP['site']['litteral_name'])
83 description = u'Les derniers articles parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])
84
85
86 class LastArticlesFeedATOM(LastArticlesFeedRSS):
87 feed_type = Atom1Feed
88 subtitle = LastArticlesFeedRSS.description
89
90
91 class LastOpinionsFeedRSS(LastContentFeedRSS):
92 """
93 Redefinition of `LastContentFeedRSS` for opinions only
94 """
95 content_type = 'OPINION'
96 link = '/tribunes/'
97 title = u'Tribunes sur {}'.format(settings.ZDS_APP['site']['litteral_name'])
98 description = u'Les derniers billets des tribunes parus sur {}.'.format(
99 settings.ZDS_APP['site']['litteral_name'])
100
101
102 class LastOpinionsFeedATOM(LastOpinionsFeedRSS):
103 feed_type = Atom1Feed
104 subtitle = LastOpinionsFeedRSS.description
105
[end of zds/tutorialv2/feeds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py
--- a/zds/tutorialv2/feeds.py
+++ b/zds/tutorialv2/feeds.py
@@ -23,14 +23,9 @@
:return: The last (typically 5) contents (sorted by publication date).
If `self.type` is not `None`, the contents will only be of this type.
"""
- contents = PublishedContent.objects\
- .prefetch_related('content')\
- .prefetch_related('content__authors')
+ contents = PublishedContent.objects.published_contents(self.content_type)[:ZDS_APP['content']['feed_length']]
- if self.content_type is not None:
- contents = contents.filter(content_type=self.content_type)
-
- return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]
+ return contents
def item_title(self, item):
return item.content.title
diff --git a/zds/tutorialv2/managers.py b/zds/tutorialv2/managers.py
--- a/zds/tutorialv2/managers.py
+++ b/zds/tutorialv2/managers.py
@@ -13,26 +13,37 @@
Custom published content manager.
"""
- def last_contents_of_a_member_loaded(self, author, _type=None):
+ def published_contents(self, _type=None):
"""
- Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']
+ Get contents published order by date.
- :param author:
- :param _type: subtype to filter request
:return:
:rtype: django.db.models.QuerySet
"""
-
queryset = self.prefetch_related('content') \
.prefetch_related('content__authors') \
.prefetch_related('content__subcategory') \
- .filter(content__authors__in=[author]) \
- .filter(must_redirect=False)
+ .filter(must_redirect=False) \
+ .order_by('-publication_date')
if _type:
queryset = queryset.filter(content_type=_type)
- public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]
+ return queryset
+
+ def last_contents_of_a_member_loaded(self, author, _type=None):
+ """
+ Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']
+
+ :param author:
+ :param _type: subtype to filter request
+ :return:
+ :rtype: django.db.models.QuerySet
+ """
+ queryset = self.published_contents(_type) \
+ .filter(content__authors__in=[author])
+
+ public_contents = queryset.all()[:settings.ZDS_APP['content']['user_page_number']]
return public_contents
def last_tutorials_of_a_member_loaded(self, author):
| {"golden_diff": "diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py\n--- a/zds/tutorialv2/feeds.py\n+++ b/zds/tutorialv2/feeds.py\n@@ -23,14 +23,9 @@\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n- contents = PublishedContent.objects\\\n- .prefetch_related('content')\\\n- .prefetch_related('content__authors')\n+ contents = PublishedContent.objects.published_contents(self.content_type)[:ZDS_APP['content']['feed_length']]\n \n- if self.content_type is not None:\n- contents = contents.filter(content_type=self.content_type)\n-\n- return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]\n+ return contents\n \n def item_title(self, item):\n return item.content.title\ndiff --git a/zds/tutorialv2/managers.py b/zds/tutorialv2/managers.py\n--- a/zds/tutorialv2/managers.py\n+++ b/zds/tutorialv2/managers.py\n@@ -13,26 +13,37 @@\n Custom published content manager.\n \"\"\"\n \n- def last_contents_of_a_member_loaded(self, author, _type=None):\n+ def published_contents(self, _type=None):\n \"\"\"\n- Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n+ Get contents published order by date.\n \n- :param author:\n- :param _type: subtype to filter request\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n-\n queryset = self.prefetch_related('content') \\\n .prefetch_related('content__authors') \\\n .prefetch_related('content__subcategory') \\\n- .filter(content__authors__in=[author]) \\\n- .filter(must_redirect=False)\n+ .filter(must_redirect=False) \\\n+ .order_by('-publication_date')\n \n if _type:\n queryset = queryset.filter(content_type=_type)\n \n- public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]\n+ return queryset\n+\n+ def last_contents_of_a_member_loaded(self, author, _type=None):\n+ \"\"\"\n+ Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n+\n+ :param author:\n+ :param _type: subtype to filter request\n+ :return:\n+ :rtype: django.db.models.QuerySet\n+ \"\"\"\n+ queryset = self.published_contents(_type) \\\n+ .filter(content__authors__in=[author])\n+\n+ public_contents = queryset.all()[:settings.ZDS_APP['content']['user_page_number']]\n return public_contents\n \n def last_tutorials_of_a_member_loaded(self, author):\n", "issue": "[v23] Renommer un contenu l'envoit deux fois dans les flux rss\nEn jetant un \u0153il aux [flux rss des billets](https://beta.zestedesavoir.com/tribunes/flux/rss/), on se rend compte que le billet `Un gros Troll de plus sur Javascript` est pr\u00e9sent 2 fois.\r\n\r\nC'est du au fait que j'ai publi\u00e9 la premi\u00e8re fois le billet avec un autre nom. 
A la deuxi\u00e8me publication, le nom a \u00e9t\u00e9 modifi\u00e9 et \u00e7a apparait deux fois inutilement dans les flux rss.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Count, F\n\nfrom zds.utils.models import Tag\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass PublishedContentManager(models.Manager):\n \"\"\"\n Custom published content manager.\n \"\"\"\n\n def last_contents_of_a_member_loaded(self, author, _type=None):\n \"\"\"\n Get contents published by author depends on settings.ZDS_APP['content']['user_page_number']\n\n :param author:\n :param _type: subtype to filter request\n :return:\n :rtype: django.db.models.QuerySet\n \"\"\"\n\n queryset = self.prefetch_related('content') \\\n .prefetch_related('content__authors') \\\n .prefetch_related('content__subcategory') \\\n .filter(content__authors__in=[author]) \\\n .filter(must_redirect=False)\n\n if _type:\n queryset = queryset.filter(content_type=_type)\n\n public_contents = queryset.order_by('-publication_date').all()[:settings.ZDS_APP['content']['user_page_number']]\n return public_contents\n\n def last_tutorials_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='TUTORIAL')\n\n def last_articles_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='ARTICLE')\n\n def last_opinions_of_a_member_loaded(self, author):\n return self.last_contents_of_a_member_loaded(author, _type='OPINION')\n\n def get_contents_count(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.filter(must_redirect=False) \\\n .count()\n\n def get_top_tags(self, displayed_types, limit=-1):\n \"\"\"\n Retrieve all most rated tags.\n\n :param displayed_types:\n :param limit: if ``-1``\u00a0or ``0`` => no limit. Else just takes the provided number of elements.\n :return:\n \"\"\"\n published = self.filter(\n must_redirect=False,\n content__type__in=displayed_types).values('content__tags').distinct()\n tags_pk = [tag['content__tags'] for tag in published]\n queryset = Tag.objects\\\n .filter(pk__in=tags_pk, publishablecontent__public_version__isnull=False,\n publishablecontent__type__in=displayed_types) \\\n .annotate(num_content=Count('publishablecontent')) \\\n .order_by('-num_content', 'title')\n if limit > 0:\n queryset = queryset[:limit]\n return queryset\n\n def transfer_paternity(self, unsubscribed_user, replacement_author):\n \"\"\"\n erase or transfer the paternity of all published content owned by a user.\n if a content has more than one author, the unregistering author just leave its redaction\\\n else just mark ``replacement_author`` as the new author\n\n \"\"\"\n for published in self.filter(authors__in=[unsubscribed_user]):\n if published.authors.count() == 1:\n published.authors.add(replacement_author)\n published.authors.remove(unsubscribed_user)\n published.save()\n\n\nclass PublishableContentManager(models.Manager):\n \"\"\"...\"\"\"\n\n def transfer_paternity(self, unregistered_user, replacement_author, gallery_class):\n \"\"\"\n Erases or transfers the paternity of all publishable content owned by a user. 
\\\n If a content has more than one author, the unregistering author simply leaves its author list, \\\n otherwise their published content are sent to ``replacement_author``, \\\n unpublished content are deleted and their beta topics closed.\n\n :param unregistered_user: the user to be unregistered\n :param replacement_author: the new author\n :param gallery_class: the class to link tutorial with gallery (perhaps overkill :p)\n \"\"\"\n for content in self.filter(authors__in=[unregistered_user]):\n # we delete content only if not published with only one author\n if not content.in_public() and content.authors.count() == 1:\n if content.in_beta() and content.beta_topic:\n beta_topic = content.beta_topic\n beta_topic.is_locked = True\n beta_topic.save()\n first_post = beta_topic.first_post()\n first_post.update_content(_(u\"# Le tutoriel pr\u00e9sent\u00e9 par ce topic n'existe plus.\"))\n first_post.save()\n content.delete()\n else:\n if content.authors.count() == 1:\n content.authors.add(replacement_author)\n external_gallery = gallery_class()\n external_gallery.user = replacement_author\n external_gallery.gallery = content.gallery\n external_gallery.mode = 'W'\n external_gallery.save()\n gallery_class.objects.filter(user=unregistered_user).filter(gallery=content.gallery).delete()\n\n content.authors.remove(unregistered_user)\n # we add a sentence to the content's introduction stating it was written by a former member.\n versioned = content.load_version()\n title = versioned.title\n introduction = _(u'[[i]]\\n|Ce contenu a \u00e9t\u00e9 r\u00e9dig\u00e9 par {} qui a quitt\u00e9 le site.\\n\\n')\\\n .format(unregistered_user.username) + versioned.get_introduction()\n conclusion = versioned.get_conclusion()\n sha = versioned.repo_update(title, introduction, conclusion,\n commit_message='Author unsubscribed',\n do_commit=True, update_slug=True)\n content.sha_draft = sha\n content.save()\n\n def get_last_tutorials(self):\n \"\"\"\n This depends on settings.ZDS_APP['tutorial']['home_number'] parameter\n\n :return: lit of last published content\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['tutorial']['home_number']\n all_contents = self.filter(type='TUTORIAL') \\\n .filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_articles(self):\n \"\"\"\n ..attention:\n this one uses a raw subquery for historical reasons. 
It will hopefully be replaced one day by an\n ORM primitive.\n\n :return: list of last articles expanded with 'count_note' property that prefetches number of comments\n :rtype: list\n \"\"\"\n sub_query = 'SELECT COUNT(*) FROM {} WHERE {}={}'.format(\n 'tutorialv2_contentreaction',\n 'tutorialv2_contentreaction.related_content_id',\n 'tutorialv2_publishedcontent.content_pk',\n )\n home_number = settings.ZDS_APP['article']['home_number']\n all_contents = self.filter(type='ARTICLE') \\\n .filter(public_version__isnull=False) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .extra(select={'count_note': sub_query}) \\\n .order_by('-public_version__publication_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n\n def get_last_opinions(self):\n \"\"\"\n This depends on settings.ZDS_APP['opinions']['home_number'] parameter.\n\n :return: list of last opinions\n :rtype: list\n \"\"\"\n home_number = settings.ZDS_APP['opinions']['home_number']\n all_contents = self.filter(type='OPINION') \\\n .filter(public_version__isnull=False, sha_picked=F('sha_public')) \\\n .prefetch_related('authors') \\\n .prefetch_related('authors__profile') \\\n .select_related('last_note') \\\n .select_related('public_version') \\\n .prefetch_related('subcategory') \\\n .prefetch_related('tags') \\\n .order_by('-picked_date')[:home_number]\n published = []\n for content in all_contents:\n content.public_version.content = content\n published.append(content.public_version)\n return published\n", "path": "zds/tutorialv2/managers.py"}, {"content": "# coding: utf-8\n\nfrom django.contrib.syndication.views import Feed\nfrom django.conf import settings\n\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom zds.tutorialv2.models.models_database import PublishedContent\nfrom zds.settings import ZDS_APP\n\n\nclass LastContentFeedRSS(Feed):\n \"\"\"\n RSS feed for any type of content.\n \"\"\"\n title = u'Contenus sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers contenus parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n link = ''\n content_type = None\n\n def items(self):\n \"\"\"\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n contents = PublishedContent.objects\\\n .prefetch_related('content')\\\n .prefetch_related('content__authors')\n\n if self.content_type is not None:\n contents = contents.filter(content_type=self.content_type)\n\n return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]\n\n def item_title(self, item):\n return item.content.title\n\n def item_pubdate(self, item):\n return item.publication_date\n\n def item_description(self, item):\n return item.content.description\n\n def item_author_name(self, item):\n authors_list = item.content.authors.all()\n authors = []\n for authors_obj in authors_list:\n authors.append(authors_obj.username)\n authors = ', '.join(authors)\n return authors\n\n def item_link(self, item):\n return item.get_absolute_url_online()\n\n\nclass LastContentFeedATOM(LastContentFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastContentFeedRSS.description\n\n\nclass LastTutorialsFeedRSS(LastContentFeedRSS):\n 
\"\"\"\n Redefinition of `LastContentFeedRSS` for tutorials only\n \"\"\"\n content_type = 'TUTORIAL'\n link = '/tutoriels/'\n title = u'Tutoriels sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers tutoriels parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastTutorialsFeedATOM(LastTutorialsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastTutorialsFeedRSS.description\n\n\nclass LastArticlesFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for articles only\n \"\"\"\n content_type = 'ARTICLE'\n link = '/articles/'\n title = u'Articles sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers articles parus sur {}.'.format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastArticlesFeedATOM(LastArticlesFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastArticlesFeedRSS.description\n\n\nclass LastOpinionsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for opinions only\n \"\"\"\n content_type = 'OPINION'\n link = '/tribunes/'\n title = u'Tribunes sur {}'.format(settings.ZDS_APP['site']['litteral_name'])\n description = u'Les derniers billets des tribunes parus sur {}.'.format(\n settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastOpinionsFeedATOM(LastOpinionsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastOpinionsFeedRSS.description\n", "path": "zds/tutorialv2/feeds.py"}]} | 4,031 | 636 |
gh_patches_debug_41491 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9208 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Making binary tree traversals lazy.
### Feature description
Hi,
In the [binary_tree_traversals.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py) file, there are different kinds of traversals such as `preorder`, `inorder`, `postorder`, etc.
Although the implementations are pretty clean one-liners, like:
```python
# preorder
return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
```
they aren't memory-friendly. We can use generators instead, so we don't load all the nodes into memory:
```python
# preorder
if not root:
return []
yield root.data
yield from preorder(root.left)
yield from preorder(root.right)
```
Shall we go ahead and change them?
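
To make the benefit concrete, here is a small usage sketch, assuming the generator version of `preorder` above and the `make_tree` helper from the file below; iteration can stop early without materialising the rest of the traversal.
```python
# Values are produced on demand, so the loop can stop as soon as it finds
# what it needs; with this early break, nodes 5 and 3 are never visited.
for value in preorder(make_tree()):
    if value == 4:
        break
```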
</issue>
<code>
[start of data_structures/binary_tree/binary_tree_traversals.py]
1 # https://en.wikipedia.org/wiki/Tree_traversal
2 from __future__ import annotations
3
4 from collections import deque
5 from collections.abc import Sequence
6 from dataclasses import dataclass
7 from typing import Any
8
9
10 @dataclass
11 class Node:
12 data: int
13 left: Node | None = None
14 right: Node | None = None
15
16
17 def make_tree() -> Node | None:
18 r"""
19 The below tree
20 1
21 / \
22 2 3
23 / \
24 4 5
25 """
26 tree = Node(1)
27 tree.left = Node(2)
28 tree.right = Node(3)
29 tree.left.left = Node(4)
30 tree.left.right = Node(5)
31 return tree
32
33
34 def preorder(root: Node | None) -> list[int]:
35 """
36 Pre-order traversal visits root node, left subtree, right subtree.
37 >>> preorder(make_tree())
38 [1, 2, 4, 5, 3]
39 """
40 return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
41
42
43 def postorder(root: Node | None) -> list[int]:
44 """
45 Post-order traversal visits left subtree, right subtree, root node.
46 >>> postorder(make_tree())
47 [4, 5, 2, 3, 1]
48 """
49 return postorder(root.left) + postorder(root.right) + [root.data] if root else []
50
51
52 def inorder(root: Node | None) -> list[int]:
53 """
54 In-order traversal visits left subtree, root node, right subtree.
55 >>> inorder(make_tree())
56 [4, 2, 5, 1, 3]
57 """
58 return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
59
60
61 def reverse_inorder(root: Node | None) -> list[int]:
62 """
63 Reverse in-order traversal visits right subtree, root node, left subtree.
64 >>> reverse_inorder(make_tree())
65 [3, 1, 5, 2, 4]
66 """
67 return (
68 [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]
69 if root
70 else []
71 )
72
73
74 def height(root: Node | None) -> int:
75 """
76 Recursive function for calculating the height of the binary tree.
77 >>> height(None)
78 0
79 >>> height(make_tree())
80 3
81 """
82 return (max(height(root.left), height(root.right)) + 1) if root else 0
83
84
85 def level_order(root: Node | None) -> Sequence[Node | None]:
86 """
87 Returns a list of nodes value from a whole binary tree in Level Order Traverse.
88 Level Order traverse: Visit nodes of the tree level-by-level.
89 """
90 output: list[Any] = []
91
92 if root is None:
93 return output
94
95 process_queue = deque([root])
96
97 while process_queue:
98 node = process_queue.popleft()
99 output.append(node.data)
100
101 if node.left:
102 process_queue.append(node.left)
103 if node.right:
104 process_queue.append(node.right)
105 return output
106
107
108 def get_nodes_from_left_to_right(
109 root: Node | None, level: int
110 ) -> Sequence[Node | None]:
111 """
112 Returns a list of nodes value from a particular level:
113 Left to right direction of the binary tree.
114 """
115 output: list[Any] = []
116
117 def populate_output(root: Node | None, level: int) -> None:
118 if not root:
119 return
120 if level == 1:
121 output.append(root.data)
122 elif level > 1:
123 populate_output(root.left, level - 1)
124 populate_output(root.right, level - 1)
125
126 populate_output(root, level)
127 return output
128
129
130 def get_nodes_from_right_to_left(
131 root: Node | None, level: int
132 ) -> Sequence[Node | None]:
133 """
134 Returns a list of nodes value from a particular level:
135 Right to left direction of the binary tree.
136 """
137 output: list[Any] = []
138
139 def populate_output(root: Node | None, level: int) -> None:
140 if root is None:
141 return
142 if level == 1:
143 output.append(root.data)
144 elif level > 1:
145 populate_output(root.right, level - 1)
146 populate_output(root.left, level - 1)
147
148 populate_output(root, level)
149 return output
150
151
152 def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
153 """
154 ZigZag traverse:
155 Returns a list of nodes value from left to right and right to left, alternatively.
156 """
157 if root is None:
158 return []
159
160 output: list[Sequence[Node | None]] = []
161
162 flag = 0
163 height_tree = height(root)
164
165 for h in range(1, height_tree + 1):
166 if not flag:
167 output.append(get_nodes_from_left_to_right(root, h))
168 flag = 1
169 else:
170 output.append(get_nodes_from_right_to_left(root, h))
171 flag = 0
172
173 return output
174
175
176 def main() -> None: # Main function for testing.
177 # Create binary tree.
178 root = make_tree()
179
180 # All Traversals of the binary are as follows:
181 print(f"In-order Traversal: {inorder(root)}")
182 print(f"Reverse In-order Traversal: {reverse_inorder(root)}")
183 print(f"Pre-order Traversal: {preorder(root)}")
184 print(f"Post-order Traversal: {postorder(root)}", "\n")
185
186 print(f"Height of Tree: {height(root)}", "\n")
187
188 print("Complete Level Order Traversal: ")
189 print(level_order(root), "\n")
190
191 print("Level-wise order Traversal: ")
192
193 for level in range(1, height(root) + 1):
194 print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
195
196 print("\nZigZag order Traversal: ")
197 print(zigzag(root))
198
199
200 if __name__ == "__main__":
201 import doctest
202
203 doctest.testmod()
204 main()
205
[end of data_structures/binary_tree/binary_tree_traversals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py
--- a/data_structures/binary_tree/binary_tree_traversals.py
+++ b/data_structures/binary_tree/binary_tree_traversals.py
@@ -1,12 +1,12 @@
-# https://en.wikipedia.org/wiki/Tree_traversal
from __future__ import annotations
from collections import deque
-from collections.abc import Sequence
+from collections.abc import Generator, Sequence
from dataclasses import dataclass
from typing import Any
+# https://en.wikipedia.org/wiki/Tree_traversal
@dataclass
class Node:
data: int
@@ -31,44 +31,56 @@
return tree
-def preorder(root: Node | None) -> list[int]:
+def preorder(root: Node | None) -> Generator[int, None, None]:
"""
Pre-order traversal visits root node, left subtree, right subtree.
- >>> preorder(make_tree())
+ >>> list(preorder(make_tree()))
[1, 2, 4, 5, 3]
"""
- return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
+ if not root:
+ return
+ yield root.data
+ yield from preorder(root.left)
+ yield from preorder(root.right)
-def postorder(root: Node | None) -> list[int]:
+def postorder(root: Node | None) -> Generator[int, None, None]:
"""
Post-order traversal visits left subtree, right subtree, root node.
- >>> postorder(make_tree())
+ >>> list(postorder(make_tree()))
[4, 5, 2, 3, 1]
"""
- return postorder(root.left) + postorder(root.right) + [root.data] if root else []
+ if not root:
+ return
+ yield from postorder(root.left)
+ yield from postorder(root.right)
+ yield root.data
-def inorder(root: Node | None) -> list[int]:
+def inorder(root: Node | None) -> Generator[int, None, None]:
"""
In-order traversal visits left subtree, root node, right subtree.
- >>> inorder(make_tree())
+ >>> list(inorder(make_tree()))
[4, 2, 5, 1, 3]
"""
- return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
+ if not root:
+ return
+ yield from inorder(root.left)
+ yield root.data
+ yield from inorder(root.right)
-def reverse_inorder(root: Node | None) -> list[int]:
+def reverse_inorder(root: Node | None) -> Generator[int, None, None]:
"""
Reverse in-order traversal visits right subtree, root node, left subtree.
- >>> reverse_inorder(make_tree())
+ >>> list(reverse_inorder(make_tree()))
[3, 1, 5, 2, 4]
"""
- return (
- [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]
- if root
- else []
- )
+ if not root:
+ return
+ yield from reverse_inorder(root.right)
+ yield root.data
+ yield from reverse_inorder(root.left)
def height(root: Node | None) -> int:
@@ -178,10 +190,10 @@
root = make_tree()
# All Traversals of the binary are as follows:
- print(f"In-order Traversal: {inorder(root)}")
- print(f"Reverse In-order Traversal: {reverse_inorder(root)}")
- print(f"Pre-order Traversal: {preorder(root)}")
- print(f"Post-order Traversal: {postorder(root)}", "\n")
+ print(f"In-order Traversal: {list(inorder(root))}")
+ print(f"Reverse In-order Traversal: {list(reverse_inorder(root))}")
+ print(f"Pre-order Traversal: {list(preorder(root))}")
+ print(f"Post-order Traversal: {list(postorder(root))}", "\n")
print(f"Height of Tree: {height(root)}", "\n")
| {"golden_diff": "diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py\n--- a/data_structures/binary_tree/binary_tree_traversals.py\n+++ b/data_structures/binary_tree/binary_tree_traversals.py\n@@ -1,12 +1,12 @@\n-# https://en.wikipedia.org/wiki/Tree_traversal\r\n from __future__ import annotations\r\n \r\n from collections import deque\r\n-from collections.abc import Sequence\r\n+from collections.abc import Generator, Sequence\r\n from dataclasses import dataclass\r\n from typing import Any\r\n \r\n \r\n+# https://en.wikipedia.org/wiki/Tree_traversal\r\n @dataclass\r\n class Node:\r\n data: int\r\n@@ -31,44 +31,56 @@\n return tree\r\n \r\n \r\n-def preorder(root: Node | None) -> list[int]:\r\n+def preorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Pre-order traversal visits root node, left subtree, right subtree.\r\n- >>> preorder(make_tree())\r\n+ >>> list(preorder(make_tree()))\r\n [1, 2, 4, 5, 3]\r\n \"\"\"\r\n- return [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield root.data\r\n+ yield from preorder(root.left)\r\n+ yield from preorder(root.right)\r\n \r\n \r\n-def postorder(root: Node | None) -> list[int]:\r\n+def postorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Post-order traversal visits left subtree, right subtree, root node.\r\n- >>> postorder(make_tree())\r\n+ >>> list(postorder(make_tree()))\r\n [4, 5, 2, 3, 1]\r\n \"\"\"\r\n- return postorder(root.left) + postorder(root.right) + [root.data] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield from postorder(root.left)\r\n+ yield from postorder(root.right)\r\n+ yield root.data\r\n \r\n \r\n-def inorder(root: Node | None) -> list[int]:\r\n+def inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n In-order traversal visits left subtree, root node, right subtree.\r\n- >>> inorder(make_tree())\r\n+ >>> list(inorder(make_tree()))\r\n [4, 2, 5, 1, 3]\r\n \"\"\"\r\n- return [*inorder(root.left), root.data, *inorder(root.right)] if root else []\r\n+ if not root:\r\n+ return\r\n+ yield from inorder(root.left)\r\n+ yield root.data\r\n+ yield from inorder(root.right)\r\n \r\n \r\n-def reverse_inorder(root: Node | None) -> list[int]:\r\n+def reverse_inorder(root: Node | None) -> Generator[int, None, None]:\r\n \"\"\"\r\n Reverse in-order traversal visits right subtree, root node, left subtree.\r\n- >>> reverse_inorder(make_tree())\r\n+ >>> list(reverse_inorder(make_tree()))\r\n [3, 1, 5, 2, 4]\r\n \"\"\"\r\n- return (\r\n- [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]\r\n- if root\r\n- else []\r\n- )\r\n+ if not root:\r\n+ return\r\n+ yield from reverse_inorder(root.right)\r\n+ yield root.data\r\n+ yield from reverse_inorder(root.left)\r\n \r\n \r\n def height(root: Node | None) -> int:\r\n@@ -178,10 +190,10 @@\n root = make_tree()\r\n \r\n # All Traversals of the binary are as follows:\r\n- print(f\"In-order Traversal: {inorder(root)}\")\r\n- print(f\"Reverse In-order Traversal: {reverse_inorder(root)}\")\r\n- print(f\"Pre-order Traversal: {preorder(root)}\")\r\n- print(f\"Post-order Traversal: {postorder(root)}\", \"\\n\")\r\n+ print(f\"In-order Traversal: {list(inorder(root))}\")\r\n+ print(f\"Reverse In-order Traversal: {list(reverse_inorder(root))}\")\r\n+ print(f\"Pre-order Traversal: {list(preorder(root))}\")\r\n+ print(f\"Post-order Traversal: {list(postorder(root))}\", \"\\n\")\r\n \r\n print(f\"Height 
of Tree: {height(root)}\", \"\\n\")\n", "issue": "Making binary tree traversals lazy.\n### Feature description\n\nHi,\r\nIn [binary_tree_traversals.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py) file, there are different kinds of traversals such as `preorder`, `inorder`, `postorder` and etc.\r\n\r\nAlthough the implementations are pretty clean one-liner like:\r\n```python\r\n# preorder\r\nreturn [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n```\r\nIt isn't memory friendly. We can use generators instead not to load all the nodes into the memory:\r\n\r\n```python\r\n# preorder\r\n if not root:\r\n return []\r\n yield root.data\r\n yield from preorder(root.left)\r\n yield from preorder(root.right)\r\n```\r\nShall we go ahead and change them?\n", "before_files": [{"content": "# https://en.wikipedia.org/wiki/Tree_traversal\r\nfrom __future__ import annotations\r\n\r\nfrom collections import deque\r\nfrom collections.abc import Sequence\r\nfrom dataclasses import dataclass\r\nfrom typing import Any\r\n\r\n\r\n@dataclass\r\nclass Node:\r\n data: int\r\n left: Node | None = None\r\n right: Node | None = None\r\n\r\n\r\ndef make_tree() -> Node | None:\r\n r\"\"\"\r\n The below tree\r\n 1\r\n / \\\r\n 2 3\r\n / \\\r\n 4 5\r\n \"\"\"\r\n tree = Node(1)\r\n tree.left = Node(2)\r\n tree.right = Node(3)\r\n tree.left.left = Node(4)\r\n tree.left.right = Node(5)\r\n return tree\r\n\r\n\r\ndef preorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Pre-order traversal visits root node, left subtree, right subtree.\r\n >>> preorder(make_tree())\r\n [1, 2, 4, 5, 3]\r\n \"\"\"\r\n return [root.data, *preorder(root.left), *preorder(root.right)] if root else []\r\n\r\n\r\ndef postorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Post-order traversal visits left subtree, right subtree, root node.\r\n >>> postorder(make_tree())\r\n [4, 5, 2, 3, 1]\r\n \"\"\"\r\n return postorder(root.left) + postorder(root.right) + [root.data] if root else []\r\n\r\n\r\ndef inorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n In-order traversal visits left subtree, root node, right subtree.\r\n >>> inorder(make_tree())\r\n [4, 2, 5, 1, 3]\r\n \"\"\"\r\n return [*inorder(root.left), root.data, *inorder(root.right)] if root else []\r\n\r\n\r\ndef reverse_inorder(root: Node | None) -> list[int]:\r\n \"\"\"\r\n Reverse in-order traversal visits right subtree, root node, left subtree.\r\n >>> reverse_inorder(make_tree())\r\n [3, 1, 5, 2, 4]\r\n \"\"\"\r\n return (\r\n [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)]\r\n if root\r\n else []\r\n )\r\n\r\n\r\ndef height(root: Node | None) -> int:\r\n \"\"\"\r\n Recursive function for calculating the height of the binary tree.\r\n >>> height(None)\r\n 0\r\n >>> height(make_tree())\r\n 3\r\n \"\"\"\r\n return (max(height(root.left), height(root.right)) + 1) if root else 0\r\n\r\n\r\ndef level_order(root: Node | None) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a whole binary tree in Level Order Traverse.\r\n Level Order traverse: Visit nodes of the tree level-by-level.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n if root is None:\r\n return output\r\n\r\n process_queue = deque([root])\r\n\r\n while process_queue:\r\n node = process_queue.popleft()\r\n output.append(node.data)\r\n\r\n if node.left:\r\n process_queue.append(node.left)\r\n if node.right:\r\n process_queue.append(node.right)\r\n return output\r\n\r\n\r\ndef 
get_nodes_from_left_to_right(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Left to right direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if not root:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.left, level - 1)\r\n populate_output(root.right, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef get_nodes_from_right_to_left(\r\n root: Node | None, level: int\r\n) -> Sequence[Node | None]:\r\n \"\"\"\r\n Returns a list of nodes value from a particular level:\r\n Right to left direction of the binary tree.\r\n \"\"\"\r\n output: list[Any] = []\r\n\r\n def populate_output(root: Node | None, level: int) -> None:\r\n if root is None:\r\n return\r\n if level == 1:\r\n output.append(root.data)\r\n elif level > 1:\r\n populate_output(root.right, level - 1)\r\n populate_output(root.left, level - 1)\r\n\r\n populate_output(root, level)\r\n return output\r\n\r\n\r\ndef zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:\r\n \"\"\"\r\n ZigZag traverse:\r\n Returns a list of nodes value from left to right and right to left, alternatively.\r\n \"\"\"\r\n if root is None:\r\n return []\r\n\r\n output: list[Sequence[Node | None]] = []\r\n\r\n flag = 0\r\n height_tree = height(root)\r\n\r\n for h in range(1, height_tree + 1):\r\n if not flag:\r\n output.append(get_nodes_from_left_to_right(root, h))\r\n flag = 1\r\n else:\r\n output.append(get_nodes_from_right_to_left(root, h))\r\n flag = 0\r\n\r\n return output\r\n\r\n\r\ndef main() -> None: # Main function for testing.\r\n # Create binary tree.\r\n root = make_tree()\r\n\r\n # All Traversals of the binary are as follows:\r\n print(f\"In-order Traversal: {inorder(root)}\")\r\n print(f\"Reverse In-order Traversal: {reverse_inorder(root)}\")\r\n print(f\"Pre-order Traversal: {preorder(root)}\")\r\n print(f\"Post-order Traversal: {postorder(root)}\", \"\\n\")\r\n\r\n print(f\"Height of Tree: {height(root)}\", \"\\n\")\r\n\r\n print(\"Complete Level Order Traversal: \")\r\n print(level_order(root), \"\\n\")\r\n\r\n print(\"Level-wise order Traversal: \")\r\n\r\n for level in range(1, height(root) + 1):\r\n print(f\"Level {level}:\", get_nodes_from_left_to_right(root, level=level))\r\n\r\n print(\"\\nZigZag order Traversal: \")\r\n print(zigzag(root))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n main()\r\n", "path": "data_structures/binary_tree/binary_tree_traversals.py"}]} | 2,597 | 947 |
gh_patches_debug_29675 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Script to build each component from manifest and assemble bundle.
This script should read a manifest and output all artifacts ready for upload.
Example.
/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle
/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- full bundle
/build/opensearch-sql-1.0.0.0-rc1.zip
/build/org/opensearch... <- maven artifacts
Input: a manifest file location - format defined here #111
Output: all required artifacts are written to ./build
- [x] Clone each component repository defined in the manifest
- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.
- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134
To make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. All artifacts will be under org/opensearch.
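For orientation, here is a minimal sketch of the artifact-collection step described above. The sub-directory names ("maven", "plugins", "bundle", "libs"), the helper name, and the copy-into-`./build` behaviour are illustrative assumptions rather than the repository's actual interface:

```python
import os
import shutil


def collect_artifacts(artifacts_dir, build_dir, kinds=("maven", "plugins", "bundle", "libs")):
    """Copy each kind of artifact into build_dir and return the relative paths found."""
    collected = {}
    for kind in kinds:
        src = os.path.join(artifacts_dir, kind)
        if not os.path.isdir(src):
            continue
        paths = []
        for root, _dirs, files in os.walk(src):
            for name in files:
                full_path = os.path.join(root, name)
                rel_path = os.path.relpath(full_path, artifacts_dir)
                dest = os.path.join(build_dir, rel_path)
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                shutil.copy2(full_path, dest)
                paths.append(rel_path)
        if paths:
            collected[kind] = paths
    return collected
```

Recording the relative paths per kind also makes it easy to write them back into the bundle manifest later.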
</issue>
<code>
[start of tools/bundle-build/lib/component.py]
1 import os
2 import tempfile
3 import subprocess
4 from lib.git import GitRepository
5
6 class Component:
7 def __init__(self, data):
8 self._name = data['name']
9 self._repository = data['repository']
10 self._ref = data['ref']
11
12 def name(self):
13 return self._name
14
15 def repository(self):
16 return self._repository
17
18 def git_repository(self):
19 return self._git_repository
20
21 def ref(self):
22 return self._ref
23
24 def checkout(self):
25 self._git_repository = GitRepository(self.repository(), self.ref())
26
27 # script overridden in this repo
28 def custom_component_script_path(self):
29 dirname = os.path.dirname(os.path.abspath(__file__))
30 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))
31
32 # script inside the component repo
33 def component_script_path(self):
34 dirname = self.git_repository().dir()
35 return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))
36
37 # default gradle script
38 def default_script_path(self):
39 dirname = os.path.dirname(os.path.abspath(__file__))
40 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))
41
42 def build_script(self):
43 paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]
44 return next(filter(lambda path: os.path.exists(path), paths), None)
45
46 def build(self, version, arch):
47 build_script = f'{self.build_script()} {version} {arch}'
48 print(f'Running {build_script} ...')
49 self.git_repository().execute(build_script)
50
51 def artifacts_path(self):
52 dirname = self.git_repository().dir()
53 return os.path.realpath(os.path.join(dirname, 'artifacts'))
54
55 def export(self, dest):
56 artifacts_path = self.artifacts_path()
57 if os.path.exists(artifacts_path):
58 print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
59 self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
60 else:
61 print(f'No artifacts found in {artifacts_path}, skipping.')
62
63 def dict(self):
64 return {
65 'name': self.name(),
66 'repository': self.repository(),
67 'ref': self.ref(),
68 'sha': self.git_repository().sha()
69 }
70
[end of tools/bundle-build/lib/component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py
--- a/tools/bundle-build/lib/component.py
+++ b/tools/bundle-build/lib/component.py
@@ -21,6 +21,9 @@
def ref(self):
return self._ref
+ def artifacts(self):
+ return self._artifacts
+
def checkout(self):
self._git_repository = GitRepository(self.repository(), self.ref())
@@ -57,13 +60,29 @@
if os.path.exists(artifacts_path):
print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
+ self.set_artifacts()
else:
print(f'No artifacts found in {artifacts_path}, skipping.')
+ def set_artifacts(self):
+ self._artifacts = {key: self.file_paths(key) for key in ["maven", "plugins", "bundle", "libs"] if self.file_paths(key)}
+
+ def file_paths(self, dir_name):
+ artifacts_path = self.artifacts_path()
+ sub_dir = os.path.join(artifacts_path, dir_name)
+ file_paths = []
+ if os.path.exists(sub_dir):
+ for dir, dirs, files in os.walk(sub_dir):
+ for file_name in files:
+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)
+ file_paths.append(path)
+ return file_paths
+
def dict(self):
return {
'name': self.name(),
'repository': self.repository(),
'ref': self.ref(),
- 'sha': self.git_repository().sha()
+ 'sha': self.git_repository().sha(),
+ 'artifacts': self.artifacts()
}
| {"golden_diff": "diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py\n--- a/tools/bundle-build/lib/component.py\n+++ b/tools/bundle-build/lib/component.py\n@@ -21,6 +21,9 @@\n def ref(self):\n return self._ref\n \n+ def artifacts(self):\n+ return self._artifacts\n+\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n \n@@ -57,13 +60,29 @@\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n+ self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n \n+ def set_artifacts(self):\n+ self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n+\n+ def file_paths(self, dir_name):\n+ artifacts_path = self.artifacts_path()\n+ sub_dir = os.path.join(artifacts_path, dir_name)\n+ file_paths = []\n+ if os.path.exists(sub_dir):\n+ for dir, dirs, files in os.walk(sub_dir):\n+ for file_name in files:\n+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n+ file_paths.append(path)\n+ return file_paths\n+\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n- 'sha': self.git_repository().sha()\n+ 'sha': self.git_repository().sha(),\n+ 'artifacts': self.artifacts()\n }\n", "issue": "Script to build each component from manifest and assemble bundle.\nThis script should read a manifest and output all artifacts ready for upload.\r\nExample.\r\n/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle\r\n/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle\r\n/build/opensearch-sql-1.0.0.0-rc1.zip\r\n/build/org/opensearch... <- maven artifacts\r\n\r\nInput: to the script should be a manifest file location - format defined here #111 \r\nOutput: all required artifacts are written to ./build\r\n\r\n- [x] Clone each component repository defined in the manifest\r\n- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.\r\n- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134 \r\n\r\nTo make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. 
All artifacts will be under org/opensearch.\r\n\n", "before_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha()\n }\n", "path": "tools/bundle-build/lib/component.py"}]} | 1,489 | 403 |
gh_patches_debug_7646 | rasdani/github-patches | git_diff | lnbits__lnbits-194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LNURLp links give errors on Wallet of Satoshi and BlueWallet
Using this LNURLp link: https://lnbits.com/lnurlp/212
BlueWallet Error: "Alert: Bad response from server"
Wallet of Satoshi Error: "Error: Could not complete payment, please try again."
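If the failures stem from the pay link's optional success_url (which wallets try to open after payment), a simple server-side guard could reject insecure values up front. This is only a sketch of that idea, not the extension's actual validation code:

```python
from http import HTTPStatus


def validate_success_url(success_url):
    """Return an (error message, HTTP status) pair for insecure success URLs, else None."""
    if success_url and not success_url.startswith("https://"):
        return "Success URL must be a secure https:// link.", HTTPStatus.BAD_REQUEST
    return None
```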
</issue>
<code>
[start of lnbits/extensions/lnurlp/views_api.py]
1 from quart import g, jsonify, request
2 from http import HTTPStatus
3 from lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore
4
5 from lnbits.core.crud import get_user
6 from lnbits.decorators import api_check_wallet_key, api_validate_post_request
7 from lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis
8
9 from . import lnurlp_ext
10 from .crud import (
11 create_pay_link,
12 get_pay_link,
13 get_pay_links,
14 update_pay_link,
15 delete_pay_link,
16 )
17
18
19 @lnurlp_ext.route("/api/v1/currencies", methods=["GET"])
20 async def api_list_currencies_available():
21 return jsonify(list(currencies.keys()))
22
23
24 @lnurlp_ext.route("/api/v1/links", methods=["GET"])
25 @api_check_wallet_key("invoice")
26 async def api_links():
27 wallet_ids = [g.wallet.id]
28
29 if "all_wallets" in request.args:
30 wallet_ids = (await get_user(g.wallet.user)).wallet_ids
31
32 try:
33 return (
34 jsonify(
35 [
36 {**link._asdict(), **{"lnurl": link.lnurl}}
37 for link in await get_pay_links(wallet_ids)
38 ]
39 ),
40 HTTPStatus.OK,
41 )
42 except LnurlInvalidUrl:
43 return (
44 jsonify(
45 {
46 "message": "LNURLs need to be delivered over a publically accessible `https` domain or Tor."
47 }
48 ),
49 HTTPStatus.UPGRADE_REQUIRED,
50 )
51
52
53 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["GET"])
54 @api_check_wallet_key("invoice")
55 async def api_link_retrieve(link_id):
56 link = await get_pay_link(link_id)
57
58 if not link:
59 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
60
61 if link.wallet != g.wallet.id:
62 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
63
64 return jsonify({**link._asdict(), **{"lnurl": link.lnurl}}), HTTPStatus.OK
65
66
67 @lnurlp_ext.route("/api/v1/links", methods=["POST"])
68 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["PUT"])
69 @api_check_wallet_key("invoice")
70 @api_validate_post_request(
71 schema={
72 "description": {"type": "string", "empty": False, "required": True},
73 "min": {"type": "number", "min": 0.01, "required": True},
74 "max": {"type": "number", "min": 0.01, "required": True},
75 "currency": {"type": "string", "nullable": True, "required": False},
76 "comment_chars": {"type": "integer", "required": True, "min": 0, "max": 800},
77 "webhook_url": {"type": "string", "required": False},
78 "success_text": {"type": "string", "required": False},
79 "success_url": {"type": "string", "required": False},
80 }
81 )
82 async def api_link_create_or_update(link_id=None):
83 if g.data["min"] > g.data["max"]:
84 return jsonify({"message": "Min is greater than max."}), HTTPStatus.BAD_REQUEST
85
86 if g.data.get("currency") == None and (
87 round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
88 ):
89 return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
90
91 if link_id:
92 link = await get_pay_link(link_id)
93
94 if not link:
95 return (
96 jsonify({"message": "Pay link does not exist."}),
97 HTTPStatus.NOT_FOUND,
98 )
99
100 if link.wallet != g.wallet.id:
101 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
102
103 link = await update_pay_link(link_id, **g.data)
104 else:
105 link = await create_pay_link(wallet_id=g.wallet.id, **g.data)
106
107 return (
108 jsonify({**link._asdict(), **{"lnurl": link.lnurl}}),
109 HTTPStatus.OK if link_id else HTTPStatus.CREATED,
110 )
111
112
113 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["DELETE"])
114 @api_check_wallet_key("invoice")
115 async def api_link_delete(link_id):
116 link = await get_pay_link(link_id)
117
118 if not link:
119 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
120
121 if link.wallet != g.wallet.id:
122 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
123
124 await delete_pay_link(link_id)
125
126 return "", HTTPStatus.NO_CONTENT
127
128
129 @lnurlp_ext.route("/api/v1/rate/<currency>", methods=["GET"])
130 async def api_check_fiat_rate(currency):
131 try:
132 rate = await get_fiat_rate_satoshis(currency)
133 except AssertionError:
134 rate = None
135
136 return jsonify({"rate": rate}), HTTPStatus.OK
137
[end of lnbits/extensions/lnurlp/views_api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py
--- a/lnbits/extensions/lnurlp/views_api.py
+++ b/lnbits/extensions/lnurlp/views_api.py
@@ -87,6 +87,9 @@
round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
):
return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
+
+ if g.data["success_url"][:8] != "https://":
+ return jsonify({"message": "Success URL must be secure https://..."}), HTTPStatus.BAD_REQUEST
if link_id:
link = await get_pay_link(link_id)
| {"golden_diff": "diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py\n--- a/lnbits/extensions/lnurlp/views_api.py\n+++ b/lnbits/extensions/lnurlp/views_api.py\n@@ -87,6 +87,9 @@\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n+ \n+ if g.data[\"success_url\"][:8] != \"https://\":\n+ return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n \n if link_id:\n link = await get_pay_link(link_id)\n", "issue": "LNURLp links give errors on WalletofSatoshi and BlueWallet\nUsing this LNURLp link: https://lnbits.com/lnurlp/212\r\n\r\nBlueWallet Error: \"Alert: Bad response from server\"\r\nWallet of Satoshi Error: \"Error: Could not complete payment, please try again.\"\n", "before_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": 
\"Min is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}]} | 2,034 | 173 |
gh_patches_debug_37208 | rasdani/github-patches | git_diff | pymedusa__Medusa-3990 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Growl Not Registering Medusa
Medusa Info: Branch: master | Commit: d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe | Version: v0.2.1 | Database: 44.9
Python Version: 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)]
SSL Version: OpenSSL 1.0.2j 26 Sep 2016
OS: Windows-7-6.1.7601-SP1
Locale: en_GB.cp1252
**What you did:** Input ip:port to register growl
**What happened:** Nothing!
**What you expected:** Successful registration.
**Logs:**
```
2018-04-07 08:09:55 ERROR Thread-298 :: [d6eb72d] Exception generated: 'module' object has no attribute 'GNTPRegister'
Traceback (most recent call last):
File "C:\Medusa\Medusa\medusa\server\web\core\base.py", line 283, in async_call
result = function(**kwargs)
File "C:\Medusa\Medusa\medusa\server\web\home\handler.py", line 354, in testGrowl
result = notifiers.growl_notifier.test_notify(host, password)
File "C:\Medusa\Medusa\medusa\notifiers\growl.py", line 20, in test_notify
self._sendRegistration(host, password)
File "C:\Medusa\Medusa\medusa\notifiers\growl.py", line 172, in _sendRegistration
register = gntp.GNTPRegister()
AttributeError: 'module' object has no attribute 'GNTPRegister'
```
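The traceback points at the installed gntp package no longer exposing its classes at the top level; recent gntp releases keep them in the `gntp.core` module instead. A minimal sketch of a registration packet built through that module path (the notification names and headers here are illustrative):

```python
import gntp.core


def build_registration(app_name="Medusa", password=None, icon_url=None):
    """Build a GNTP registration packet using the gntp.core module path."""
    register = gntp.core.GNTPRegister()
    register.add_header("Application-Name", app_name)
    if icon_url:
        register.add_header("Application-Icon", icon_url)
    register.add_notification("Test", True)
    if password:
        register.set_password(password)
    return register.encode()
```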
</issue>
<code>
[start of medusa/notifiers/growl.py]
1 # coding=utf-8
2
3 from __future__ import print_function
4 from __future__ import unicode_literals
5
6 import logging
7 import socket
8 from builtins import object
9
10 import gntp
11
12 from medusa import app, common
13 from medusa.helper.exceptions import ex
14 from medusa.logger.adapters.style import BraceAdapter
15
16 log = BraceAdapter(logging.getLogger(__name__))
17 log.logger.addHandler(logging.NullHandler())
18
19
20 class Notifier(object):
21 def test_notify(self, host, password):
22 self._sendRegistration(host, password)
23 return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,
24 force=True)
25
26 def notify_snatch(self, ep_name, is_proper):
27 if app.GROWL_NOTIFY_ONSNATCH:
28 self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)
29
30 def notify_download(self, ep_name):
31 if app.GROWL_NOTIFY_ONDOWNLOAD:
32 self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
33
34 def notify_subtitle_download(self, ep_name, lang):
35 if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
36 self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)
37
38 def notify_git_update(self, new_version='??'):
39 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
40 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
41 self._sendGrowl(title, update_text + new_version)
42
43 def notify_login(self, ipaddress=''):
44 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
45 title = common.notifyStrings[common.NOTIFY_LOGIN]
46 self._sendGrowl(title, update_text.format(ipaddress))
47
48 def _send_growl(self, options, message=None):
49
50 # Send Notification
51 notice = gntp.GNTPNotice()
52
53 # Required
54 notice.add_header('Application-Name', options['app'])
55 notice.add_header('Notification-Name', options['name'])
56 notice.add_header('Notification-Title', options['title'])
57
58 if options['password']:
59 notice.set_password(options['password'])
60
61 # Optional
62 if options['sticky']:
63 notice.add_header('Notification-Sticky', options['sticky'])
64 if options['priority']:
65 notice.add_header('Notification-Priority', options['priority'])
66 if options['icon']:
67 notice.add_header('Notification-Icon', app.LOGO_URL)
68
69 if message:
70 notice.add_header('Notification-Text', message)
71
72 response = self._send(options['host'], options['port'], notice.encode(), options['debug'])
73 return True if isinstance(response, gntp.GNTPOK) else False
74
75 @staticmethod
76 def _send(host, port, data, debug=False):
77 if debug:
78 print('<Sending>\n', data, '\n</Sending>')
79
80 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
81 s.connect((host, port))
82 s.send(data)
83 response = gntp.parse_gntp(s.recv(1024))
84 s.close()
85
86 if debug:
87 print('<Received>\n', response, '\n</Received>')
88
89 return response
90
91 def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,
92 force=False):
93 if not app.USE_GROWL and not force:
94 return False
95
96 if name is None:
97 name = title
98
99 if host is None:
100 hostParts = app.GROWL_HOST.split(':')
101 else:
102 hostParts = host.split(':')
103
104 if len(hostParts) != 2 or hostParts[1] == '':
105 port = 23053
106 else:
107 port = int(hostParts[1])
108
109 growlHosts = [(hostParts[0], port)]
110
111 opts = {
112 'name': name,
113 'title': title,
114 'app': 'Medusa',
115 'sticky': None,
116 'priority': None,
117 'debug': False
118 }
119
120 if password is None:
121 opts['password'] = app.GROWL_PASSWORD
122 else:
123 opts['password'] = password
124
125 opts['icon'] = True
126
127 for pc in growlHosts:
128 opts['host'] = pc[0]
129 opts['port'] = pc[1]
130 log.debug(
131 u'GROWL: Sending growl to {host}:{port} - {msg!r}',
132 {'msg': message, 'host': opts['host'], 'port': opts['port']}
133 )
134 try:
135 if self._send_growl(opts, message):
136 return True
137 else:
138 if self._sendRegistration(host, password):
139 return self._send_growl(opts, message)
140 else:
141 return False
142 except Exception as error:
143 log.warning(
144 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',
145 {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}
146 )
147 return False
148
149 def _sendRegistration(self, host=None, password=None):
150 opts = {}
151
152 if host is None:
153 hostParts = app.GROWL_HOST.split(':')
154 else:
155 hostParts = host.split(':')
156
157 if len(hostParts) != 2 or hostParts[1] == '':
158 port = 23053
159 else:
160 port = int(hostParts[1])
161
162 opts['host'] = hostParts[0]
163 opts['port'] = port
164
165 if password is None:
166 opts['password'] = app.GROWL_PASSWORD
167 else:
168 opts['password'] = password
169
170 opts['app'] = 'Medusa'
171 opts['debug'] = False
172
173 # Send Registration
174 register = gntp.GNTPRegister()
175 register.add_header('Application-Name', opts['app'])
176 register.add_header('Application-Icon', app.LOGO_URL)
177
178 register.add_notification('Test', True)
179 register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)
180 register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)
181 register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)
182
183 if opts['password']:
184 register.set_password(opts['password'])
185
186 try:
187 return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])
188 except Exception as error:
189 log.warning(
190 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',
191 {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}
192 )
193 return False
194
[end of medusa/notifiers/growl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py
--- a/medusa/notifiers/growl.py
+++ b/medusa/notifiers/growl.py
@@ -25,7 +25,10 @@
def notify_snatch(self, ep_name, is_proper):
if app.GROWL_NOTIFY_ONSNATCH:
- self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)
+ self._sendGrowl(
+ common.notifyStrings[
+ (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]
+ ], ep_name)
def notify_download(self, ep_name):
if app.GROWL_NOTIFY_ONDOWNLOAD:
@@ -47,16 +50,13 @@
def _send_growl(self, options, message=None):
- # Send Notification
- notice = gntp.GNTPNotice()
-
- # Required
- notice.add_header('Application-Name', options['app'])
- notice.add_header('Notification-Name', options['name'])
- notice.add_header('Notification-Title', options['title'])
-
- if options['password']:
- notice.set_password(options['password'])
+ # Initialize Notification
+ notice = gntp.core.GNTPNotice(
+ app=options['app'],
+ name=options['name'],
+ title=options['title'],
+ password=options['password'],
+ )
# Optional
if options['sticky']:
@@ -70,7 +70,7 @@
notice.add_header('Notification-Text', message)
response = self._send(options['host'], options['port'], notice.encode(), options['debug'])
- return True if isinstance(response, gntp.GNTPOK) else False
+ return True if isinstance(response, gntp.core.GNTPOK) else False
@staticmethod
def _send(host, port, data, debug=False):
@@ -80,7 +80,7 @@
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(data)
- response = gntp.parse_gntp(s.recv(1024))
+ response = gntp.core.parse_gntp(s.recv(1024))
s.close()
if debug:
@@ -171,7 +171,7 @@
opts['debug'] = False
# Send Registration
- register = gntp.GNTPRegister()
+ register = gntp.core.GNTPRegister()
register.add_header('Application-Name', opts['app'])
register.add_header('Application-Icon', app.LOGO_URL)
| {"golden_diff": "diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py\n--- a/medusa/notifiers/growl.py\n+++ b/medusa/notifiers/growl.py\n@@ -25,7 +25,10 @@\n \n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n- self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)\n+ self._sendGrowl(\n+ common.notifyStrings[\n+ (common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]\n+ ], ep_name)\n \n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n@@ -47,16 +50,13 @@\n \n def _send_growl(self, options, message=None):\n \n- # Send Notification\n- notice = gntp.GNTPNotice()\n-\n- # Required\n- notice.add_header('Application-Name', options['app'])\n- notice.add_header('Notification-Name', options['name'])\n- notice.add_header('Notification-Title', options['title'])\n-\n- if options['password']:\n- notice.set_password(options['password'])\n+ # Initialize Notification\n+ notice = gntp.core.GNTPNotice(\n+ app=options['app'],\n+ name=options['name'],\n+ title=options['title'],\n+ password=options['password'],\n+ )\n \n # Optional\n if options['sticky']:\n@@ -70,7 +70,7 @@\n notice.add_header('Notification-Text', message)\n \n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n- return True if isinstance(response, gntp.GNTPOK) else False\n+ return True if isinstance(response, gntp.core.GNTPOK) else False\n \n @staticmethod\n def _send(host, port, data, debug=False):\n@@ -80,7 +80,7 @@\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n- response = gntp.parse_gntp(s.recv(1024))\n+ response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n \n if debug:\n@@ -171,7 +171,7 @@\n opts['debug'] = False\n \n # Send Registration\n- register = gntp.GNTPRegister()\n+ register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n", "issue": "Growl Not Registering Medusa\nMedusa Info: | Branch: master Commit: d6eb72d4e1019e5d1286ae6ac1d7e4b318229ebe Version: v0.2.1 Database: 44.9\r\nPython Version: | 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)]\r\nSSL Version: | OpenSSL 1.0.2j 26 Sep 2016\r\nOS: | Windows-7-6.1.7601-SP1\r\nLocale: | en_GB.cp1252\r\n\r\n**What you did: Input ip:port to register gowl\r\n**What happened: Nothing!\r\n**What you expected: Successful registration.\r\n**Logs:**\r\n```\r\n2018-04-07 08:09:55 ERROR Thread-298 :: [d6eb72d] Exception generated: 'module' object has no attribute 'GNTPRegister'\r\nTraceback (most recent call last):\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\core\\base.py\", line 283, in async_call\r\n result = function(**kwargs)\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\home\\handler.py\", line 354, in testGrowl\r\n result = notifiers.growl_notifier.test_notify(host, password)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\growl.py\", line 20, in test_notify\r\n self._sendRegistration(host, password)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\growl.py\", line 172, in _sendRegistration\r\n register = gntp.GNTPRegister()\r\nAttributeError: 'module' object has no attribute 'GNTPRegister'\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp\n\nfrom medusa import 
app, common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, ep_name, is_proper):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]], ep_name)\n\n def notify_download(self, ep_name):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Send Notification\n notice = gntp.GNTPNotice()\n\n # Required\n notice.add_header('Application-Name', options['app'])\n notice.add_header('Notification-Name', options['name'])\n notice.add_header('Notification-Title', options['title'])\n\n if options['password']:\n notice.set_password(options['password'])\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if 
self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py"}]} | 2,974 | 610 |
gh_patches_debug_37245 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2351 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider using binary search for ExplicitBucketHistogram
https://github.com/open-telemetry/opentelemetry-python/blob/dfb5c66ae310001bb40326f6286345b7fa252aba/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py#L129-L134
Could use some benchmarks, but this is likely to perform better with binary search (builtin `bisect` module) above a certain number of buckets. Also, using a list instead of `OrderedDict` may be faster
</issue>
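For illustration, here is a minimal sketch (not part of the original issue) of the bucket lookup the issue proposes: `bisect.bisect_left` over a plain list of boundaries instead of a linear scan over `OrderedDict` keys. The boundaries and sample values are made up, and the approach mirrors the golden diff further down in this record.

```python
from bisect import bisect_left
from math import inf

# Explicit bucket boundaries; the trailing inf catches everything above the last bound.
boundaries = [0, 5, 10, 25, 50, 75, 100, 250, 500, 1000, inf]
bucket_counts = [0] * len(boundaries)

def record(value):
    # bisect_left returns the index of the first boundary >= value
    # (values equal to a boundary land in that boundary's bucket).
    bucket_counts[bisect_left(boundaries, value)] += 1

for measurement in (3, 7, 7, 999, 5000):  # made-up sample measurements
    record(measurement)

print(bucket_counts)  # -> [0, 1, 2, 0, 0, 0, 0, 0, 0, 1, 1]
```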
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from abc import ABC, abstractmethod
16 from collections import OrderedDict
17 from logging import getLogger
18 from math import inf
19 from threading import Lock
20 from typing import Generic, Optional, Sequence, TypeVar
21
22 from opentelemetry.sdk._metrics.measurement import Measurement
23 from opentelemetry.sdk._metrics.point import (
24 AggregationTemporality,
25 Gauge,
26 Histogram,
27 PointT,
28 Sum,
29 )
30 from opentelemetry.util._time import _time_ns
31
32 _PointVarT = TypeVar("_PointVarT", bound=PointT)
33
34 _logger = getLogger(__name__)
35
36
37 class _InstrumentMonotonicityAwareAggregation:
38 def __init__(self, instrument_is_monotonic: bool):
39 self._instrument_is_monotonic = instrument_is_monotonic
40 super().__init__()
41
42
43 class Aggregation(ABC, Generic[_PointVarT]):
44 def __init__(self):
45 self._lock = Lock()
46
47 @abstractmethod
48 def aggregate(self, measurement: Measurement) -> None:
49 pass
50
51 @abstractmethod
52 def collect(self) -> Optional[_PointVarT]:
53 pass
54
55
56 class SynchronousSumAggregation(
57 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]
58 ):
59 def __init__(self, instrument_is_monotonic: bool):
60 super().__init__(instrument_is_monotonic)
61 self._value = 0
62 self._start_time_unix_nano = _time_ns()
63
64 def aggregate(self, measurement: Measurement) -> None:
65 with self._lock:
66 self._value = self._value + measurement.value
67
68 def collect(self) -> Optional[Sum]:
69 """
70 Atomically return a point for the current value of the metric and
71 reset the aggregation value.
72 """
73 now = _time_ns()
74
75 with self._lock:
76 value = self._value
77 start_time_unix_nano = self._start_time_unix_nano
78
79 self._value = 0
80 self._start_time_unix_nano = now + 1
81
82 return Sum(
83 aggregation_temporality=AggregationTemporality.DELTA,
84 is_monotonic=self._instrument_is_monotonic,
85 start_time_unix_nano=start_time_unix_nano,
86 time_unix_nano=now,
87 value=value,
88 )
89
90
91 class AsynchronousSumAggregation(
92 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]
93 ):
94 def __init__(self, instrument_is_monotonic: bool):
95 super().__init__(instrument_is_monotonic)
96 self._value = None
97 self._start_time_unix_nano = _time_ns()
98
99 def aggregate(self, measurement: Measurement) -> None:
100 with self._lock:
101 self._value = measurement.value
102
103 def collect(self) -> Optional[Sum]:
104 """
105 Atomically return a point for the current value of the metric.
106 """
107 if self._value is None:
108 return None
109
110 return Sum(
111 start_time_unix_nano=self._start_time_unix_nano,
112 time_unix_nano=_time_ns(),
113 value=self._value,
114 aggregation_temporality=AggregationTemporality.CUMULATIVE,
115 is_monotonic=self._instrument_is_monotonic,
116 )
117
118
119 class LastValueAggregation(Aggregation[Gauge]):
120 def __init__(self):
121 super().__init__()
122 self._value = None
123
124 def aggregate(self, measurement: Measurement):
125 with self._lock:
126 self._value = measurement.value
127
128 def collect(self) -> Optional[Gauge]:
129 """
130 Atomically return a point for the current value of the metric.
131 """
132 if self._value is None:
133 return None
134
135 return Gauge(
136 time_unix_nano=_time_ns(),
137 value=self._value,
138 )
139
140
141 class ExplicitBucketHistogramAggregation(Aggregation[Histogram]):
142 def __init__(
143 self,
144 boundaries: Sequence[int] = (
145 0,
146 5,
147 10,
148 25,
149 50,
150 75,
151 100,
152 250,
153 500,
154 1000,
155 ),
156 record_min_max: bool = True,
157 ):
158 super().__init__()
159 self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])
160 self._min = inf
161 self._max = -inf
162 self._sum = 0
163 self._record_min_max = record_min_max
164 self._start_time_unix_nano = _time_ns()
165 self._boundaries = boundaries
166
167 def aggregate(self, measurement: Measurement) -> None:
168
169 value = measurement.value
170
171 if self._record_min_max:
172 self._min = min(self._min, value)
173 self._max = max(self._max, value)
174
175 self._sum += value
176
177 for key in self._value.keys():
178
179 if value < key:
180 self._value[key] = self._value[key] + 1
181
182 break
183
184 def collect(self) -> Optional[Histogram]:
185 """
186 Atomically return a point for the current value of the metric.
187 """
188 now = _time_ns()
189
190 with self._lock:
191 value = self._value
192 start_time_unix_nano = self._start_time_unix_nano
193
194 self._value = OrderedDict(
195 [(key, 0) for key in (*self._boundaries, inf)]
196 )
197 self._start_time_unix_nano = now + 1
198
199 return Histogram(
200 start_time_unix_nano=start_time_unix_nano,
201 time_unix_nano=now,
202 bucket_counts=tuple(value.values()),
203 explicit_bounds=self._boundaries,
204 aggregation_temporality=AggregationTemporality.DELTA,
205 )
206
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
@@ -13,7 +13,7 @@
# limitations under the License.
from abc import ABC, abstractmethod
-from collections import OrderedDict
+from bisect import bisect_left
from logging import getLogger
from math import inf
from threading import Lock
@@ -156,13 +156,14 @@
record_min_max: bool = True,
):
super().__init__()
- self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])
+ # pylint: disable=unnecessary-comprehension
+ self._boundaries = [boundary for boundary in (*boundaries, inf)]
+ self.value = [0 for _ in range(len(self._boundaries))]
self._min = inf
self._max = -inf
self._sum = 0
self._record_min_max = record_min_max
self._start_time_unix_nano = _time_ns()
- self._boundaries = boundaries
def aggregate(self, measurement: Measurement) -> None:
@@ -174,12 +175,7 @@
self._sum += value
- for key in self._value.keys():
-
- if value < key:
- self._value[key] = self._value[key] + 1
-
- break
+ self.value[bisect_left(self._boundaries, value)] += 1
def collect(self) -> Optional[Histogram]:
"""
@@ -188,18 +184,16 @@
now = _time_ns()
with self._lock:
- value = self._value
+ value = self.value
start_time_unix_nano = self._start_time_unix_nano
- self._value = OrderedDict(
- [(key, 0) for key in (*self._boundaries, inf)]
- )
+ self.value = [0 for _ in range(len(self._boundaries))]
self._start_time_unix_nano = now + 1
return Histogram(
start_time_unix_nano=start_time_unix_nano,
time_unix_nano=now,
- bucket_counts=tuple(value.values()),
+ bucket_counts=tuple(value),
explicit_bounds=self._boundaries,
aggregation_temporality=AggregationTemporality.DELTA,
)
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n from abc import ABC, abstractmethod\n-from collections import OrderedDict\n+from bisect import bisect_left\n from logging import getLogger\n from math import inf\n from threading import Lock\n@@ -156,13 +156,14 @@\n record_min_max: bool = True,\n ):\n super().__init__()\n- self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])\n+ # pylint: disable=unnecessary-comprehension\n+ self._boundaries = [boundary for boundary in (*boundaries, inf)]\n+ self.value = [0 for _ in range(len(self._boundaries))]\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n- self._boundaries = boundaries\n \n def aggregate(self, measurement: Measurement) -> None:\n \n@@ -174,12 +175,7 @@\n \n self._sum += value\n \n- for key in self._value.keys():\n-\n- if value < key:\n- self._value[key] = self._value[key] + 1\n-\n- break\n+ self.value[bisect_left(self._boundaries, value)] += 1\n \n def collect(self) -> Optional[Histogram]:\n \"\"\"\n@@ -188,18 +184,16 @@\n now = _time_ns()\n \n with self._lock:\n- value = self._value\n+ value = self.value\n start_time_unix_nano = self._start_time_unix_nano\n \n- self._value = OrderedDict(\n- [(key, 0) for key in (*self._boundaries, inf)]\n- )\n+ self.value = [0 for _ in range(len(self._boundaries))]\n self._start_time_unix_nano = now + 1\n \n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n- bucket_counts=tuple(value.values()),\n+ bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n )\n", "issue": "Consider using binary search for ExplicitBucketHistogram\nhttps://github.com/open-telemetry/opentelemetry-python/blob/dfb5c66ae310001bb40326f6286345b7fa252aba/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py#L129-L134\r\n\r\nCould use some benchmarks, but this is likely to perform better with binary search (builtin `bisect` module) above a certain number of buckets. 
Also, using a list instead of `OrderedDict` may be faster\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom collections import OrderedDict\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, Optional, Sequence, TypeVar\n\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _InstrumentMonotonicityAwareAggregation:\n def __init__(self, instrument_is_monotonic: bool):\n self._instrument_is_monotonic = instrument_is_monotonic\n super().__init__()\n\n\nclass Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass SynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = 0\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass AsynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = None\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Sum(\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=_time_ns(),\n value=self._value,\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n )\n\n\nclass LastValueAggregation(Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = 
measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=self._value,\n )\n\n\nclass ExplicitBucketHistogramAggregation(Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[int] = (\n 0,\n 5,\n 10,\n 25,\n 50,\n 75,\n 100,\n 250,\n 500,\n 1000,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._value = OrderedDict([(key, 0) for key in (*boundaries, inf)])\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n self._boundaries = boundaries\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n for key in self._value.keys():\n\n if value < key:\n self._value[key] = self._value[key] + 1\n\n break\n\n def collect(self) -> Optional[Histogram]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = OrderedDict(\n [(key, 0) for key in (*self._boundaries, inf)]\n )\n self._start_time_unix_nano = now + 1\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value.values()),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}]} | 2,577 | 576 |
gh_patches_debug_20567 | rasdani/github-patches | git_diff | pantsbuild__pants-13467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pants package does not build missing docker images if previous build was cached.
**Describe the bug**
Pants' caching of build targets does not take into account whether the final target still exists.
Take this example: https://www.pantsbuild.org/v2.8/docs/docker#example
```
$ ./pants package src/docker/hw/Dockerfile
[...]
18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex
18:07:31.83 [INFO] Completed: Building docker image helloworld:latest
18:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
helloworld latest abcdefabcdef 6 seconds ago 420MB
$ docker rmi helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
$ ./pants package src/docker/hw/Dockerfile
19:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
```
If you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.
**Pants version**
2.8rc1
**OS**
Linux
</issue>
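For context, the golden diff further down in this record addresses this by marking the docker-build `Process` as per-session, so the image is rebuilt on every pants run instead of being served from the process cache. A minimal sketch of that idea, assuming the pants engine API exactly as it appears in this record's code and diff (`Process`, `ProcessCacheScope.PER_SESSION`):

```python
# Sketch only: relies on the pants engine API shown in this record.
from pants.engine.process import Process, ProcessCacheScope


def build_image_process(argv, input_digest, env=None) -> Process:
    # PER_SESSION: the result is reused only within one pants run, so a docker
    # image deleted outside pants is rebuilt by the next `./pants package`.
    return Process(
        argv=tuple(argv),
        description="Building docker image",
        env=env,
        input_digest=input_digest,
        cache_scope=ProcessCacheScope.PER_SESSION,
    )
```

The docker daemon's own layer cache still keeps repeated builds cheap, so re-running the process each session mainly costs the image tagging step.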
<code>
[start of src/python/pants/backend/docker/util_rules/docker_binary.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Mapping
8
9 from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
10 from pants.engine.fs import Digest
11 from pants.engine.process import (
12 BinaryNotFoundError,
13 BinaryPath,
14 BinaryPathRequest,
15 BinaryPaths,
16 BinaryPathTest,
17 Process,
18 SearchPath,
19 )
20 from pants.engine.rules import Get, collect_rules, rule
21 from pants.util.logging import LogLevel
22 from pants.util.strutil import pluralize
23
24
25 class DockerBinary(BinaryPath):
26 """The `docker` binary."""
27
28 DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
29
30 def build_image(
31 self,
32 tags: tuple[str, ...],
33 digest: Digest,
34 dockerfile: str | None = None,
35 build_args: DockerBuildArgs | None = None,
36 env: Mapping[str, str] | None = None,
37 ) -> Process:
38 args = [self.path, "build"]
39
40 for tag in tags:
41 args.extend(["-t", tag])
42
43 if build_args:
44 for build_arg in build_args:
45 args.extend(["--build-arg", build_arg])
46
47 if dockerfile:
48 args.extend(["-f", dockerfile])
49
50 # Add build context root.
51 args.append(".")
52
53 return Process(
54 argv=tuple(args),
55 description=(
56 f"Building docker image {tags[0]}"
57 + (f" +{pluralize(len(tags)-1, 'additional tag')}." if len(tags) > 1 else ".")
58 ),
59 env=env,
60 input_digest=digest,
61 )
62
63 def push_image(self, tags: tuple[str, ...]) -> Process | None:
64 if not tags:
65 return None
66
67 return Process(
68 argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
69 )
70
71
72 @dataclass(frozen=True)
73 class DockerBinaryRequest:
74 search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH
75
76
77 @rule(desc="Finding the `docker` binary", level=LogLevel.DEBUG)
78 async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:
79 request = BinaryPathRequest(
80 binary_name="docker",
81 search_path=docker_request.search_path,
82 test=BinaryPathTest(args=["-v"]),
83 )
84 paths = await Get(BinaryPaths, BinaryPathRequest, request)
85 first_path = paths.first_path
86 if not first_path:
87 raise BinaryNotFoundError.from_request(request, rationale="interact with the docker daemon")
88 return DockerBinary(first_path.path, first_path.fingerprint)
89
90
91 @rule
92 async def get_docker() -> DockerBinary:
93 return await Get(DockerBinary, DockerBinaryRequest())
94
95
96 def rules():
97 return collect_rules()
98
[end of src/python/pants/backend/docker/util_rules/docker_binary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py
--- a/src/python/pants/backend/docker/util_rules/docker_binary.py
+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py
@@ -15,6 +15,7 @@
BinaryPaths,
BinaryPathTest,
Process,
+ ProcessCacheScope,
SearchPath,
)
from pants.engine.rules import Get, collect_rules, rule
@@ -58,6 +59,7 @@
),
env=env,
input_digest=digest,
+ cache_scope=ProcessCacheScope.PER_SESSION,
)
def push_image(self, tags: tuple[str, ...]) -> Process | None:
@@ -65,7 +67,9 @@
return None
return Process(
- argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
+ argv=(self.path, "push", *tags),
+ cache_scope=ProcessCacheScope.PER_SESSION,
+ description=f"Pushing docker image {tags[0]}",
)
| {"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py\n--- a/src/python/pants/backend/docker/util_rules/docker_binary.py\n+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py\n@@ -15,6 +15,7 @@\n BinaryPaths,\n BinaryPathTest,\n Process,\n+ ProcessCacheScope,\n SearchPath,\n )\n from pants.engine.rules import Get, collect_rules, rule\n@@ -58,6 +59,7 @@\n ),\n env=env,\n input_digest=digest,\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n )\n \n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n@@ -65,7 +67,9 @@\n return None\n \n return Process(\n- argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n+ argv=(self.path, \"push\", *tags),\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n+ description=f\"Pushing docker image {tags[0]}\",\n )\n", "issue": "pants package does not build missing docker images if previous build was cached.\n**Describe the bug**\r\nPant's caching of build targets does not take into consideration that the final target does not exist.\r\n\r\nTake this example: https://www.pantsbuild.org/v2.8/docs/docker#example\r\n\r\n```\r\n$ ./pants package src/docker/hw/Dockerfile\r\n[...]\r\n18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex\r\n18:07:31.83 [INFO] Completed: Building docker image helloworld:latest\r\n18:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\nhelloworld latest abcdefabcdef 6 seconds ago 420MB\r\n\r\n$ docker rmi helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n\r\n$ ./pants package src/docker/hw/Dockerfile\r\n19:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n```\r\nIf you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.\r\n\r\n**Pants version**\r\n2.8rc1\r\n\r\n**OS**\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if 
len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}]} | 1,682 | 245 |
gh_patches_debug_17413 | rasdani/github-patches | git_diff | pyodide__pyodide-3853 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proxied JS method.apply(context, list) fail while method(*list) doesn't
## 🐛 Bug
While working on [this PyScript issue](https://github.com/pyscript/pyscript/pull/1459) I've noticed that `context.method.apply(context, list)` doesn't work while `context.method(*list)` does.
I don't mind using the latter as that's also more Pythonic but that might surprise JS developers using Pyodide proxies that mimic JS APIs.
### To Reproduce
```python
import js
classList = js.document.body.classList
classList.add.apply(classList, ["a", "b"])
```
### Expected behavior
The method should be invoked with *n* arguments, as per the JS spec.
### Environment
- Pyodide Version<!-- (e.g. 1.8.1) -->: latest
- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: any
- Any other relevant information: nope
<!-- If you are building Pyodide by yourself, please also include this information: -->
<!--
- Commit hash of Pyodide git repository:
- Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:
-->
### Additional context
Happy to have it as won't fix but at least there's a related issue that explain the *gotcha*.
</issue>
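As a quick illustration of the behaviour described above: the spread form from the issue works, while the `apply` form does not. The `to_js` variant at the end is an assumed workaround, not something verified in the issue.

```python
import js

classList = js.document.body.classList

# Works: unpack the Python list into positional arguments.
classList.add(*["a", "b"])

# Fails at the time of the report: Function.apply with a proxied Python list.
# classList.add.apply(classList, ["a", "b"])

# Possible workaround (assumption, not verified in the issue): hand `apply` a
# real JS array instead of a proxied Python list.
# from pyodide.ffi import to_js
# classList.add.apply(classList, to_js(["a", "b"]))
```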
<code>
[start of docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py]
1 from sphinx.application import Sphinx
2 from sphinx.ext.intersphinx import InventoryAdapter
3
4 DATA = {
5 "js:function": {
6 "setTimeout": "API/",
7 "clearTimeout": "API/",
8 "setInterval": "API/",
9 "clearInterval": "API/",
10 "fetch": "API/",
11 "eval": "$global/",
12 "Object.fromEntries": "$global/",
13 "Reflect.ownKeys": "$global/",
14 "Array.from": "$global/",
15 "Atomics.wait": "$global/",
16 },
17 "js:class": {
18 "Array": "$global/",
19 "NodeList": "API/",
20 "HTMLCollection": "API/",
21 "Generator": "$global/",
22 "AsyncGenerator": "$global/",
23 "Date": "$global/",
24 "ArrayBuffer": "$global/",
25 "SharedArrayBuffer": "$global/",
26 "TypedArray": "$global/",
27 "TextEncoder": "$global/",
28 "TextDecoder": "$global/",
29 "DataView": "$global/",
30 "Uint8Array": "$global/",
31 "Map": "$global/",
32 "Set": "$global/",
33 # the JavaScript domain has no exception type for some reason...
34 "Error": "$global/",
35 "Function": "$global/",
36 "Promise": "$global/",
37 "FileSystemDirectoryHandle": "API/",
38 },
39 "js:method": {
40 "Iterator.next": "$reference/Iteration_protocols#next",
41 "AsyncIterator.next": "$reference/Iteration_protocols#next_2",
42 "Generator.next": "$global/",
43 "Generator.throw": "$global/",
44 "Generator.return": "$global/",
45 "AsyncGenerator.next": "$global/",
46 "AsyncGenerator.throw": "$global/",
47 "AsyncGenerator.return": "$global/",
48 "Response.clone": "API/",
49 "Response.arrayBuffer": "API/",
50 "EventTarget.addEventListener": "API/",
51 "EventTarget.removeEventListener": "API/",
52 "Promise.then": "$global/",
53 "Promise.catch": "$global/",
54 "Promise.finally": "$global/",
55 "Function.apply": "$global/",
56 "Function.bind": "$global/",
57 "Function.call": "$global/",
58 },
59 "js:data": {
60 "Iterable": "$reference/Iteration_protocols#the_iterable_protocol",
61 "IteratorResult": "$reference/Iteration_protocols#next",
62 "Iterator": "$reference/Iteration_protocols#the_iterator_protocol",
63 "AsyncIterator": "$reference/Iteration_protocols#the_async_iterator_and_async_iterable_protocols",
64 "Symbol.asyncIterator": "$global/",
65 "Symbol.iterator": "$global/",
66 "Symbol.toStringTag": "$global/",
67 "FinalizationRegistry": "$global/",
68 "globalThis": "$global/",
69 "NaN": "$global/",
70 "undefined": "$global/",
71 "BigInt": "$global/",
72 "Number": "$global/",
73 "String": "$global/",
74 "Boolean": "$global/",
75 "Object": "$global/",
76 "Number.MAX_SAFE_INTEGER": "$global/",
77 "null": "$reference/Operators/",
78 "Response": "API/",
79 "TypedArray.BYTES_PER_ELEMENT": "$global/",
80 },
81 "js:attribute": {
82 "TypedArray.byteLength": "$global/",
83 "Response.type": "API/",
84 "Response.url": "API/",
85 "Response.statusText": "API/",
86 "Response.bodyUsed": "API/",
87 "Response.ok": "API/",
88 "Response.redirected": "API/",
89 "Response.status": "API/",
90 },
91 "std:label": {"async function": "$reference/Statements/async_function"},
92 }
93
94 JSDATA = set(DATA["js:data"].keys())
95 JSDATA.update([x.lower() for x in JSDATA])
96 JSDATA.add("void")
97 JSDATA.add("any")
98 JSCLASS = set(DATA["js:class"].keys())
99
100 # Each entry is a four tuple:
101 # (project_name, project_version, url, link_text)
102 #
103 # If link_text is "-" the original name of the xref will be used as the link
104 # text which is good enough for us.
105 PROJECT_NAME = "MDN docs"
106 PROJECT_VERSION = "" # MDN docs are not really versioned
107 USE_NAME_AS_LINK_TEXT = "-"
108
109 INVDATA: dict[str, dict[str, tuple[str, str, str, str]]] = {}
110 for type, entries in DATA.items():
111 type_values = INVDATA.setdefault(type, {})
112 for key, value in entries.items():
113 value = value.replace("$reference", "JavaScript/Reference")
114 value = value.replace("$global", "JavaScript/Reference/Global_Objects")
115 if value.endswith("/"):
116 value += key.replace(".", "/")
117 url = f"https://developer.mozilla.org/en-US/docs/Web/{value}"
118 type_values[key] = (PROJECT_NAME, PROJECT_VERSION, url, USE_NAME_AS_LINK_TEXT)
119 type_values[key.lower()] = (
120 PROJECT_NAME,
121 PROJECT_VERSION,
122 url,
123 USE_NAME_AS_LINK_TEXT,
124 )
125
126 for key, url in [
127 ("void", "https://www.typescriptlang.org/docs/handbook/2/functions.html#void"),
128 ("any", "https://www.typescriptlang.org/docs/handbook/2/everyday-types.html#any"),
129 ]:
130 INVDATA["js:data"][key] = (
131 "typescript docs",
132 "",
133 url,
134 "-",
135 )
136
137 for key in ["stdin", "stdout", "stderr"]:
138 INVDATA["js:data"][f"process.{key}"] = (
139 "node docs",
140 "",
141 f"https://nodejs.org/api/process.html#process{key}",
142 "-",
143 )
144
145
146 def add_mdn_xrefs(app: Sphinx) -> None:
147 """Add cross referencing to Mozilla Developer Network documentation"""
148 inventories = InventoryAdapter(app.builder.env)
149 inventories.named_inventory["mdn"] = INVDATA
150 for type, objects in INVDATA.items():
151 inventories.main_inventory.setdefault(type, {}).update(objects)
152
153
154 __all__ = ["add_mdn_xrefs"]
155
[end of docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py
--- a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py
+++ b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py
@@ -55,6 +55,25 @@
"Function.apply": "$global/",
"Function.bind": "$global/",
"Function.call": "$global/",
+ "Array.join": "$global/",
+ "Array.slice": "$global/",
+ "Array.lastIndexOf": "$global/",
+ "Array.indexOf": "$global/",
+ "Array.forEach": "$global/",
+ "Array.map": "$global/",
+ "Array.filter": "$global/",
+ "Array.reduce": "$global/",
+ "Array.reduceRight": "$global/",
+ "Array.some": "$global/",
+ "Array.every": "$global/",
+ "Array.at": "$global/",
+ "Array.concat": "$global/",
+ "Array.includes": "$global/",
+ "Array.entries": "$global/",
+ "Array.keys": "$global/",
+ "Array.values": "$global/",
+ "Array.find": "$global/",
+ "Array.findIndex": "$global/",
},
"js:data": {
"Iterable": "$reference/Iteration_protocols#the_iterable_protocol",
| {"golden_diff": "diff --git a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n--- a/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n+++ b/docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py\n@@ -55,6 +55,25 @@\n \"Function.apply\": \"$global/\",\n \"Function.bind\": \"$global/\",\n \"Function.call\": \"$global/\",\n+ \"Array.join\": \"$global/\",\n+ \"Array.slice\": \"$global/\",\n+ \"Array.lastIndexOf\": \"$global/\",\n+ \"Array.indexOf\": \"$global/\",\n+ \"Array.forEach\": \"$global/\",\n+ \"Array.map\": \"$global/\",\n+ \"Array.filter\": \"$global/\",\n+ \"Array.reduce\": \"$global/\",\n+ \"Array.reduceRight\": \"$global/\",\n+ \"Array.some\": \"$global/\",\n+ \"Array.every\": \"$global/\",\n+ \"Array.at\": \"$global/\",\n+ \"Array.concat\": \"$global/\",\n+ \"Array.includes\": \"$global/\",\n+ \"Array.entries\": \"$global/\",\n+ \"Array.keys\": \"$global/\",\n+ \"Array.values\": \"$global/\",\n+ \"Array.find\": \"$global/\",\n+ \"Array.findIndex\": \"$global/\",\n },\n \"js:data\": {\n \"Iterable\": \"$reference/Iteration_protocols#the_iterable_protocol\",\n", "issue": "Proxied JS method.apply(context, list) fail while method(*list) doesn't\n## \ud83d\udc1b Bug\r\n\r\nWhile working on [this PyScript issue](https://github.com/pyscript/pyscript/pull/1459) I've noticed that `context.method.apply(context, list)` doesn't work while `context.method(*list)` does.\r\n\r\nI don't mind using the latter as that's also more Pythonic but that might surprise JS developers using Pyodide proxies that mimic JS APIs.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport js\r\nclassList = js.document.body.classList\r\nclassList.add.apply(classList, [\"a\", \"b\"])\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe method should be invoked with *n* arguments as by JS specs.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->: latest\r\n- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: any\r\n- Any other relevant information: nope\r\n\r\n<!-- If you are building Pyodide by yourself, please also include these information: -->\r\n\r\n<!--\r\n- Commit hash of Pyodide git repository:\r\n- Build environment<!--(e.g. 
Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:\r\n-->\r\n\r\n### Additional context\r\n\r\nHappy to have it as won't fix but at least there's a related issue that explain the *gotcha*.\r\n\n", "before_files": [{"content": "from sphinx.application import Sphinx\nfrom sphinx.ext.intersphinx import InventoryAdapter\n\nDATA = {\n \"js:function\": {\n \"setTimeout\": \"API/\",\n \"clearTimeout\": \"API/\",\n \"setInterval\": \"API/\",\n \"clearInterval\": \"API/\",\n \"fetch\": \"API/\",\n \"eval\": \"$global/\",\n \"Object.fromEntries\": \"$global/\",\n \"Reflect.ownKeys\": \"$global/\",\n \"Array.from\": \"$global/\",\n \"Atomics.wait\": \"$global/\",\n },\n \"js:class\": {\n \"Array\": \"$global/\",\n \"NodeList\": \"API/\",\n \"HTMLCollection\": \"API/\",\n \"Generator\": \"$global/\",\n \"AsyncGenerator\": \"$global/\",\n \"Date\": \"$global/\",\n \"ArrayBuffer\": \"$global/\",\n \"SharedArrayBuffer\": \"$global/\",\n \"TypedArray\": \"$global/\",\n \"TextEncoder\": \"$global/\",\n \"TextDecoder\": \"$global/\",\n \"DataView\": \"$global/\",\n \"Uint8Array\": \"$global/\",\n \"Map\": \"$global/\",\n \"Set\": \"$global/\",\n # the JavaScript domain has no exception type for some reason...\n \"Error\": \"$global/\",\n \"Function\": \"$global/\",\n \"Promise\": \"$global/\",\n \"FileSystemDirectoryHandle\": \"API/\",\n },\n \"js:method\": {\n \"Iterator.next\": \"$reference/Iteration_protocols#next\",\n \"AsyncIterator.next\": \"$reference/Iteration_protocols#next_2\",\n \"Generator.next\": \"$global/\",\n \"Generator.throw\": \"$global/\",\n \"Generator.return\": \"$global/\",\n \"AsyncGenerator.next\": \"$global/\",\n \"AsyncGenerator.throw\": \"$global/\",\n \"AsyncGenerator.return\": \"$global/\",\n \"Response.clone\": \"API/\",\n \"Response.arrayBuffer\": \"API/\",\n \"EventTarget.addEventListener\": \"API/\",\n \"EventTarget.removeEventListener\": \"API/\",\n \"Promise.then\": \"$global/\",\n \"Promise.catch\": \"$global/\",\n \"Promise.finally\": \"$global/\",\n \"Function.apply\": \"$global/\",\n \"Function.bind\": \"$global/\",\n \"Function.call\": \"$global/\",\n },\n \"js:data\": {\n \"Iterable\": \"$reference/Iteration_protocols#the_iterable_protocol\",\n \"IteratorResult\": \"$reference/Iteration_protocols#next\",\n \"Iterator\": \"$reference/Iteration_protocols#the_iterator_protocol\",\n \"AsyncIterator\": \"$reference/Iteration_protocols#the_async_iterator_and_async_iterable_protocols\",\n \"Symbol.asyncIterator\": \"$global/\",\n \"Symbol.iterator\": \"$global/\",\n \"Symbol.toStringTag\": \"$global/\",\n \"FinalizationRegistry\": \"$global/\",\n \"globalThis\": \"$global/\",\n \"NaN\": \"$global/\",\n \"undefined\": \"$global/\",\n \"BigInt\": \"$global/\",\n \"Number\": \"$global/\",\n \"String\": \"$global/\",\n \"Boolean\": \"$global/\",\n \"Object\": \"$global/\",\n \"Number.MAX_SAFE_INTEGER\": \"$global/\",\n \"null\": \"$reference/Operators/\",\n \"Response\": \"API/\",\n \"TypedArray.BYTES_PER_ELEMENT\": \"$global/\",\n },\n \"js:attribute\": {\n \"TypedArray.byteLength\": \"$global/\",\n \"Response.type\": \"API/\",\n \"Response.url\": \"API/\",\n \"Response.statusText\": \"API/\",\n \"Response.bodyUsed\": \"API/\",\n \"Response.ok\": \"API/\",\n \"Response.redirected\": \"API/\",\n \"Response.status\": \"API/\",\n },\n \"std:label\": {\"async function\": \"$reference/Statements/async_function\"},\n}\n\nJSDATA = set(DATA[\"js:data\"].keys())\nJSDATA.update([x.lower() for x in JSDATA])\nJSDATA.add(\"void\")\nJSDATA.add(\"any\")\nJSCLASS = 
set(DATA[\"js:class\"].keys())\n\n# Each entry is a four tuple:\n# (project_name, project_version, url, link_text)\n#\n# If link_text is \"-\" the original name of the xref will be used as the link\n# text which is good enough for us.\nPROJECT_NAME = \"MDN docs\"\nPROJECT_VERSION = \"\" # MDN docs are not really versioned\nUSE_NAME_AS_LINK_TEXT = \"-\"\n\nINVDATA: dict[str, dict[str, tuple[str, str, str, str]]] = {}\nfor type, entries in DATA.items():\n type_values = INVDATA.setdefault(type, {})\n for key, value in entries.items():\n value = value.replace(\"$reference\", \"JavaScript/Reference\")\n value = value.replace(\"$global\", \"JavaScript/Reference/Global_Objects\")\n if value.endswith(\"/\"):\n value += key.replace(\".\", \"/\")\n url = f\"https://developer.mozilla.org/en-US/docs/Web/{value}\"\n type_values[key] = (PROJECT_NAME, PROJECT_VERSION, url, USE_NAME_AS_LINK_TEXT)\n type_values[key.lower()] = (\n PROJECT_NAME,\n PROJECT_VERSION,\n url,\n USE_NAME_AS_LINK_TEXT,\n )\n\nfor key, url in [\n (\"void\", \"https://www.typescriptlang.org/docs/handbook/2/functions.html#void\"),\n (\"any\", \"https://www.typescriptlang.org/docs/handbook/2/everyday-types.html#any\"),\n]:\n INVDATA[\"js:data\"][key] = (\n \"typescript docs\",\n \"\",\n url,\n \"-\",\n )\n\nfor key in [\"stdin\", \"stdout\", \"stderr\"]:\n INVDATA[\"js:data\"][f\"process.{key}\"] = (\n \"node docs\",\n \"\",\n f\"https://nodejs.org/api/process.html#process{key}\",\n \"-\",\n )\n\n\ndef add_mdn_xrefs(app: Sphinx) -> None:\n \"\"\"Add cross referencing to Mozilla Developer Network documentation\"\"\"\n inventories = InventoryAdapter(app.builder.env)\n inventories.named_inventory[\"mdn\"] = INVDATA\n for type, objects in INVDATA.items():\n inventories.main_inventory.setdefault(type, {}).update(objects)\n\n\n__all__ = [\"add_mdn_xrefs\"]\n", "path": "docs/sphinx_pyodide/sphinx_pyodide/mdn_xrefs.py"}]} | 2,501 | 309 |
gh_patches_debug_23656 | rasdani/github-patches | git_diff | OpenMined__PySyft-4991 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Does the framework support IPv6 networks?
Is this framework suitable for an IPv6 network environment?
</issue>
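The example node below binds Flask to `0.0.0.0`, which listens on IPv4 only; the golden diff in this record switches the bind address to `::` when an `IP_MODE` of `IPV6` is selected. A stripped-down sketch of the same idea, assuming nothing beyond Flask itself (the route and defaults are made up for the example):

```python
import os

from flask import Flask

app = Flask(__name__)


@app.route("/")
def ping() -> str:
    return "ok"


if __name__ == "__main__":
    # "::" is the IPv6 unspecified address; on most dual-stack hosts it also
    # accepts IPv4-mapped connections. "0.0.0.0" listens on IPv4 only.
    ip_mode = os.getenv("IP_MODE", "IPV4")
    host = "::" if ip_mode == "IPV6" else "0.0.0.0"  # nosec - dev example only
    app.run(host=host, port=int(os.getenv("PORT", 5000)))
```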
<code>
[start of src/syft/grid/example_nodes/network.py]
1 """
2 The purpose of this application is to allow us to dev and test PySyft
3 functionality on an actual local network. This is NOT meant to be run in
4 production (that's the *actual* grid's job).
5
6 For example:
7 $ python src/syft/grid/example_nodes/network.py
8
9 """
10 # stdlib
11 import os
12
13 # third party
14 import flask
15 from flask import Flask
16 from flask import Response
17 from nacl.encoding import HexEncoder
18
19 # syft absolute
20 from syft.core.common.message import SignedImmediateSyftMessageWithReply
21 from syft.core.common.message import SignedImmediateSyftMessageWithoutReply
22 from syft.core.common.serde.deserialize import _deserialize
23 from syft.core.node.network.network import Network
24 from syft.grid.services.signaling_service import PullSignalingService
25 from syft.grid.services.signaling_service import PushSignalingService
26 from syft.grid.services.signaling_service import RegisterDuetPeerService
27
28 app = Flask(__name__)
29
30 network = Network(name="om-net")
31
32 network.immediate_services_without_reply.append(PushSignalingService)
33 network.immediate_services_with_reply.append(PullSignalingService)
34 network.immediate_services_with_reply.append(RegisterDuetPeerService)
35 network._register_services() # re-register all services including SignalingService
36
37
38 @app.route("/metadata")
39 def get_metadata() -> flask.Response:
40 metadata = network.get_metadata_for_client()
41 metadata_proto = metadata.serialize()
42 r = Response(
43 response=metadata_proto.SerializeToString(),
44 status=200,
45 )
46 r.headers["Content-Type"] = "application/octet-stream"
47 return r
48
49
50 @app.route("/", methods=["POST"])
51 def process_network_msgs() -> flask.Response:
52 data = flask.request.get_data()
53 obj_msg = _deserialize(blob=data, from_bytes=True)
54 if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):
55 print(
56 f"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch"
57 )
58 reply = network.recv_immediate_msg_with_reply(msg=obj_msg)
59 r = Response(response=reply.serialize(to_bytes=True), status=200)
60 r.headers["Content-Type"] = "application/octet-stream"
61 return r
62 elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):
63 print(
64 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
65 )
66 network.recv_immediate_msg_without_reply(msg=obj_msg)
67 r = Response(status=200)
68 return r
69 else:
70 print(
71 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
72 )
73 network.recv_eventual_msg_without_reply(msg=obj_msg)
74 r = Response(status=200)
75 return r
76
77
78 def run() -> None:
79 global network
80 print("====================================")
81 print("========== NODE ROOT KEY ===========")
82 print("====================================")
83 # this signing_key is to aid in local development and is not used in the real
84 # PyGrid implementation
85 PORT = os.getenv("PORT", 5000)
86 print(f"Starting Node on PORT: {PORT}")
87 print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
88 app.run(host="0.0.0.0", port=int(PORT)) # nosec
89
90
91 run()
92
[end of src/syft/grid/example_nodes/network.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py
--- a/src/syft/grid/example_nodes/network.py
+++ b/src/syft/grid/example_nodes/network.py
@@ -9,6 +9,7 @@
"""
# stdlib
import os
+import sys
# third party
import flask
@@ -77,15 +78,25 @@
def run() -> None:
global network
- print("====================================")
- print("========== NODE ROOT KEY ===========")
- print("====================================")
+
+ IP_MODE = os.getenv("IP_MODE", "IPV4") # default to ipv4
+ if len(sys.argv) > 1:
+ IP_MODE = sys.argv[1]
+
+ IP_MODE = "IPV6" if IP_MODE == "IPV6" else "IPV4"
# this signing_key is to aid in local development and is not used in the real
# PyGrid implementation
+ HOST = "0.0.0.0" if IP_MODE == "IPV4" else "::" # nosec
PORT = os.getenv("PORT", 5000)
- print(f"Starting Node on PORT: {PORT}")
+
+ print("====================================")
+ print("========== NODE ROOT KEY ===========")
+ print("====================================")
print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
- app.run(host="0.0.0.0", port=int(PORT)) # nosec
+
+ print(f"Using {IP_MODE} and listening on port {PORT}")
+
+ app.run(host=HOST, port=int(PORT))
run()
| {"golden_diff": "diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py\n--- a/src/syft/grid/example_nodes/network.py\n+++ b/src/syft/grid/example_nodes/network.py\n@@ -9,6 +9,7 @@\n \"\"\"\n # stdlib\n import os\n+import sys\n \n # third party\n import flask\n@@ -77,15 +78,25 @@\n \n def run() -> None:\n global network\n- print(\"====================================\")\n- print(\"========== NODE ROOT KEY ===========\")\n- print(\"====================================\")\n+\n+ IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n+ if len(sys.argv) > 1:\n+ IP_MODE = sys.argv[1]\n+\n+ IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n+ HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n- print(f\"Starting Node on PORT: {PORT}\")\n+\n+ print(\"====================================\")\n+ print(\"========== NODE ROOT KEY ===========\")\n+ print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n- app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n+\n+ print(f\"Using {IP_MODE} and listening on port {PORT}\")\n+\n+ app.run(host=HOST, port=int(PORT))\n \n \n run()\n", "issue": "Does the framework support IPv6 networks?\nIs this framework suitable for IPv6 network environment? \n", "before_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), 
status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n PORT = os.getenv(\"PORT\", 5000)\n print(f\"Starting Node on PORT: {PORT}\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}]} | 1,460 | 384 |
gh_patches_debug_19544 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🪲 [CI] MyPy test failure
### Thank you for taking the time to report a problem with Opteryx.
_To help us to respond to your request we ask that you try to provide the below detail about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
</issue>
<code>
[start of opteryx/planner/views/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import orjson
14
15 from opteryx.managers.expression import NodeType
16 from opteryx.third_party.travers import Graph
17
18
19 def _load_views():
20 try:
21 with open("views.json", "rb") as defs:
22 return orjson.loads(defs.read())
23 except Exception as err:
24 print(f"[OPTERYX] Unable to open views definition file. {err}")
25 return {}
26
27
28 VIEWS = _load_views()
29
30
31 def is_view(view_name: str) -> bool:
32 return view_name in VIEWS
33
34
35 def view_as_plan(view_name: str) -> Graph:
36 from opteryx.planner.logical_planner import do_logical_planning_phase
37 from opteryx.third_party import sqloxide
38 from opteryx.utils.sql import clean_statement
39 from opteryx.utils.sql import remove_comments
40
41 operation = VIEWS.get(view_name)["statement"]
42
43 clean_sql = clean_statement(remove_comments(operation))
44 parsed_statements = sqloxide.parse_sql(clean_sql, dialect="mysql")
45 logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))
46
47 return logical_plan
48
[end of opteryx/planner/views/__init__.py]
[start of opteryx/__version__.py]
1 __build__ = 522
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 16
31 _revision = 0
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
[end of opteryx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 522
+__build__ = 523
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py
--- a/opteryx/planner/views/__init__.py
+++ b/opteryx/planner/views/__init__.py
@@ -12,8 +12,7 @@
import orjson
-from opteryx.managers.expression import NodeType
-from opteryx.third_party.travers import Graph
+from opteryx.planner.logical_planner import LogicalPlan
def _load_views():
@@ -32,7 +31,7 @@
return view_name in VIEWS
-def view_as_plan(view_name: str) -> Graph:
+def view_as_plan(view_name: str) -> LogicalPlan:
from opteryx.planner.logical_planner import do_logical_planning_phase
from opteryx.third_party import sqloxide
from opteryx.utils.sql import clean_statement
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 522\n+__build__ = 523\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py\n--- a/opteryx/planner/views/__init__.py\n+++ b/opteryx/planner/views/__init__.py\n@@ -12,8 +12,7 @@\n \n import orjson\n \n-from opteryx.managers.expression import NodeType\n-from opteryx.third_party.travers import Graph\n+from opteryx.planner.logical_planner import LogicalPlan\n \n \n def _load_views():\n@@ -32,7 +31,7 @@\n return view_name in VIEWS\n \n \n-def view_as_plan(view_name: str) -> Graph:\n+def view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n", "issue": "\ud83e\udeb2 [CI] MyPy test failure\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.travers import Graph\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> Graph:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 522\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 1,535 | 300 |
gh_patches_debug_37940 | rasdani/github-patches | git_diff | deepset-ai__haystack-6753 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat: Add split by `page` to `DocumentSplitter`
**Is your feature request related to a problem? Please describe.**
There are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context, or to be able to perform two sets of chunking (i.e. split by page, followed by split by sentence). I would not say this is a common setup, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility.
**Describe the solution you'd like**
Add a new `split_by` value of `page` that would split on `"\f"`.
**Describe alternatives you've considered**
Split up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this.
</issue>
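The request boils down to treating the form-feed character ("\f", which most PDF-to-text converters emit between pages) as one more split delimiter. As a quick illustration of the idea in isolation, a minimal sketch in plain Python (the helper name is made up here; the real feature belongs inside `DocumentSplitter`):

~~~python
# Illustrative only: split raw text into per-page chunks on "\f",
# keeping the delimiter attached to every page except the last one,
# which mirrors how DocumentSplitter re-attaches its delimiters.
def split_into_pages(text: str) -> list:
    pages = text.split("\f")
    return [page + "\f" for page in pages[:-1]] + [pages[-1]]


if __name__ == "__main__":
    sample = "Page one.\fPage two.\fPage three."
    for number, page in enumerate(split_into_pages(sample), start=1):
        print(number, repr(page))
~~~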
<code>
[start of haystack/components/preprocessors/document_splitter.py]
1 from copy import deepcopy
2 from typing import List, Literal
3
4 from more_itertools import windowed
5
6 from haystack import component, Document
7
8
9 @component
10 class DocumentSplitter:
11 """
12 Splits a list of text documents into a list of text documents with shorter texts.
13 This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.
14 """
15
16 def __init__(
17 self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
18 ):
19 """
20 :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
21 "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
22 :param split_length: The maximum number of units in each split.
23 :param split_overlap: The number of units that each split should overlap.
24 """
25
26 self.split_by = split_by
27 if split_by not in ["word", "sentence", "passage"]:
28 raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
29 if split_length <= 0:
30 raise ValueError("split_length must be greater than 0.")
31 self.split_length = split_length
32 if split_overlap < 0:
33 raise ValueError("split_overlap must be greater than or equal to 0.")
34 self.split_overlap = split_overlap
35
36 @component.output_types(documents=List[Document])
37 def run(self, documents: List[Document]):
38 """
39 Splits the documents by split_by after split_length units with an overlap of split_overlap units.
40 Returns a list of documents with the split texts.
41 A metadata field "source_id" is added to each document to keep track of the original document that was split.
42 Other metadata are copied from the original document.
43 :param documents: The documents to split.
44 :return: A list of documents with the split texts.
45 """
46
47 if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
48 raise TypeError("DocumentSplitter expects a List of Documents as input.")
49
50 split_docs = []
51 for doc in documents:
52 if doc.content is None:
53 raise ValueError(
54 f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None."
55 )
56 units = self._split_into_units(doc.content, self.split_by)
57 text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
58 metadata = deepcopy(doc.meta)
59 metadata["source_id"] = doc.id
60 split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
61 return {"documents": split_docs}
62
63 def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
64 if split_by == "passage":
65 split_at = "\n\n"
66 elif split_by == "sentence":
67 split_at = "."
68 elif split_by == "word":
69 split_at = " "
70 else:
71 raise NotImplementedError(
72 "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
73 )
74 units = text.split(split_at)
75 # Add the delimiter back to all units except the last one
76 for i in range(len(units) - 1):
77 units[i] += split_at
78 return units
79
80 def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:
81 """
82 Concatenates the elements into parts of split_length units.
83 """
84 text_splits = []
85 segments = windowed(elements, n=split_length, step=split_length - split_overlap)
86 for seg in segments:
87 current_units = [unit for unit in seg if unit is not None]
88 txt = "".join(current_units)
89 if len(txt) > 0:
90 text_splits.append(txt)
91 return text_splits
92
[end of haystack/components/preprocessors/document_splitter.py]
</code>
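For orientation before the patch, here is a short usage sketch of the component exactly as shown above; it assumes a Haystack 2.x environment where the class is exported from `haystack.components.preprocessors`:

~~~python
# Word-based splitting with the DocumentSplitter shown above (pre-patch).
from haystack import Document
from haystack.components.preprocessors import DocumentSplitter

splitter = DocumentSplitter(split_by="word", split_length=5, split_overlap=1)
docs = [Document(content="one two three four five six seven eight nine ten")]

result = splitter.run(documents=docs)
for doc in result["documents"]:
    # Each split keeps the original document's id in meta["source_id"].
    print(repr(doc.content), doc.meta["source_id"])
~~~

With a `split_by="page"` option added, the same call pattern would stay unchanged; only the delimiter differs.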
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py
--- a/haystack/components/preprocessors/document_splitter.py
+++ b/haystack/components/preprocessors/document_splitter.py
@@ -14,18 +14,21 @@
"""
def __init__(
- self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
+ self,
+ split_by: Literal["word", "sentence", "page", "passage"] = "word",
+ split_length: int = 200,
+ split_overlap: int = 0,
):
"""
:param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
- "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
+ "sentence" for splitting by ".", "page" for splitting by "\f" or "passage" for splitting by "\\n\\n".
:param split_length: The maximum number of units in each split.
:param split_overlap: The number of units that each split should overlap.
"""
self.split_by = split_by
- if split_by not in ["word", "sentence", "passage"]:
- raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
+ if split_by not in ["word", "sentence", "page", "passage"]:
+ raise ValueError("split_by must be one of 'word', 'sentence', 'page' or 'passage'.")
if split_length <= 0:
raise ValueError("split_length must be greater than 0.")
self.split_length = split_length
@@ -60,8 +63,10 @@
split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
return {"documents": split_docs}
- def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
- if split_by == "passage":
+ def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]:
+ if split_by == "page":
+ split_at = "\f"
+ elif split_by == "passage":
split_at = "\n\n"
elif split_by == "sentence":
split_at = "."
@@ -69,7 +74,7 @@
split_at = " "
else:
raise NotImplementedError(
- "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
+ "DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options."
)
units = text.split(split_at)
# Add the delimiter back to all units except the last one
| {"golden_diff": "diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py\n--- a/haystack/components/preprocessors/document_splitter.py\n+++ b/haystack/components/preprocessors/document_splitter.py\n@@ -14,18 +14,21 @@\n \"\"\"\n \n def __init__(\n- self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n+ self,\n+ split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n+ split_length: int = 200,\n+ split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n- \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n+ \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n \n self.split_by = split_by\n- if split_by not in [\"word\", \"sentence\", \"passage\"]:\n- raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n+ if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n+ raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n@@ -60,8 +63,10 @@\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n \n- def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n- if split_by == \"passage\":\n+ def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n+ if split_by == \"page\":\n+ split_at = \"\\f\"\n+ elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n@@ -69,7 +74,7 @@\n split_at = \" \"\n else:\n raise NotImplementedError(\n- \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n+ \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n", "issue": "feat: Add split by `page` to `DocumentSplitter`\n**Is your feature request related to a problem? Please describe.**\r\nThere are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context or to be able to perform two sets of chunking (i.e. split by page, followed up by split by sentence). I would not say this is a common set up, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility. \r\n\r\n**Describe the solution you'd like**\r\nAdd a new `split_by` value of `page` that would split on `\"\\f\"`.\r\n\r\n**Describe alternatives you've considered**\r\nSplit up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this. 
\r\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n if split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is 
not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}]} | 1,799 | 667 |
gh_patches_debug_1503 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-11075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build: support Ruby under `build.tools`
We should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.
Work required:
- [x] Update the documentation
- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images
- [x] Compile latest Ruby version and upload it to S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)
- [x] Update `settings.py` to add this tool and version
- [x] Update config v2 to accept this value
- [x] Create a branch on `test-builds` for this use case
> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462
</issue>
<code>
[start of readthedocs/builds/constants_docker.py]
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # Adding a new tool/version to this setting requires:
13 #
14 # - a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``)
16 #
17 # - running the script ``./scripts/compile_version_upload.sh`` in
18 # development and production environments to compile and cache the new
19 # tool/version
20 #
21 # Note that when updating this options, you should also update the file:
22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.18",
36 "3.9": "3.9.18",
37 "3.10": "3.10.13",
38 "3.11": "3.11.6",
39 "3.12": "3.12.0",
40 # Always point to the latest stable release.
41 "3": "3.12.0",
42 "miniconda3-4.7": "miniconda3-4.7.12",
43 "mambaforge-4.10": "mambaforge-4.10.3-10",
44 "mambaforge-22.9": "mambaforge-22.9.0-3",
45 },
46 "nodejs": {
47 "14": "14.20.1",
48 "16": "16.18.1",
49 "18": "18.16.1", # LTS
50 "19": "19.0.1",
51 "20": "20.3.1",
52 },
53 "rust": {
54 "1.55": "1.55.0",
55 "1.61": "1.61.0",
56 "1.64": "1.64.0",
57 "1.70": "1.70.0",
58 },
59 "golang": {
60 "1.17": "1.17.13",
61 "1.18": "1.18.10",
62 "1.19": "1.19.10",
63 "1.20": "1.20.5",
64 },
65 },
66 }
67
[end of readthedocs/builds/constants_docker.py]
</code>
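At the settings level, the change requested above amounts to one more entry in the mapping just shown. A hedged sketch of that addition (the `"3.3": "3.3.0"` pairing is an assumption for illustration; the real value must match whatever gets compiled by `./scripts/compile_version_upload.sh` and uploaded to S3):

~~~python
# Hypothetical addition, expressed against the module above
# (importable without Django, per its own docstring).
from readthedocs.builds.constants_docker import RTD_DOCKER_BUILD_SETTINGS

# Assumption for illustration: Ruby 3.3 mapped to a concrete 3.3.0 build.
RTD_DOCKER_BUILD_SETTINGS["tools"]["ruby"] = {"3.3": "3.3.0"}

print(sorted(RTD_DOCKER_BUILD_SETTINGS["tools"]))  # golang, nodejs, python, ruby, rust
~~~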
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -50,6 +50,9 @@
"19": "19.0.1",
"20": "20.3.1",
},
+ "ruby": {
+ "3.3": "3.3.0",
+ },
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
| {"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -50,6 +50,9 @@\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n+ \"ruby\": {\n+ \"3.3\": \"3.3.0\",\n+ },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n", "issue": "Build: support Ruby under `build.tools` \nWe should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.\r\n\r\nWork required:\r\n\r\n- [x] Update the documentation\r\n- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images\r\n- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)\r\n- [x] Update `settings.py` to add this tool and version\r\n- [x] Update config v2 to accept this value\r\n- [x] Create a branch on `test-builds` for this use case\r\n\r\n\r\n> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]} | 1,627 | 146 |
gh_patches_debug_18744 | rasdani/github-patches | git_diff | carpentries__amy-2381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add filter for default training requests view
The [training requests view](https://amy.carpentries.org/requests/training_requests/) returns all training requests (currently over 7000 results). This makes the page load very slowly (possibly related to #2314).
Default search results should be:
* State: Pending or accepted
* Is Matched: Unmatched
We should then be able to adjust search as we currently do.
Edit to note: in effect, the link to Training requests should go [here](https://amy.carpentries.org/requests/training_requests/?search=&group_name=&state=no_d&matched=u&affiliation=&location=&order_by=) instead of [here](https://amy.carpentries.org/requests/training_requests/).
</issue>
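In django-filter terms the request is that an empty query string should fall back to `state=no_d&matched=u` rather than meaning "no filtering at all". A sketch of one common way to do that, written against the `TrainingRequestFilter` class that appears in the code below (not a drop-in patch):

~~~python
# Sketch for amy/extrequests/filters.py: when no query string is supplied,
# substitute the defaults requested above
# (state "Pending or accepted" = no_d, matched "Unmatched" = u).
from django.http import QueryDict


class TrainingRequestFilter(AMYFilterSet):  # AMYFilterSet as already imported there
    def __init__(self, data=None, *args, **kwargs):
        if not data:
            data = QueryDict("state=no_d&matched=u")
        super().__init__(data, *args, **kwargs)

    # ... existing filters unchanged ...
~~~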
<code>
[start of amy/extrequests/filters.py]
1 import re
2
3 from django.db.models import Q
4 from django.forms import widgets
5 import django_filters
6
7 from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest
8 from workshops.fields import Select2Widget
9 from workshops.filters import (
10 AllCountriesFilter,
11 AMYFilterSet,
12 ContinentFilter,
13 ForeignKeyAllValuesFilter,
14 NamesOrderingFilter,
15 StateFilterSet,
16 )
17 from workshops.models import Curriculum, Person, TrainingRequest, WorkshopRequest
18
19 # ------------------------------------------------------------
20 # TrainingRequest related filter and filter methods
21 # ------------------------------------------------------------
22
23
24 class TrainingRequestFilter(AMYFilterSet):
25 search = django_filters.CharFilter(
26 label="Name or Email",
27 method="filter_by_person",
28 )
29
30 group_name = django_filters.CharFilter(
31 field_name="group_name", lookup_expr="icontains", label="Group"
32 )
33
34 state = django_filters.ChoiceFilter(
35 label="State",
36 choices=(("no_d", "Pending or accepted"),) + TrainingRequest.STATE_CHOICES,
37 method="filter_training_requests_by_state",
38 )
39
40 matched = django_filters.ChoiceFilter(
41 label="Is Matched?",
42 choices=(
43 ("", "Unknown"),
44 ("u", "Unmatched"),
45 ("p", "Matched trainee, unmatched training"),
46 ("t", "Matched trainee and training"),
47 ),
48 method="filter_matched",
49 )
50
51 nonnull_manual_score = django_filters.BooleanFilter(
52 label="Manual score applied",
53 method="filter_non_null_manual_score",
54 widget=widgets.CheckboxInput,
55 )
56
57 affiliation = django_filters.CharFilter(
58 method="filter_affiliation",
59 )
60
61 location = django_filters.CharFilter(lookup_expr="icontains")
62
63 order_by = NamesOrderingFilter(
64 fields=(
65 "created_at",
66 "score_total",
67 ),
68 )
69
70 class Meta:
71 model = TrainingRequest
72 fields = [
73 "search",
74 "group_name",
75 "state",
76 "matched",
77 "affiliation",
78 "location",
79 ]
80
81 def filter_matched(self, queryset, name, choice):
82 if choice == "":
83 return queryset
84 elif choice == "u": # unmatched
85 return queryset.filter(person=None)
86 elif choice == "p": # matched trainee, unmatched training
87 return (
88 queryset.filter(person__isnull=False)
89 .exclude(
90 person__task__role__name="learner",
91 person__task__event__tags__name="TTT",
92 )
93 .distinct()
94 )
95 else: # choice == 't' <==> matched trainee and training
96 return queryset.filter(
97 person__task__role__name="learner",
98 person__task__event__tags__name="TTT",
99 ).distinct()
100
101 def filter_by_person(self, queryset, name, value):
102 if value == "":
103 return queryset
104 else:
105 # 'Harry Potter' -> ['Harry', 'Potter']
106 tokens = re.split(r"\s+", value)
107 # Each token must match email address or github username or
108 # personal, or family name.
109 for token in tokens:
110 queryset = queryset.filter(
111 Q(personal__icontains=token)
112 | Q(middle__icontains=token)
113 | Q(family__icontains=token)
114 | Q(email__icontains=token)
115 | Q(person__personal__icontains=token)
116 | Q(person__middle__icontains=token)
117 | Q(person__family__icontains=token)
118 | Q(person__email__icontains=token)
119 )
120 return queryset
121
122 def filter_affiliation(self, queryset, name, affiliation):
123 if affiliation == "":
124 return queryset
125 else:
126 q = Q(affiliation__icontains=affiliation) | Q(
127 person__affiliation__icontains=affiliation
128 )
129 return queryset.filter(q).distinct()
130
131 def filter_training_requests_by_state(self, queryset, name, choice):
132 if choice == "no_d":
133 return queryset.exclude(state="d")
134 else:
135 return queryset.filter(state=choice)
136
137 def filter_non_null_manual_score(self, queryset, name, manual_score):
138 if manual_score:
139 return queryset.filter(score_manual__isnull=False)
140 return queryset
141
142
143 # ------------------------------------------------------------
144 # WorkshopRequest related filter and filter methods
145 # ------------------------------------------------------------
146
147
148 class WorkshopRequestFilter(AMYFilterSet, StateFilterSet):
149 assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
150 country = AllCountriesFilter(widget=Select2Widget)
151 continent = ContinentFilter(widget=Select2Widget, label="Continent")
152 requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
153 label="Requested workshop types",
154 queryset=Curriculum.objects.all(),
155 widget=widgets.CheckboxSelectMultiple(),
156 )
157
158 order_by = django_filters.OrderingFilter(
159 fields=("created_at",),
160 )
161
162 class Meta:
163 model = WorkshopRequest
164 fields = [
165 "state",
166 "assigned_to",
167 "requested_workshop_types",
168 "country",
169 ]
170
171
172 # ------------------------------------------------------------
173 # WorkshopInquiryRequest related filter and filter methods
174 # ------------------------------------------------------------
175
176
177 class WorkshopInquiryFilter(AMYFilterSet, StateFilterSet):
178 assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
179 country = AllCountriesFilter(widget=Select2Widget)
180 continent = ContinentFilter(widget=Select2Widget, label="Continent")
181 requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
182 label="Requested workshop types",
183 queryset=Curriculum.objects.all(),
184 widget=widgets.CheckboxSelectMultiple(),
185 )
186
187 order_by = django_filters.OrderingFilter(
188 fields=("created_at",),
189 )
190
191 class Meta:
192 model = WorkshopInquiryRequest
193 fields = [
194 "state",
195 "assigned_to",
196 "requested_workshop_types",
197 "country",
198 ]
199
200
201 # ------------------------------------------------------------
202 # SelfOrganisedSubmission related filter and filter methods
203 # ------------------------------------------------------------
204
205
206 class SelfOrganisedSubmissionFilter(AMYFilterSet, StateFilterSet):
207 assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
208 country = AllCountriesFilter(widget=Select2Widget)
209 continent = ContinentFilter(widget=Select2Widget, label="Continent")
210 workshop_types = django_filters.ModelMultipleChoiceFilter(
211 label="Requested workshop types",
212 queryset=Curriculum.objects.all(),
213 widget=widgets.CheckboxSelectMultiple(),
214 )
215
216 order_by = django_filters.OrderingFilter(
217 fields=("created_at",),
218 )
219
220 class Meta:
221 model = SelfOrganisedSubmission
222 fields = [
223 "state",
224 "assigned_to",
225 "workshop_types",
226 "workshop_format",
227 ]
228
[end of amy/extrequests/filters.py]
</code>
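For reference, a filter set like the one above is normally fed straight from the request's GET parameters, which is why an empty query string currently returns every row. A hedged sketch of the usual call pattern (the view and template names here are illustrative, not taken from AMY):

~~~python
# Typical django-filter wiring; view and template names are illustrative only.
from django.shortcuts import render

from extrequests.filters import TrainingRequestFilter
from workshops.models import TrainingRequest


def all_training_requests(request):
    filter_set = TrainingRequestFilter(
        request.GET,
        queryset=TrainingRequest.objects.all(),
    )
    # filter_set.qs is the filtered queryset; with an empty request.GET the
    # current behaviour is "return everything", which is what the issue targets.
    return render(request, "requests/all_trainingrequests.html", {"filter": filter_set})
~~~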
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/extrequests/filters.py b/amy/extrequests/filters.py
--- a/amy/extrequests/filters.py
+++ b/amy/extrequests/filters.py
@@ -2,6 +2,7 @@
from django.db.models import Q
from django.forms import widgets
+from django.http import QueryDict
import django_filters
from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest
@@ -22,6 +23,16 @@
class TrainingRequestFilter(AMYFilterSet):
+ def __init__(self, data=None, *args, **kwargs):
+ # If no filters are set, use some default settings.
+ # This avoids handling the full list of training requests
+ # client-side unless the user deliberately chooses to do so.
+ # See https://github.com/carpentries/amy/issues/2314
+ if not data:
+ data = QueryDict("state=no_d&matched=u")
+
+ super().__init__(data, *args, **kwargs)
+
search = django_filters.CharFilter(
label="Name or Email",
method="filter_by_person",
| {"golden_diff": "diff --git a/amy/extrequests/filters.py b/amy/extrequests/filters.py\n--- a/amy/extrequests/filters.py\n+++ b/amy/extrequests/filters.py\n@@ -2,6 +2,7 @@\n \n from django.db.models import Q\n from django.forms import widgets\n+from django.http import QueryDict\n import django_filters\n \n from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest\n@@ -22,6 +23,16 @@\n \n \n class TrainingRequestFilter(AMYFilterSet):\n+ def __init__(self, data=None, *args, **kwargs):\n+ # If no filters are set, use some default settings.\n+ # This avoids handling the full list of training requests\n+ # client-side unless the user deliberately chooses to do so.\n+ # See https://github.com/carpentries/amy/issues/2314\n+ if not data:\n+ data = QueryDict(\"state=no_d&matched=u\")\n+\n+ super().__init__(data, *args, **kwargs)\n+\n search = django_filters.CharFilter(\n label=\"Name or Email\",\n method=\"filter_by_person\",\n", "issue": "Add filter for default training requests view \nThe [training requests view](https://amy.carpentries.org/requests/training_requests/) returns all training requests (currently over 7000 results). This makes the page load very slowly (possibly related to #2314).\r\n\r\nDefault search results should be:\r\n* State: Pending or accepted\r\n* Is Matched: Unmatched \r\n\r\nWe should then be able to adjust search as we currently do.\r\n\r\nEdit to note in effect, the link to Training requests should go [here](https://amy.carpentries.org/requests/training_requests/?search=&group_name=&state=no_d&matched=u&affiliation=&location=&order_by=) instead of [here](https://amy.carpentries.org/requests/training_requests/).\n", "before_files": [{"content": "import re\n\nfrom django.db.models import Q\nfrom django.forms import widgets\nimport django_filters\n\nfrom extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest\nfrom workshops.fields import Select2Widget\nfrom workshops.filters import (\n AllCountriesFilter,\n AMYFilterSet,\n ContinentFilter,\n ForeignKeyAllValuesFilter,\n NamesOrderingFilter,\n StateFilterSet,\n)\nfrom workshops.models import Curriculum, Person, TrainingRequest, WorkshopRequest\n\n# ------------------------------------------------------------\n# TrainingRequest related filter and filter methods\n# ------------------------------------------------------------\n\n\nclass TrainingRequestFilter(AMYFilterSet):\n search = django_filters.CharFilter(\n label=\"Name or Email\",\n method=\"filter_by_person\",\n )\n\n group_name = django_filters.CharFilter(\n field_name=\"group_name\", lookup_expr=\"icontains\", label=\"Group\"\n )\n\n state = django_filters.ChoiceFilter(\n label=\"State\",\n choices=((\"no_d\", \"Pending or accepted\"),) + TrainingRequest.STATE_CHOICES,\n method=\"filter_training_requests_by_state\",\n )\n\n matched = django_filters.ChoiceFilter(\n label=\"Is Matched?\",\n choices=(\n (\"\", \"Unknown\"),\n (\"u\", \"Unmatched\"),\n (\"p\", \"Matched trainee, unmatched training\"),\n (\"t\", \"Matched trainee and training\"),\n ),\n method=\"filter_matched\",\n )\n\n nonnull_manual_score = django_filters.BooleanFilter(\n label=\"Manual score applied\",\n method=\"filter_non_null_manual_score\",\n widget=widgets.CheckboxInput,\n )\n\n affiliation = django_filters.CharFilter(\n method=\"filter_affiliation\",\n )\n\n location = django_filters.CharFilter(lookup_expr=\"icontains\")\n\n order_by = NamesOrderingFilter(\n fields=(\n \"created_at\",\n \"score_total\",\n ),\n )\n\n class Meta:\n model = TrainingRequest\n 
fields = [\n \"search\",\n \"group_name\",\n \"state\",\n \"matched\",\n \"affiliation\",\n \"location\",\n ]\n\n def filter_matched(self, queryset, name, choice):\n if choice == \"\":\n return queryset\n elif choice == \"u\": # unmatched\n return queryset.filter(person=None)\n elif choice == \"p\": # matched trainee, unmatched training\n return (\n queryset.filter(person__isnull=False)\n .exclude(\n person__task__role__name=\"learner\",\n person__task__event__tags__name=\"TTT\",\n )\n .distinct()\n )\n else: # choice == 't' <==> matched trainee and training\n return queryset.filter(\n person__task__role__name=\"learner\",\n person__task__event__tags__name=\"TTT\",\n ).distinct()\n\n def filter_by_person(self, queryset, name, value):\n if value == \"\":\n return queryset\n else:\n # 'Harry Potter' -> ['Harry', 'Potter']\n tokens = re.split(r\"\\s+\", value)\n # Each token must match email address or github username or\n # personal, or family name.\n for token in tokens:\n queryset = queryset.filter(\n Q(personal__icontains=token)\n | Q(middle__icontains=token)\n | Q(family__icontains=token)\n | Q(email__icontains=token)\n | Q(person__personal__icontains=token)\n | Q(person__middle__icontains=token)\n | Q(person__family__icontains=token)\n | Q(person__email__icontains=token)\n )\n return queryset\n\n def filter_affiliation(self, queryset, name, affiliation):\n if affiliation == \"\":\n return queryset\n else:\n q = Q(affiliation__icontains=affiliation) | Q(\n person__affiliation__icontains=affiliation\n )\n return queryset.filter(q).distinct()\n\n def filter_training_requests_by_state(self, queryset, name, choice):\n if choice == \"no_d\":\n return queryset.exclude(state=\"d\")\n else:\n return queryset.filter(state=choice)\n\n def filter_non_null_manual_score(self, queryset, name, manual_score):\n if manual_score:\n return queryset.filter(score_manual__isnull=False)\n return queryset\n\n\n# ------------------------------------------------------------\n# WorkshopRequest related filter and filter methods\n# ------------------------------------------------------------\n\n\nclass WorkshopRequestFilter(AMYFilterSet, StateFilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)\n country = AllCountriesFilter(widget=Select2Widget)\n continent = ContinentFilter(widget=Select2Widget, label=\"Continent\")\n requested_workshop_types = django_filters.ModelMultipleChoiceFilter(\n label=\"Requested workshop types\",\n queryset=Curriculum.objects.all(),\n widget=widgets.CheckboxSelectMultiple(),\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\"created_at\",),\n )\n\n class Meta:\n model = WorkshopRequest\n fields = [\n \"state\",\n \"assigned_to\",\n \"requested_workshop_types\",\n \"country\",\n ]\n\n\n# ------------------------------------------------------------\n# WorkshopInquiryRequest related filter and filter methods\n# ------------------------------------------------------------\n\n\nclass WorkshopInquiryFilter(AMYFilterSet, StateFilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)\n country = AllCountriesFilter(widget=Select2Widget)\n continent = ContinentFilter(widget=Select2Widget, label=\"Continent\")\n requested_workshop_types = django_filters.ModelMultipleChoiceFilter(\n label=\"Requested workshop types\",\n queryset=Curriculum.objects.all(),\n widget=widgets.CheckboxSelectMultiple(),\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\"created_at\",),\n )\n\n class Meta:\n model = WorkshopInquiryRequest\n 
fields = [\n \"state\",\n \"assigned_to\",\n \"requested_workshop_types\",\n \"country\",\n ]\n\n\n# ------------------------------------------------------------\n# SelfOrganisedSubmission related filter and filter methods\n# ------------------------------------------------------------\n\n\nclass SelfOrganisedSubmissionFilter(AMYFilterSet, StateFilterSet):\n assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)\n country = AllCountriesFilter(widget=Select2Widget)\n continent = ContinentFilter(widget=Select2Widget, label=\"Continent\")\n workshop_types = django_filters.ModelMultipleChoiceFilter(\n label=\"Requested workshop types\",\n queryset=Curriculum.objects.all(),\n widget=widgets.CheckboxSelectMultiple(),\n )\n\n order_by = django_filters.OrderingFilter(\n fields=(\"created_at\",),\n )\n\n class Meta:\n model = SelfOrganisedSubmission\n fields = [\n \"state\",\n \"assigned_to\",\n \"workshop_types\",\n \"workshop_format\",\n ]\n", "path": "amy/extrequests/filters.py"}]} | 2,706 | 250 |
gh_patches_debug_34507 | rasdani/github-patches | git_diff | getpelican__pelican-424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for acronym/abbr
Like in Sphinx, for example:
https://bitbucket.org/birkenfeld/sphinx/changeset/f69ec6dd2c54
Docutils has this in the To Do List, but I like Georg's syntax better.
http://docutils.sourceforge.net/docs/dev/todo.html
</issue>
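The linked Sphinx change introduces an `:abbr:` interpreted-text role whose parenthesised tail becomes the expansion. A hedged sketch of how the same idea could be wired into docutils (the regex and attribute name are illustrative, and it assumes a docutils release that ships `nodes.abbreviation`; older releases would need a custom node class plus matching HTML translator methods):

~~~python
# Sketch of a Sphinx-style :abbr:`ABBR (expansion)` role for docutils.
import re

from docutils import nodes, utils
from docutils.parsers.rst import roles

_ABBR_RE = re.compile(r"\((.*)\)$")


def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    text = utils.unescape(text)
    match = _ABBR_RE.search(text)
    if match is None:
        return [nodes.abbreviation(text, text)], []
    abbr = text[: match.start()].strip()
    expansion = match.group(1)
    # "explanation" is the attribute a writer would turn into title="...".
    return [nodes.abbreviation(abbr, abbr, explanation=expansion)], []


roles.register_local_role("abbr", abbr_role)
~~~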
<code>
[start of pelican/readers.py]
1 # -*- coding: utf-8 -*-
2 try:
3 import docutils
4 import docutils.core
5 import docutils.io
6 from docutils.writers.html4css1 import HTMLTranslator
7
8 # import the directives to have pygments support
9 from pelican import rstdirectives # NOQA
10 except ImportError:
11 core = False
12 try:
13 from markdown import Markdown
14 except ImportError:
15 Markdown = False # NOQA
16 import re
17
18 from pelican.contents import Category, Tag, Author
19 from pelican.utils import get_date, open
20
21
22 _METADATA_PROCESSORS = {
23 'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')],
24 'date': lambda x, y: get_date(x),
25 'status': lambda x, y: unicode.strip(x),
26 'category': Category,
27 'author': Author,
28 }
29
30
31 class Reader(object):
32 enabled = True
33 extensions = None
34
35 def __init__(self, settings):
36 self.settings = settings
37
38 def process_metadata(self, name, value):
39 if name in _METADATA_PROCESSORS:
40 return _METADATA_PROCESSORS[name](value, self.settings)
41 return value
42
43
44 class _FieldBodyTranslator(HTMLTranslator):
45
46 def __init__(self, document):
47 HTMLTranslator.__init__(self, document)
48 self.compact_p = None
49
50 def astext(self):
51 return ''.join(self.body)
52
53 def visit_field_body(self, node):
54 pass
55
56 def depart_field_body(self, node):
57 pass
58
59
60 def render_node_to_html(document, node):
61 visitor = _FieldBodyTranslator(document)
62 node.walkabout(visitor)
63 return visitor.astext()
64
65
66 class RstReader(Reader):
67 enabled = bool(docutils)
68 file_extensions = ['rst']
69
70 def _parse_metadata(self, document):
71 """Return the dict containing document metadata"""
72 output = {}
73 for docinfo in document.traverse(docutils.nodes.docinfo):
74 for element in docinfo.children:
75 if element.tagname == 'field': # custom fields (e.g. summary)
76 name_elem, body_elem = element.children
77 name = name_elem.astext()
78 if name == 'summary':
79 value = render_node_to_html(document, body_elem)
80 else:
81 value = body_elem.astext()
82 else: # standard fields (e.g. address)
83 name = element.tagname
84 value = element.astext()
85 name = name.lower()
86
87 output[name] = self.process_metadata(name, value)
88 return output
89
90 def _get_publisher(self, filename):
91 extra_params = {'initial_header_level': '2'}
92 pub = docutils.core.Publisher(
93 destination_class=docutils.io.StringOutput)
94 pub.set_components('standalone', 'restructuredtext', 'html')
95 pub.process_programmatic_settings(None, extra_params, None)
96 pub.set_source(source_path=filename)
97 pub.publish()
98 return pub
99
100 def read(self, filename):
101 """Parses restructured text"""
102 pub = self._get_publisher(filename)
103 parts = pub.writer.parts
104 content = parts.get('body')
105
106 metadata = self._parse_metadata(pub.document)
107 metadata.setdefault('title', parts.get('title'))
108
109 return content, metadata
110
111
112 class MarkdownReader(Reader):
113 enabled = bool(Markdown)
114 file_extensions = ['md', 'markdown', 'mkd']
115 extensions = ['codehilite', 'extra']
116
117 def read(self, filename):
118 """Parse content and metadata of markdown files"""
119 text = open(filename)
120 md = Markdown(extensions=set(self.extensions + ['meta']))
121 content = md.convert(text)
122
123 metadata = {}
124 for name, value in md.Meta.items():
125 name = name.lower()
126 metadata[name] = self.process_metadata(name, value[0])
127 return content, metadata
128
129
130 class HtmlReader(Reader):
131 file_extensions = ['html', 'htm']
132 _re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')
133
134 def read(self, filename):
135 """Parse content and metadata of (x)HTML files"""
136 with open(filename) as content:
137 metadata = {'title': 'unnamed'}
138 for i in self._re.findall(content):
139 key = i.split(':')[0][5:].strip()
140 value = i.split(':')[-1][:-3].strip()
141 name = key.lower()
142 metadata[name] = self.process_metadata(name, value)
143
144 return content, metadata
145
146
147 _EXTENSIONS = {}
148
149 for cls in Reader.__subclasses__():
150 for ext in cls.file_extensions:
151 _EXTENSIONS[ext] = cls
152
153
154 def read_file(filename, fmt=None, settings=None):
155 """Return a reader object using the given format."""
156 if not fmt:
157 fmt = filename.split('.')[-1]
158
159 if fmt not in _EXTENSIONS:
160 raise TypeError('Pelican does not know how to parse %s' % filename)
161
162 reader = _EXTENSIONS[fmt](settings)
163 settings_key = '%s_EXTENSIONS' % fmt.upper()
164
165 if settings and settings_key in settings:
166 reader.extensions = settings[settings_key]
167
168 if not reader.enabled:
169 raise ValueError("Missing dependencies for %s" % fmt)
170
171 content, metadata = reader.read(filename)
172
173 # eventually filter the content with typogrify if asked so
174 if settings and settings['TYPOGRIFY']:
175 from typogrify import Typogrify
176 content = Typogrify.typogrify(content)
177 metadata['title'] = Typogrify.typogrify(metadata['title'])
178
179 return content, metadata
180
[end of pelican/readers.py]
[start of pelican/rstdirectives.py]
1 # -*- coding: utf-8 -*-
2 from docutils import nodes
3 from docutils.parsers.rst import directives, Directive
4 from pygments.formatters import HtmlFormatter
5 from pygments import highlight
6 from pygments.lexers import get_lexer_by_name, TextLexer
7
8 INLINESTYLES = False
9 DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
10 VARIANTS = {
11 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
12 }
13
14
15 class Pygments(Directive):
16 """ Source code syntax hightlighting.
17 """
18 required_arguments = 1
19 optional_arguments = 0
20 final_argument_whitespace = True
21 option_spec = dict([(key, directives.flag) for key in VARIANTS])
22 has_content = True
23
24 def run(self):
25 self.assert_has_content()
26 try:
27 lexer = get_lexer_by_name(self.arguments[0])
28 except ValueError:
29 # no lexer found - use the text one instead of an exception
30 lexer = TextLexer()
31 # take an arbitrary option if more than one is given
32 formatter = self.options and VARIANTS[self.options.keys()[0]] \
33 or DEFAULT
34 parsed = highlight(u'\n'.join(self.content), lexer, formatter)
35 return [nodes.raw('', parsed, format='html')]
36
37 directives.register_directive('code-block', Pygments)
38 directives.register_directive('sourcecode', Pygments)
39
40
41 class YouTube(Directive):
42 """ Embed YouTube video in posts.
43
44 Courtesy of Brian Hsu: https://gist.github.com/1422773
45
46 VIDEO_ID is required, with / height are optional integer,
47 and align could be left / center / right.
48
49 Usage:
50 .. youtube:: VIDEO_ID
51 :width: 640
52 :height: 480
53 :align: center
54 """
55
56 def align(argument):
57 """Conversion function for the "align" option."""
58 return directives.choice(argument, ('left', 'center', 'right'))
59
60 required_arguments = 1
61 optional_arguments = 2
62 option_spec = {
63 'width': directives.positive_int,
64 'height': directives.positive_int,
65 'align': align
66 }
67
68 final_argument_whitespace = False
69 has_content = False
70
71 def run(self):
72 videoID = self.arguments[0].strip()
73 width = 420
74 height = 315
75 align = 'left'
76
77 if 'width' in self.options:
78 width = self.options['width']
79
80 if 'height' in self.options:
81 height = self.options['height']
82
83 if 'align' in self.options:
84 align = self.options['align']
85
86 url = 'http://www.youtube.com/embed/%s' % videoID
87 div_block = '<div class="youtube" align="%s">' % align
88 embed_block = '<iframe width="%s" height="%s" src="%s" '\
89 'frameborder="0"></iframe>' % (width, height, url)
90
91 return [
92 nodes.raw('', div_block, format='html'),
93 nodes.raw('', embed_block, format='html'),
94 nodes.raw('', '</div>', format='html')]
95
96 directives.register_directive('youtube', YouTube)
97
[end of pelican/rstdirectives.py]
</code>
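A side note on the module above: the `register_directive` calls run at import time, so exercising the custom markup from a script is just a matter of importing the module before publishing. A hedged usage sketch (assumes a Pelican checkout with docutils and Pygments installed):

~~~python
# Exercise the code-block directive registered in pelican/rstdirectives.py.
from docutils.core import publish_parts

import pelican.rstdirectives  # noqa: F401  (importing registers the directives)

source = """\
.. code-block:: python

   print("highlighted by Pygments")
"""

print(publish_parts(source=source, writer_name="html")["body"])
~~~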
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/readers.py b/pelican/readers.py
--- a/pelican/readers.py
+++ b/pelican/readers.py
@@ -63,6 +63,18 @@
return visitor.astext()
+class PelicanHTMLTranslator(HTMLTranslator):
+
+ def visit_abbreviation(self, node):
+ attrs = {}
+ if node.hasattr('explanation'):
+ attrs['title'] = node['explanation']
+ self.body.append(self.starttag(node, 'abbr', '', **attrs))
+
+ def depart_abbreviation(self, node):
+ self.body.append('</abbr>')
+
+
class RstReader(Reader):
enabled = bool(docutils)
file_extensions = ['rst']
@@ -92,6 +104,7 @@
pub = docutils.core.Publisher(
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
+ pub.writer.translator_class = PelicanHTMLTranslator
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=filename)
pub.publish()
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py
--- a/pelican/rstdirectives.py
+++ b/pelican/rstdirectives.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
-from docutils import nodes
-from docutils.parsers.rst import directives, Directive
+from docutils import nodes, utils
+from docutils.parsers.rst import directives, roles, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
+import re
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
@@ -94,3 +95,18 @@
nodes.raw('', '</div>', format='html')]
directives.register_directive('youtube', YouTube)
+
+_abbr_re = re.compile('\((.*)\)$')
+
+class abbreviation(nodes.Inline, nodes.TextElement): pass
+
+def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ text = utils.unescape(text)
+ m = _abbr_re.search(text)
+ if m is None:
+ return [abbreviation(text, text)], []
+ abbr = text[:m.start()].strip()
+ expl = m.group(1)
+ return [abbreviation(abbr, abbr, explanation=expl)], []
+
+roles.register_local_role('abbr', abbr_role)
| {"golden_diff": "diff --git a/pelican/readers.py b/pelican/readers.py\n--- a/pelican/readers.py\n+++ b/pelican/readers.py\n@@ -63,6 +63,18 @@\n return visitor.astext()\n \n \n+class PelicanHTMLTranslator(HTMLTranslator):\n+\n+ def visit_abbreviation(self, node):\n+ attrs = {}\n+ if node.hasattr('explanation'):\n+ attrs['title'] = node['explanation']\n+ self.body.append(self.starttag(node, 'abbr', '', **attrs))\n+\n+ def depart_abbreviation(self, node):\n+ self.body.append('</abbr>')\n+\n+\n class RstReader(Reader):\n enabled = bool(docutils)\n file_extensions = ['rst']\n@@ -92,6 +104,7 @@\n pub = docutils.core.Publisher(\n destination_class=docutils.io.StringOutput)\n pub.set_components('standalone', 'restructuredtext', 'html')\n+ pub.writer.translator_class = PelicanHTMLTranslator\n pub.process_programmatic_settings(None, extra_params, None)\n pub.set_source(source_path=filename)\n pub.publish()\ndiff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py\n--- a/pelican/rstdirectives.py\n+++ b/pelican/rstdirectives.py\n@@ -1,9 +1,10 @@\n # -*- coding: utf-8 -*-\n-from docutils import nodes\n-from docutils.parsers.rst import directives, Directive\n+from docutils import nodes, utils\n+from docutils.parsers.rst import directives, roles, Directive\n from pygments.formatters import HtmlFormatter\n from pygments import highlight\n from pygments.lexers import get_lexer_by_name, TextLexer\n+import re\n \n INLINESTYLES = False\n DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)\n@@ -94,3 +95,18 @@\n nodes.raw('', '</div>', format='html')]\n \n directives.register_directive('youtube', YouTube)\n+\n+_abbr_re = re.compile('\\((.*)\\)$')\n+\n+class abbreviation(nodes.Inline, nodes.TextElement): pass\n+\n+def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):\n+\ttext = utils.unescape(text)\n+\tm = _abbr_re.search(text)\n+\tif m is None:\n+\t\treturn [abbreviation(text, text)], []\n+\tabbr = text[:m.start()].strip()\n+\texpl = m.group(1)\n+\treturn [abbreviation(abbr, abbr, explanation=expl)], []\n+\n+roles.register_local_role('abbr', abbr_role)\n", "issue": "Add support for acronym/abbr\nLike in Sphinx, for example:\n\nhttps://bitbucket.org/birkenfeld/sphinx/changeset/f69ec6dd2c54\n\nDocutils has this in the To Do List, but I like Georg's syntax better.\n\nhttp://docutils.sourceforge.net/docs/dev/todo.html\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\ntry:\n import docutils\n import docutils.core\n import docutils.io\n from docutils.writers.html4css1 import HTMLTranslator\n\n # import the directives to have pygments support\n from pelican import rstdirectives # NOQA\nexcept ImportError:\n core = False\ntry:\n from markdown import Markdown\nexcept ImportError:\n Markdown = False # NOQA\nimport re\n\nfrom pelican.contents import Category, Tag, Author\nfrom pelican.utils import get_date, open\n\n\n_METADATA_PROCESSORS = {\n 'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')],\n 'date': lambda x, y: get_date(x),\n 'status': lambda x, y: unicode.strip(x),\n 'category': Category,\n 'author': Author,\n}\n\n\nclass Reader(object):\n enabled = True\n extensions = None\n\n def __init__(self, settings):\n self.settings = settings\n\n def process_metadata(self, name, value):\n if name in _METADATA_PROCESSORS:\n return _METADATA_PROCESSORS[name](value, self.settings)\n return value\n\n\nclass _FieldBodyTranslator(HTMLTranslator):\n\n def __init__(self, document):\n HTMLTranslator.__init__(self, document)\n self.compact_p = None\n\n def astext(self):\n return 
''.join(self.body)\n\n def visit_field_body(self, node):\n pass\n\n def depart_field_body(self, node):\n pass\n\n\ndef render_node_to_html(document, node):\n visitor = _FieldBodyTranslator(document)\n node.walkabout(visitor)\n return visitor.astext()\n\n\nclass RstReader(Reader):\n enabled = bool(docutils)\n file_extensions = ['rst']\n\n def _parse_metadata(self, document):\n \"\"\"Return the dict containing document metadata\"\"\"\n output = {}\n for docinfo in document.traverse(docutils.nodes.docinfo):\n for element in docinfo.children:\n if element.tagname == 'field': # custom fields (e.g. summary)\n name_elem, body_elem = element.children\n name = name_elem.astext()\n if name == 'summary':\n value = render_node_to_html(document, body_elem)\n else:\n value = body_elem.astext()\n else: # standard fields (e.g. address)\n name = element.tagname\n value = element.astext()\n name = name.lower()\n\n output[name] = self.process_metadata(name, value)\n return output\n\n def _get_publisher(self, filename):\n extra_params = {'initial_header_level': '2'}\n pub = docutils.core.Publisher(\n destination_class=docutils.io.StringOutput)\n pub.set_components('standalone', 'restructuredtext', 'html')\n pub.process_programmatic_settings(None, extra_params, None)\n pub.set_source(source_path=filename)\n pub.publish()\n return pub\n\n def read(self, filename):\n \"\"\"Parses restructured text\"\"\"\n pub = self._get_publisher(filename)\n parts = pub.writer.parts\n content = parts.get('body')\n\n metadata = self._parse_metadata(pub.document)\n metadata.setdefault('title', parts.get('title'))\n\n return content, metadata\n\n\nclass MarkdownReader(Reader):\n enabled = bool(Markdown)\n file_extensions = ['md', 'markdown', 'mkd']\n extensions = ['codehilite', 'extra']\n\n def read(self, filename):\n \"\"\"Parse content and metadata of markdown files\"\"\"\n text = open(filename)\n md = Markdown(extensions=set(self.extensions + ['meta']))\n content = md.convert(text)\n\n metadata = {}\n for name, value in md.Meta.items():\n name = name.lower()\n metadata[name] = self.process_metadata(name, value[0])\n return content, metadata\n\n\nclass HtmlReader(Reader):\n file_extensions = ['html', 'htm']\n _re = re.compile('\\<\\!\\-\\-\\#\\s?[A-z0-9_-]*\\s?\\:s?[A-z0-9\\s_-]*\\s?\\-\\-\\>')\n\n def read(self, filename):\n \"\"\"Parse content and metadata of (x)HTML files\"\"\"\n with open(filename) as content:\n metadata = {'title': 'unnamed'}\n for i in self._re.findall(content):\n key = i.split(':')[0][5:].strip()\n value = i.split(':')[-1][:-3].strip()\n name = key.lower()\n metadata[name] = self.process_metadata(name, value)\n\n return content, metadata\n\n\n_EXTENSIONS = {}\n\nfor cls in Reader.__subclasses__():\n for ext in cls.file_extensions:\n _EXTENSIONS[ext] = cls\n\n\ndef read_file(filename, fmt=None, settings=None):\n \"\"\"Return a reader object using the given format.\"\"\"\n if not fmt:\n fmt = filename.split('.')[-1]\n\n if fmt not in _EXTENSIONS:\n raise TypeError('Pelican does not know how to parse %s' % filename)\n\n reader = _EXTENSIONS[fmt](settings)\n settings_key = '%s_EXTENSIONS' % fmt.upper()\n\n if settings and settings_key in settings:\n reader.extensions = settings[settings_key]\n\n if not reader.enabled:\n raise ValueError(\"Missing dependencies for %s\" % fmt)\n\n content, metadata = reader.read(filename)\n\n # eventually filter the content with typogrify if asked so\n if settings and settings['TYPOGRIFY']:\n from typogrify import Typogrify\n content = Typogrify.typogrify(content)\n 
metadata['title'] = Typogrify.typogrify(metadata['title'])\n\n return content, metadata\n", "path": "pelican/readers.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom pygments.formatters import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, TextLexer\n\nINLINESTYLES = False\nDEFAULT = HtmlFormatter(noclasses=INLINESTYLES)\nVARIANTS = {\n 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),\n}\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = dict([(key, directives.flag) for key in VARIANTS])\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n # take an arbitrary option if more than one is given\n formatter = self.options and VARIANTS[self.options.keys()[0]] \\\n or DEFAULT\n parsed = highlight(u'\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\ndirectives.register_directive('code-block', Pygments)\ndirectives.register_directive('sourcecode', Pygments)\n\n\nclass YouTube(Directive):\n \"\"\" Embed YouTube video in posts.\n\n Courtesy of Brian Hsu: https://gist.github.com/1422773\n\n VIDEO_ID is required, with / height are optional integer,\n and align could be left / center / right.\n\n Usage:\n .. youtube:: VIDEO_ID\n :width: 640\n :height: 480\n :align: center\n \"\"\"\n\n def align(argument):\n \"\"\"Conversion function for the \"align\" option.\"\"\"\n return directives.choice(argument, ('left', 'center', 'right'))\n\n required_arguments = 1\n optional_arguments = 2\n option_spec = {\n 'width': directives.positive_int,\n 'height': directives.positive_int,\n 'align': align\n }\n\n final_argument_whitespace = False\n has_content = False\n\n def run(self):\n videoID = self.arguments[0].strip()\n width = 420\n height = 315\n align = 'left'\n\n if 'width' in self.options:\n width = self.options['width']\n\n if 'height' in self.options:\n height = self.options['height']\n\n if 'align' in self.options:\n align = self.options['align']\n\n url = 'http://www.youtube.com/embed/%s' % videoID\n div_block = '<div class=\"youtube\" align=\"%s\">' % align\n embed_block = '<iframe width=\"%s\" height=\"%s\" src=\"%s\" '\\\n 'frameborder=\"0\"></iframe>' % (width, height, url)\n\n return [\n nodes.raw('', div_block, format='html'),\n nodes.raw('', embed_block, format='html'),\n nodes.raw('', '</div>', format='html')]\n\ndirectives.register_directive('youtube', YouTube)\n", "path": "pelican/rstdirectives.py"}]} | 3,193 | 570 |
gh_patches_debug_30689 | rasdani/github-patches | git_diff | nonebot__nonebot2-1104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature: support synchronous functions for forward-driver startup/shutdown hooks
**Problem description:**
BlockDriver crashes at startup when synchronous functions are registered as `on_startup` / `on_shutdown` hooks.
**How to reproduce?**
```python
from nonebot import get_driver
@get_driver().on_startup
def i_will_crash():
print("oops")
```
**Expected result**
**Environment information:**
- OS: Windows 10
- Python Version: 3.10
- Nonebot Version: 2.0.0b4
**Protocol client information:**
 - Protocol client: Kaiheila
 - Protocol client version: 0.0.2
**Screenshots or logs**
```
07-12 23:56:09 [SUCCESS] nonebot | NoneBot is initializing...
07-12 23:56:09 [INFO] nonebot | Current Env: dev
07-12 23:56:09 [DEBUG] nonebot | Loaded Config: {'driver': '~httpx+~websockets', 'host': IPv4Address('127.0.0.1'), 'port': 8080, 'log_level': 'DEBUG', 'api_timeout': 30.0, 'superusers': set(), 'nickname': set(), 'command_start': {'/'}, 'command_sep': {'.'}, 'session_expire_timeout': datetime.timedelta(seconds=120)}
07-12 23:56:09 [DEBUG] nonebot | Succeeded to load adapter "Kaiheila"
07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import "echo"
07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import "nonebot_plugin_pixivbot_kook"
07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import "test"
07-12 23:56:09 [WARNING] __main__ | Always use `nb run` to start the bot instead of manually running!
07-12 23:56:09 [SUCCESS] nonebot | Running NoneBot...
07-12 23:56:09 [DEBUG] nonebot | Loaded adapters: Kaiheila
oops
07-12 23:56:09 [ERROR] nonebot | Error when running startup function. Ignored!
Traceback (most recent call last):
File "C:\Users\huang\PycharmProjects\kuraku-kook\bot.py", line 21, in <module>
nonebot.run()
File "C:\Users\huang\PycharmProjects\kuraku-kook\venv\lib\site-packages\nonebot\__init__.py", line 261, in run
get_driver().run(*args, **kwargs)
File "C:\Users\huang\PycharmProjects\kuraku-kook\venv\lib\site-packages\nonebot\drivers\_block_driver.py", line 60, in run
loop.run_until_complete(self.serve())
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 628, in run_until_complete
self.run_forever()
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\windows_events.py", line 321, in run_forever
super().run_forever()
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 595, in run_forever
self._run_once()
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\base_events.py", line 1881, in _run_once
handle._run()
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\huang\PycharmProjects\kuraku-kook\venv\lib\site-packages\nonebot\drivers\_block_driver.py", line 64, in serve
await self.startup()
> File "C:\Users\huang\PycharmProjects\kuraku-kook\venv\lib\site-packages\nonebot\drivers\_block_driver.py", line 75, in startup
await asyncio.gather(*cors)
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\tasks.py", line 776, in gather
fut = _ensure_future(arg, loop=loop)
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\tasks.py", line 629, in _ensure_future
raise TypeError('An asyncio.Future, a coroutine or an awaitable '
TypeError: An asyncio.Future, a coroutine or an awaitable is required
07-12 23:56:09 [INFO] nonebot | Application startup completed.
Exception in callback gather.<locals>._done_callback(<Task finishe...> result=None>) at C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\tasks.py:714
handle: <Handle gather.<locals>._done_callback(<Task finishe...> result=None>) at C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\tasks.py:714>
Traceback (most recent call last):
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\huang\AppData\Local\Programs\Python\Python310\lib\asyncio\tasks.py", line 718, in _done_callback
if outer.done():
NameError: free variable 'outer' referenced before assignment in enclosing scope
07-12 23:56:09 [DEBUG] nonebot | Kaiheila | WebSocket Connection to wss://ws.kaiheila.cn/gateway?compress=0&token=***************** established
07-12 23:56:09 [INFO] nonebot | Kaiheila | Bot ******** connected
07-12 23:56:09 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat
07-12 23:56:35 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat
07-12 23:57:01 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat
```
</issue>
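The traceback above comes down to `asyncio.gather` being handed the plain `None` returned by a synchronous hook instead of an awaitable. As a rough standard-library sketch only — not the project's actual fix — one way to accept both kinds of hooks is to detect coroutine functions and push plain callables onto a worker thread before gathering:

```python
import asyncio
from typing import Awaitable, Callable, Set, Union

# Hooks may be either `async def` functions or plain functions.
HOOK_FUNC = Callable[[], Union[None, Awaitable[None]]]


async def run_hooks(hooks: Set[HOOK_FUNC]) -> None:
    # Wrap synchronous hooks so every object passed to gather() is awaitable.
    coros = [
        hook() if asyncio.iscoroutinefunction(hook) else asyncio.to_thread(hook)
        for hook in hooks
    ]
    if coros:
        await asyncio.gather(*coros)
```

The repository's own patch further down in this record takes the same approach, but uses NoneBot's `is_coroutine_callable` and `run_sync` helpers instead of `asyncio.to_thread`.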
<code>
[start of nonebot/drivers/_block_driver.py]
1 import signal
2 import asyncio
3 import threading
4 from typing import Set, Callable, Awaitable
5
6 from nonebot.log import logger
7 from nonebot.drivers import Driver
8 from nonebot.typing import overrides
9 from nonebot.config import Env, Config
10
11 STARTUP_FUNC = Callable[[], Awaitable[None]]
12 SHUTDOWN_FUNC = Callable[[], Awaitable[None]]
13 HANDLED_SIGNALS = (
14 signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
15 signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
16 )
17
18
19 class BlockDriver(Driver):
20 def __init__(self, env: Env, config: Config):
21 super().__init__(env, config)
22 self.startup_funcs: Set[STARTUP_FUNC] = set()
23 self.shutdown_funcs: Set[SHUTDOWN_FUNC] = set()
24 self.should_exit: asyncio.Event = asyncio.Event()
25 self.force_exit: bool = False
26
27 @property
28 @overrides(Driver)
29 def type(self) -> str:
30 """驱动名称: `block_driver`"""
31 return "block_driver"
32
33 @property
34 @overrides(Driver)
35 def logger(self):
36 """block driver 使用的 logger"""
37 return logger
38
39 @overrides(Driver)
40 def on_startup(self, func: STARTUP_FUNC) -> STARTUP_FUNC:
41 """
42 注册一个启动时执行的函数
43 """
44 self.startup_funcs.add(func)
45 return func
46
47 @overrides(Driver)
48 def on_shutdown(self, func: SHUTDOWN_FUNC) -> SHUTDOWN_FUNC:
49 """
50 注册一个停止时执行的函数
51 """
52 self.shutdown_funcs.add(func)
53 return func
54
55 @overrides(Driver)
56 def run(self, *args, **kwargs):
57 """启动 block driver"""
58 super().run(*args, **kwargs)
59 loop = asyncio.get_event_loop()
60 loop.run_until_complete(self.serve())
61
62 async def serve(self):
63 self.install_signal_handlers()
64 await self.startup()
65 if self.should_exit.is_set():
66 return
67 await self.main_loop()
68 await self.shutdown()
69
70 async def startup(self):
71 # run startup
72 cors = [startup() for startup in self.startup_funcs]
73 if cors:
74 try:
75 await asyncio.gather(*cors)
76 except Exception as e:
77 logger.opt(colors=True, exception=e).error(
78 "<r><bg #f8bbd0>Error when running startup function. "
79 "Ignored!</bg #f8bbd0></r>"
80 )
81
82 logger.info("Application startup completed.")
83
84 async def main_loop(self):
85 await self.should_exit.wait()
86
87 async def shutdown(self):
88 logger.info("Shutting down")
89
90 logger.info("Waiting for application shutdown.")
91 # run shutdown
92 cors = [shutdown() for shutdown in self.shutdown_funcs]
93 if cors:
94 try:
95 await asyncio.gather(*cors)
96 except Exception as e:
97 logger.opt(colors=True, exception=e).error(
98 "<r><bg #f8bbd0>Error when running shutdown function. "
99 "Ignored!</bg #f8bbd0></r>"
100 )
101
102 for task in asyncio.all_tasks():
103 if task is not asyncio.current_task() and not task.done():
104 task.cancel()
105 await asyncio.sleep(0.1)
106
107 tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
108 if tasks and not self.force_exit:
109 logger.info("Waiting for tasks to finish. (CTRL+C to force quit)")
110 while tasks and not self.force_exit:
111 await asyncio.sleep(0.1)
112 tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
113
114 for task in tasks:
115 task.cancel()
116
117 await asyncio.gather(*tasks, return_exceptions=True)
118
119 logger.info("Application shutdown complete.")
120 loop = asyncio.get_event_loop()
121 loop.stop()
122
123 def install_signal_handlers(self) -> None:
124 if threading.current_thread() is not threading.main_thread():
125 # Signals can only be listened to from the main thread.
126 return
127
128 loop = asyncio.get_event_loop()
129
130 try:
131 for sig in HANDLED_SIGNALS:
132 loop.add_signal_handler(sig, self.handle_exit, sig, None)
133 except NotImplementedError:
134 # Windows
135 for sig in HANDLED_SIGNALS:
136 signal.signal(sig, self.handle_exit)
137
138 def handle_exit(self, sig, frame):
139 if self.should_exit.is_set():
140 self.force_exit = True
141 else:
142 self.should_exit.set()
143
[end of nonebot/drivers/_block_driver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nonebot/drivers/_block_driver.py b/nonebot/drivers/_block_driver.py
--- a/nonebot/drivers/_block_driver.py
+++ b/nonebot/drivers/_block_driver.py
@@ -1,15 +1,16 @@
import signal
import asyncio
import threading
-from typing import Set, Callable, Awaitable
+from typing import Set, Union, Callable, Awaitable
from nonebot.log import logger
from nonebot.drivers import Driver
from nonebot.typing import overrides
from nonebot.config import Env, Config
+from nonebot.utils import run_sync, is_coroutine_callable
-STARTUP_FUNC = Callable[[], Awaitable[None]]
-SHUTDOWN_FUNC = Callable[[], Awaitable[None]]
+STARTUP_FUNC = Callable[[], Union[None, Awaitable[None]]]
+SHUTDOWN_FUNC = Callable[[], Union[None, Awaitable[None]]]
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
@@ -69,7 +70,10 @@
async def startup(self):
# run startup
- cors = [startup() for startup in self.startup_funcs]
+ cors = [
+ startup() if is_coroutine_callable(startup) else run_sync(startup)()
+ for startup in self.startup_funcs
+ ]
if cors:
try:
await asyncio.gather(*cors)
@@ -89,7 +93,10 @@
logger.info("Waiting for application shutdown.")
# run shutdown
- cors = [shutdown() for shutdown in self.shutdown_funcs]
+ cors = [
+ shutdown() if is_coroutine_callable(shutdown) else run_sync(shutdown)()
+ for shutdown in self.shutdown_funcs
+ ]
if cors:
try:
await asyncio.gather(*cors)
| {"golden_diff": "diff --git a/nonebot/drivers/_block_driver.py b/nonebot/drivers/_block_driver.py\n--- a/nonebot/drivers/_block_driver.py\n+++ b/nonebot/drivers/_block_driver.py\n@@ -1,15 +1,16 @@\n import signal\n import asyncio\n import threading\n-from typing import Set, Callable, Awaitable\n+from typing import Set, Union, Callable, Awaitable\n \n from nonebot.log import logger\n from nonebot.drivers import Driver\n from nonebot.typing import overrides\n from nonebot.config import Env, Config\n+from nonebot.utils import run_sync, is_coroutine_callable\n \n-STARTUP_FUNC = Callable[[], Awaitable[None]]\n-SHUTDOWN_FUNC = Callable[[], Awaitable[None]]\n+STARTUP_FUNC = Callable[[], Union[None, Awaitable[None]]]\n+SHUTDOWN_FUNC = Callable[[], Union[None, Awaitable[None]]]\n HANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n@@ -69,7 +70,10 @@\n \n async def startup(self):\n # run startup\n- cors = [startup() for startup in self.startup_funcs]\n+ cors = [\n+ startup() if is_coroutine_callable(startup) else run_sync(startup)()\n+ for startup in self.startup_funcs\n+ ]\n if cors:\n try:\n await asyncio.gather(*cors)\n@@ -89,7 +93,10 @@\n \n logger.info(\"Waiting for application shutdown.\")\n # run shutdown\n- cors = [shutdown() for shutdown in self.shutdown_funcs]\n+ cors = [\n+ shutdown() if is_coroutine_callable(shutdown) else run_sync(shutdown)()\n+ for shutdown in self.shutdown_funcs\n+ ]\n if cors:\n try:\n await asyncio.gather(*cors)\n", "issue": "Feature: \u6b63\u5411\u9a71\u52a8\u5668 startup/shutdown hook \u540c\u6b65\u51fd\u6570\u652f\u6301\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\nBlockDriver\u6ce8\u518c\u540c\u6b65on_startup\u4e0eon_shutdown\u94a9\u5b50\u51fd\u6570\u65f6\u542f\u52a8\u5d29\u6e83\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n```python\r\nfrom nonebot import get_driver\r\n\r\n\r\n@get_driver().on_startup\r\ndef i_will_crash():\r\n print(\"oops\")\r\n\r\n```\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: Windows 10\r\n - Python Version: 3.10\r\n - Nonebot Version: 2.0.0b4\r\n\r\n**\u534f\u8bae\u7aef\u4fe1\u606f\uff1a**\r\n\r\n - \u534f\u8bae\u7aef: Kaiheila\r\n - \u534f\u8bae\u7aef\u7248\u672c: 0.0.2\r\n\r\n**\u622a\u56fe\u6216\u65e5\u5fd7**\r\n```\r\n07-12 23:56:09 [SUCCESS] nonebot | NoneBot is initializing...\r\n07-12 23:56:09 [INFO] nonebot | Current Env: dev\r\n07-12 23:56:09 [DEBUG] nonebot | Loaded Config: {'driver': '~httpx+~websockets', 'host': IPv4Address('127.0.0.1'), 'port': 8080, 'log_level': 'DEBUG', 'api_timeout': 30.0, 'superusers': set(), 'nickname': set(), 'command_start': {'/'}, 'command_sep': {'.'}, 'session_expire_timeout': datetime.timedelta(seconds=120)}\r\n07-12 23:56:09 [DEBUG] nonebot | Succeeded to load adapter \"Kaiheila\"\r\n07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import \"echo\"\r\n07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import \"nonebot_plugin_pixivbot_kook\"\r\n07-12 23:56:09 [SUCCESS] nonebot | Succeeded to import \"test\"\r\n07-12 23:56:09 [WARNING] __main__ | Always use `nb run` to start the bot instead of manually running!\r\n07-12 23:56:09 [SUCCESS] nonebot | Running NoneBot...\r\n07-12 23:56:09 [DEBUG] nonebot | Loaded adapters: Kaiheila\r\noops\r\n07-12 23:56:09 [ERROR] nonebot | Error when running startup function. 
Ignored!\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\huang\\PycharmProjects\\kuraku-kook\\bot.py\", line 21, in <module>\r\n nonebot.run()\r\n File \"C:\\Users\\huang\\PycharmProjects\\kuraku-kook\\venv\\lib\\site-packages\\nonebot\\__init__.py\", line 261, in run\r\n get_driver().run(*args, **kwargs)\r\n File \"C:\\Users\\huang\\PycharmProjects\\kuraku-kook\\venv\\lib\\site-packages\\nonebot\\drivers\\_block_driver.py\", line 60, in run\r\n loop.run_until_complete(self.serve())\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\base_events.py\", line 628, in run_until_complete\r\n self.run_forever()\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\windows_events.py\", line 321, in run_forever\r\n super().run_forever()\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\base_events.py\", line 595, in run_forever\r\n self._run_once()\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\base_events.py\", line 1881, in _run_once\r\n handle._run()\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"C:\\Users\\huang\\PycharmProjects\\kuraku-kook\\venv\\lib\\site-packages\\nonebot\\drivers\\_block_driver.py\", line 64, in serve\r\n await self.startup()\r\n> File \"C:\\Users\\huang\\PycharmProjects\\kuraku-kook\\venv\\lib\\site-packages\\nonebot\\drivers\\_block_driver.py\", line 75, in startup\r\n await asyncio.gather(*cors)\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\tasks.py\", line 776, in gather\r\n fut = _ensure_future(arg, loop=loop)\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\tasks.py\", line 629, in _ensure_future\r\n raise TypeError('An asyncio.Future, a coroutine or an awaitable '\r\nTypeError: An asyncio.Future, a coroutine or an awaitable is required\r\n07-12 23:56:09 [INFO] nonebot | Application startup completed.\r\nException in callback gather.<locals>._done_callback(<Task finishe...> result=None>) at C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\tasks.py:714\r\nhandle: <Handle gather.<locals>._done_callback(<Task finishe...> result=None>) at C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\tasks.py:714>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"C:\\Users\\huang\\AppData\\Local\\Programs\\Python\\Python310\\lib\\asyncio\\tasks.py\", line 718, in _done_callback\r\n if outer.done():\r\nNameError: free variable 'outer' referenced before assignment in enclosing scope\r\n07-12 23:56:09 [DEBUG] nonebot | Kaiheila | WebSocket Connection to wss://ws.kaiheila.cn/gateway?compress=0&token=***************** established\r\n07-12 23:56:09 [INFO] nonebot | Kaiheila | Bot ******** connected\r\n07-12 23:56:09 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat\r\n07-12 23:56:35 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat\r\n07-12 23:57:01 [DEBUG] nonebot | Kaiheila | Bot ******** HeartBeat\r\n```\r\n\n", "before_files": [{"content": "import signal\nimport asyncio\nimport threading\nfrom typing import Set, Callable, Awaitable\n\nfrom nonebot.log import logger\nfrom nonebot.drivers import 
Driver\nfrom nonebot.typing import overrides\nfrom nonebot.config import Env, Config\n\nSTARTUP_FUNC = Callable[[], Awaitable[None]]\nSHUTDOWN_FUNC = Callable[[], Awaitable[None]]\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\n\n\nclass BlockDriver(Driver):\n def __init__(self, env: Env, config: Config):\n super().__init__(env, config)\n self.startup_funcs: Set[STARTUP_FUNC] = set()\n self.shutdown_funcs: Set[SHUTDOWN_FUNC] = set()\n self.should_exit: asyncio.Event = asyncio.Event()\n self.force_exit: bool = False\n\n @property\n @overrides(Driver)\n def type(self) -> str:\n \"\"\"\u9a71\u52a8\u540d\u79f0: `block_driver`\"\"\"\n return \"block_driver\"\n\n @property\n @overrides(Driver)\n def logger(self):\n \"\"\"block driver \u4f7f\u7528\u7684 logger\"\"\"\n return logger\n\n @overrides(Driver)\n def on_startup(self, func: STARTUP_FUNC) -> STARTUP_FUNC:\n \"\"\"\n \u6ce8\u518c\u4e00\u4e2a\u542f\u52a8\u65f6\u6267\u884c\u7684\u51fd\u6570\n \"\"\"\n self.startup_funcs.add(func)\n return func\n\n @overrides(Driver)\n def on_shutdown(self, func: SHUTDOWN_FUNC) -> SHUTDOWN_FUNC:\n \"\"\"\n \u6ce8\u518c\u4e00\u4e2a\u505c\u6b62\u65f6\u6267\u884c\u7684\u51fd\u6570\n \"\"\"\n self.shutdown_funcs.add(func)\n return func\n\n @overrides(Driver)\n def run(self, *args, **kwargs):\n \"\"\"\u542f\u52a8 block driver\"\"\"\n super().run(*args, **kwargs)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.serve())\n\n async def serve(self):\n self.install_signal_handlers()\n await self.startup()\n if self.should_exit.is_set():\n return\n await self.main_loop()\n await self.shutdown()\n\n async def startup(self):\n # run startup\n cors = [startup() for startup in self.startup_funcs]\n if cors:\n try:\n await asyncio.gather(*cors)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running startup function. \"\n \"Ignored!</bg #f8bbd0></r>\"\n )\n\n logger.info(\"Application startup completed.\")\n\n async def main_loop(self):\n await self.should_exit.wait()\n\n async def shutdown(self):\n logger.info(\"Shutting down\")\n\n logger.info(\"Waiting for application shutdown.\")\n # run shutdown\n cors = [shutdown() for shutdown in self.shutdown_funcs]\n if cors:\n try:\n await asyncio.gather(*cors)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running shutdown function. \"\n \"Ignored!</bg #f8bbd0></r>\"\n )\n\n for task in asyncio.all_tasks():\n if task is not asyncio.current_task() and not task.done():\n task.cancel()\n await asyncio.sleep(0.1)\n\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n if tasks and not self.force_exit:\n logger.info(\"Waiting for tasks to finish. 
(CTRL+C to force quit)\")\n while tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n\n for task in tasks:\n task.cancel()\n\n await asyncio.gather(*tasks, return_exceptions=True)\n\n logger.info(\"Application shutdown complete.\")\n loop = asyncio.get_event_loop()\n loop.stop()\n\n def install_signal_handlers(self) -> None:\n if threading.current_thread() is not threading.main_thread():\n # Signals can only be listened to from the main thread.\n return\n\n loop = asyncio.get_event_loop()\n\n try:\n for sig in HANDLED_SIGNALS:\n loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError:\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig, frame):\n if self.should_exit.is_set():\n self.force_exit = True\n else:\n self.should_exit.set()\n", "path": "nonebot/drivers/_block_driver.py"}]} | 3,456 | 425 |
gh_patches_debug_33693 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-110 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate why adaptation does not work well with default HMC parameters
For many models that seem to work well in Pyro with the default values for `trajectory_length` and `step_size` (see `test_mcmc.py`), the behavior in numpyro can be finicky in that either HMC / NUTS is too slow or we get wrong results, despite our tests running for many more steps than in Pyro.
</issue>
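For reference, a quick back-of-the-envelope check (an illustration, not code from the repository) of what the defaults in the listing below imply: with `step_size=1.0` and no explicit `num_steps`, the trajectory length falls back to `2 * math.pi`, so the first fixed-length HMC transition runs `int(2π / 1.0) = 6` leapfrog steps, and the step count then tracks whatever step size warmup adaptation settles on.

```python
import math

# Hypothetical numbers mirroring the defaults in numpyro/mcmc.py below;
# num_steps is recomputed from the *current* step size on every transition.
step_size = 1.0
trajectory_length = 2 * math.pi
num_steps = max(1, int(trajectory_length / step_size))
print(num_steps)  # 6 leapfrog steps, until adaptation changes step_size
```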
<code>
[start of numpyro/mcmc.py]
1 import math
2
3 import jax.numpy as np
4 from jax import partial, random
5 from jax.flatten_util import ravel_pytree
6 from jax.random import PRNGKey
7
8 import numpyro.distributions as dist
9 from numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter
10 from numpyro.util import cond, fori_loop, laxtuple
11
12 HMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',
13 'step_size', 'inverse_mass_matrix', 'rng'])
14
15
16 def _get_num_steps(step_size, trajectory_length):
17 num_steps = np.array(trajectory_length / step_size, dtype=np.int32)
18 return np.where(num_steps < 1, np.array(1, dtype=np.int32), num_steps)
19
20
21 def _sample_momentum(unpack_fn, inverse_mass_matrix, rng):
22 if inverse_mass_matrix.ndim == 1:
23 r = dist.norm(0., np.sqrt(np.reciprocal(inverse_mass_matrix))).rvs(random_state=rng)
24 return unpack_fn(r)
25 elif inverse_mass_matrix.ndim == 2:
26 raise NotImplementedError
27
28
29 def _euclidean_ke(inverse_mass_matrix, r):
30 r, _ = ravel_pytree(r)
31
32 if inverse_mass_matrix.ndim == 2:
33 v = np.matmul(inverse_mass_matrix, r)
34 elif inverse_mass_matrix.ndim == 1:
35 v = np.multiply(inverse_mass_matrix, r)
36
37 return 0.5 * np.dot(v, r)
38
39
40 def hmc(potential_fn, kinetic_fn=None, algo='NUTS'):
41 if kinetic_fn is None:
42 kinetic_fn = _euclidean_ke
43 vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)
44 trajectory_length = None
45 momentum_generator = None
46 wa_update = None
47
48 def init_kernel(init_samples,
49 num_warmup_steps,
50 step_size=1.0,
51 num_steps=None,
52 adapt_step_size=True,
53 adapt_mass_matrix=True,
54 diag_mass=True,
55 target_accept_prob=0.8,
56 run_warmup=True,
57 rng=PRNGKey(0)):
58 step_size = float(step_size)
59 nonlocal trajectory_length, momentum_generator, wa_update
60
61 if num_steps is None:
62 trajectory_length = 2 * math.pi
63 else:
64 trajectory_length = num_steps * step_size
65
66 z = init_samples
67 z_flat, unravel_fn = ravel_pytree(z)
68 momentum_generator = partial(_sample_momentum, unravel_fn)
69
70 find_reasonable_ss = partial(find_reasonable_step_size,
71 potential_fn, kinetic_fn, momentum_generator)
72
73 wa_init, wa_update = warmup_adapter(num_warmup_steps,
74 find_reasonable_step_size=find_reasonable_ss,
75 adapt_step_size=adapt_step_size,
76 adapt_mass_matrix=adapt_mass_matrix,
77 diag_mass=diag_mass,
78 target_accept_prob=target_accept_prob)
79
80 rng_hmc, rng_wa = random.split(rng)
81 wa_state = wa_init(z, rng_wa, step_size, mass_matrix_size=np.size(z_flat))
82 r = momentum_generator(wa_state.inverse_mass_matrix, rng)
83 vv_state = vv_init(z, r)
84 hmc_state = HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, 0, 0.,
85 wa_state.step_size, wa_state.inverse_mass_matrix, rng_hmc)
86
87 if run_warmup:
88 hmc_state, _ = fori_loop(0, num_warmup_steps, warmup_update, (hmc_state, wa_state))
89 return hmc_state
90 else:
91 return hmc_state, wa_state, warmup_update
92
93 def warmup_update(t, states):
94 hmc_state, wa_state = states
95 hmc_state = sample_kernel(hmc_state)
96 wa_state = wa_update(t, hmc_state.accept_prob, hmc_state.z, wa_state)
97 hmc_state = hmc_state.update(step_size=wa_state.step_size,
98 inverse_mass_matrix=wa_state.inverse_mass_matrix)
99 return hmc_state, wa_state
100
101 def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):
102 num_steps = _get_num_steps(step_size, trajectory_length)
103 vv_state_new = fori_loop(0, num_steps,
104 lambda i, val: vv_update(step_size, inverse_mass_matrix, val),
105 vv_state)
106 energy_old = vv_state.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state.r)
107 energy_new = vv_state_new.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state_new.r)
108 delta_energy = energy_new - energy_old
109 delta_energy = np.where(np.isnan(delta_energy), np.inf, delta_energy)
110 accept_prob = np.clip(np.exp(-delta_energy), a_max=1.0)
111 transition = random.bernoulli(rng, accept_prob)
112 vv_state = cond(transition,
113 vv_state_new, lambda state: state,
114 vv_state, lambda state: state)
115 return vv_state, num_steps, accept_prob
116
117 def _nuts_next(step_size, inverse_mass_matrix, vv_state, rng):
118 binary_tree = build_tree(vv_update, kinetic_fn, vv_state,
119 inverse_mass_matrix, step_size, rng)
120 accept_prob = binary_tree.sum_accept_probs / binary_tree.num_proposals
121 num_steps = binary_tree.num_proposals
122 vv_state = vv_state.update(z=binary_tree.z_proposal,
123 potential_energy=binary_tree.z_proposal_pe,
124 z_grad=binary_tree.z_proposal_grad)
125 return vv_state, num_steps, accept_prob
126
127 _next = _nuts_next if algo == 'NUTS' else _hmc_next
128
129 def sample_kernel(hmc_state):
130 rng, rng_momentum, rng_transition = random.split(hmc_state.rng, 3)
131 r = momentum_generator(hmc_state.inverse_mass_matrix, rng_momentum)
132 vv_state = IntegratorState(hmc_state.z, r, hmc_state.potential_energy, hmc_state.z_grad)
133 vv_state, num_steps, accept_prob = _next(hmc_state.step_size,
134 hmc_state.inverse_mass_matrix,
135 vv_state, rng_transition)
136 return HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, num_steps,
137 accept_prob, hmc_state.step_size, hmc_state.inverse_mass_matrix, rng)
138
139 return init_kernel, sample_kernel
140
[end of numpyro/mcmc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpyro/mcmc.py b/numpyro/mcmc.py
--- a/numpyro/mcmc.py
+++ b/numpyro/mcmc.py
@@ -41,28 +41,23 @@
if kinetic_fn is None:
kinetic_fn = _euclidean_ke
vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)
- trajectory_length = None
+ trajectory_len = None
momentum_generator = None
wa_update = None
def init_kernel(init_samples,
num_warmup_steps,
step_size=1.0,
- num_steps=None,
adapt_step_size=True,
adapt_mass_matrix=True,
diag_mass=True,
target_accept_prob=0.8,
+ trajectory_length=2*math.pi,
run_warmup=True,
rng=PRNGKey(0)):
step_size = float(step_size)
- nonlocal trajectory_length, momentum_generator, wa_update
-
- if num_steps is None:
- trajectory_length = 2 * math.pi
- else:
- trajectory_length = num_steps * step_size
-
+ nonlocal momentum_generator, wa_update, trajectory_len
+ trajectory_len = float(trajectory_length)
z = init_samples
z_flat, unravel_fn = ravel_pytree(z)
momentum_generator = partial(_sample_momentum, unravel_fn)
@@ -99,7 +94,7 @@
return hmc_state, wa_state
def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):
- num_steps = _get_num_steps(step_size, trajectory_length)
+ num_steps = _get_num_steps(step_size, trajectory_len)
vv_state_new = fori_loop(0, num_steps,
lambda i, val: vv_update(step_size, inverse_mass_matrix, val),
vv_state)
| {"golden_diff": "diff --git a/numpyro/mcmc.py b/numpyro/mcmc.py\n--- a/numpyro/mcmc.py\n+++ b/numpyro/mcmc.py\n@@ -41,28 +41,23 @@\n if kinetic_fn is None:\n kinetic_fn = _euclidean_ke\n vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)\n- trajectory_length = None\n+ trajectory_len = None\n momentum_generator = None\n wa_update = None\n \n def init_kernel(init_samples,\n num_warmup_steps,\n step_size=1.0,\n- num_steps=None,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n diag_mass=True,\n target_accept_prob=0.8,\n+ trajectory_length=2*math.pi,\n run_warmup=True,\n rng=PRNGKey(0)):\n step_size = float(step_size)\n- nonlocal trajectory_length, momentum_generator, wa_update\n-\n- if num_steps is None:\n- trajectory_length = 2 * math.pi\n- else:\n- trajectory_length = num_steps * step_size\n-\n+ nonlocal momentum_generator, wa_update, trajectory_len\n+ trajectory_len = float(trajectory_length)\n z = init_samples\n z_flat, unravel_fn = ravel_pytree(z)\n momentum_generator = partial(_sample_momentum, unravel_fn)\n@@ -99,7 +94,7 @@\n return hmc_state, wa_state\n \n def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):\n- num_steps = _get_num_steps(step_size, trajectory_length)\n+ num_steps = _get_num_steps(step_size, trajectory_len)\n vv_state_new = fori_loop(0, num_steps,\n lambda i, val: vv_update(step_size, inverse_mass_matrix, val),\n vv_state)\n", "issue": "Investigate why adaptation does not work well with default HMC parameters\nFor many models that seem to work well in Pyro with the default values for `trajectory_length` and `step_size` (see `test_mcmc.py`), the behavior in numpyro can be finicky in that either HMC / NUTS is too slow or we get wrong results, despite our tests running for many more steps than in Pyro. \n", "before_files": [{"content": "import math\n\nimport jax.numpy as np\nfrom jax import partial, random\nfrom jax.flatten_util import ravel_pytree\nfrom jax.random import PRNGKey\n\nimport numpyro.distributions as dist\nfrom numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter\nfrom numpyro.util import cond, fori_loop, laxtuple\n\nHMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',\n 'step_size', 'inverse_mass_matrix', 'rng'])\n\n\ndef _get_num_steps(step_size, trajectory_length):\n num_steps = np.array(trajectory_length / step_size, dtype=np.int32)\n return np.where(num_steps < 1, np.array(1, dtype=np.int32), num_steps)\n\n\ndef _sample_momentum(unpack_fn, inverse_mass_matrix, rng):\n if inverse_mass_matrix.ndim == 1:\n r = dist.norm(0., np.sqrt(np.reciprocal(inverse_mass_matrix))).rvs(random_state=rng)\n return unpack_fn(r)\n elif inverse_mass_matrix.ndim == 2:\n raise NotImplementedError\n\n\ndef _euclidean_ke(inverse_mass_matrix, r):\n r, _ = ravel_pytree(r)\n\n if inverse_mass_matrix.ndim == 2:\n v = np.matmul(inverse_mass_matrix, r)\n elif inverse_mass_matrix.ndim == 1:\n v = np.multiply(inverse_mass_matrix, r)\n\n return 0.5 * np.dot(v, r)\n\n\ndef hmc(potential_fn, kinetic_fn=None, algo='NUTS'):\n if kinetic_fn is None:\n kinetic_fn = _euclidean_ke\n vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)\n trajectory_length = None\n momentum_generator = None\n wa_update = None\n\n def init_kernel(init_samples,\n num_warmup_steps,\n step_size=1.0,\n num_steps=None,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n diag_mass=True,\n target_accept_prob=0.8,\n run_warmup=True,\n rng=PRNGKey(0)):\n step_size = float(step_size)\n 
nonlocal trajectory_length, momentum_generator, wa_update\n\n if num_steps is None:\n trajectory_length = 2 * math.pi\n else:\n trajectory_length = num_steps * step_size\n\n z = init_samples\n z_flat, unravel_fn = ravel_pytree(z)\n momentum_generator = partial(_sample_momentum, unravel_fn)\n\n find_reasonable_ss = partial(find_reasonable_step_size,\n potential_fn, kinetic_fn, momentum_generator)\n\n wa_init, wa_update = warmup_adapter(num_warmup_steps,\n find_reasonable_step_size=find_reasonable_ss,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n diag_mass=diag_mass,\n target_accept_prob=target_accept_prob)\n\n rng_hmc, rng_wa = random.split(rng)\n wa_state = wa_init(z, rng_wa, step_size, mass_matrix_size=np.size(z_flat))\n r = momentum_generator(wa_state.inverse_mass_matrix, rng)\n vv_state = vv_init(z, r)\n hmc_state = HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, 0, 0.,\n wa_state.step_size, wa_state.inverse_mass_matrix, rng_hmc)\n\n if run_warmup:\n hmc_state, _ = fori_loop(0, num_warmup_steps, warmup_update, (hmc_state, wa_state))\n return hmc_state\n else:\n return hmc_state, wa_state, warmup_update\n\n def warmup_update(t, states):\n hmc_state, wa_state = states\n hmc_state = sample_kernel(hmc_state)\n wa_state = wa_update(t, hmc_state.accept_prob, hmc_state.z, wa_state)\n hmc_state = hmc_state.update(step_size=wa_state.step_size,\n inverse_mass_matrix=wa_state.inverse_mass_matrix)\n return hmc_state, wa_state\n\n def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):\n num_steps = _get_num_steps(step_size, trajectory_length)\n vv_state_new = fori_loop(0, num_steps,\n lambda i, val: vv_update(step_size, inverse_mass_matrix, val),\n vv_state)\n energy_old = vv_state.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state.r)\n energy_new = vv_state_new.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state_new.r)\n delta_energy = energy_new - energy_old\n delta_energy = np.where(np.isnan(delta_energy), np.inf, delta_energy)\n accept_prob = np.clip(np.exp(-delta_energy), a_max=1.0)\n transition = random.bernoulli(rng, accept_prob)\n vv_state = cond(transition,\n vv_state_new, lambda state: state,\n vv_state, lambda state: state)\n return vv_state, num_steps, accept_prob\n\n def _nuts_next(step_size, inverse_mass_matrix, vv_state, rng):\n binary_tree = build_tree(vv_update, kinetic_fn, vv_state,\n inverse_mass_matrix, step_size, rng)\n accept_prob = binary_tree.sum_accept_probs / binary_tree.num_proposals\n num_steps = binary_tree.num_proposals\n vv_state = vv_state.update(z=binary_tree.z_proposal,\n potential_energy=binary_tree.z_proposal_pe,\n z_grad=binary_tree.z_proposal_grad)\n return vv_state, num_steps, accept_prob\n\n _next = _nuts_next if algo == 'NUTS' else _hmc_next\n\n def sample_kernel(hmc_state):\n rng, rng_momentum, rng_transition = random.split(hmc_state.rng, 3)\n r = momentum_generator(hmc_state.inverse_mass_matrix, rng_momentum)\n vv_state = IntegratorState(hmc_state.z, r, hmc_state.potential_energy, hmc_state.z_grad)\n vv_state, num_steps, accept_prob = _next(hmc_state.step_size,\n hmc_state.inverse_mass_matrix,\n vv_state, rng_transition)\n return HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, num_steps,\n accept_prob, hmc_state.step_size, hmc_state.inverse_mass_matrix, rng)\n\n return init_kernel, sample_kernel\n", "path": "numpyro/mcmc.py"}]} | 2,364 | 405 |
gh_patches_debug_857 | rasdani/github-patches | git_diff | pypi__warehouse-3598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set samesite=lax on session cookies
This is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.
</issue>
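As a minimal sketch (placeholder cookie name and value, assuming WebOb ≥ 1.8, which is what Pyramid's response object builds on — not the project's actual change), the `set_cookie` call used in `SessionFactory._process_response` below accepts a `samesite` argument, so opting the session cookie into `SameSite=Lax` is a one-argument change:

```python
from webob import Response

# Sketch only — the real cookie value is signed and the secure flag follows
# request.scheme in the session factory below.  With SameSite=Lax the browser
# omits the cookie from most cross-site sub-requests and POSTs, which backs up
# the CSRF token check rather than replacing it.
response = Response()
response.set_cookie(
    "session_id",
    "signed-session-id",
    max_age=12 * 60 * 60,
    httponly=True,
    secure=True,
    samesite=b"lax",
)
print(response.headers["Set-Cookie"])
```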
<code>
[start of warehouse/sessions.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import functools
14 import time
15
16 import msgpack
17 import msgpack.exceptions
18 import redis
19
20 from pyramid import viewderivers
21 from pyramid.interfaces import ISession, ISessionFactory
22 from zope.interface import implementer
23
24 from warehouse.cache.http import add_vary
25 from warehouse.utils import crypto
26
27
28 def _invalid_method(method):
29 @functools.wraps(method)
30 def wrapped(self, *args, **kwargs):
31 self._error_message()
32 return wrapped
33
34
35 @implementer(ISession)
36 class InvalidSession(dict):
37
38 __contains__ = _invalid_method(dict.__contains__)
39 __delitem__ = _invalid_method(dict.__delitem__)
40 __getitem__ = _invalid_method(dict.__getitem__)
41 __iter__ = _invalid_method(dict.__iter__)
42 __len__ = _invalid_method(dict.__len__)
43 __setitem__ = _invalid_method(dict.__setitem__)
44 clear = _invalid_method(dict.clear)
45 copy = _invalid_method(dict.copy)
46 fromkeys = _invalid_method(dict.fromkeys)
47 get = _invalid_method(dict.get)
48 items = _invalid_method(dict.items)
49 keys = _invalid_method(dict.keys)
50 pop = _invalid_method(dict.pop)
51 popitem = _invalid_method(dict.popitem)
52 setdefault = _invalid_method(dict.setdefault)
53 update = _invalid_method(dict.update)
54 values = _invalid_method(dict.values)
55
56 def _error_message(self):
57 raise RuntimeError(
58 "Cannot use request.session in a view without uses_session=True."
59 )
60
61 def __getattr__(self, name):
62 self._error_message()
63
64 @property
65 def created(self):
66 self._error_message()
67
68
69 def _changed_method(method):
70 @functools.wraps(method)
71 def wrapped(self, *args, **kwargs):
72 self.changed()
73 return method(self, *args, **kwargs)
74 return wrapped
75
76
77 @implementer(ISession)
78 class Session(dict):
79
80 _csrf_token_key = "_csrf_token"
81 _flash_key = "_flash_messages"
82
83 # A number of our methods need to be decorated so that they also call
84 # self.changed()
85 __delitem__ = _changed_method(dict.__delitem__)
86 __setitem__ = _changed_method(dict.__setitem__)
87 clear = _changed_method(dict.clear)
88 pop = _changed_method(dict.pop)
89 popitem = _changed_method(dict.popitem)
90 setdefault = _changed_method(dict.setdefault)
91 update = _changed_method(dict.update)
92
93 def __init__(self, data=None, session_id=None, new=True):
94 # Brand new sessions don't have any data, so we'll just create an empty
95 # dictionary for them.
96 if data is None:
97 data = {}
98
99 # Initialize our actual dictionary here.
100 super().__init__(data)
101
102 # We need to track the state of our Session.
103 self._sid = session_id
104 self._changed = False
105 self.new = new
106 self.created = int(time.time())
107
108 # We'll track all of the IDs that have been invalidated here
109 self.invalidated = set()
110
111 @property
112 def sid(self):
113 if self._sid is None:
114 self._sid = crypto.random_token()
115 return self._sid
116
117 def changed(self):
118 self._changed = True
119
120 def invalidate(self):
121 self.clear()
122 self.new = True
123 self.created = int(time.time())
124 self._changed = False
125
126 # If the current session id isn't None we'll want to record it as one
127 # of the ones that have been invalidated.
128 if self._sid is not None:
129 self.invalidated.add(self._sid)
130 self._sid = None
131
132 def should_save(self):
133 return self._changed
134
135 # Flash Messages Methods
136 def _get_flash_queue_key(self, queue):
137 return ".".join(filter(None, [self._flash_key, queue]))
138
139 def flash(self, msg, queue="", allow_duplicate=True):
140 queue_key = self._get_flash_queue_key(queue)
141
142 # If we're not allowing duplicates check if this message is already
143 # in the queue, and if it is just return immediately.
144 if not allow_duplicate and msg in self[queue_key]:
145 return
146
147 self.setdefault(queue_key, []).append(msg)
148
149 def peek_flash(self, queue=""):
150 return self.get(self._get_flash_queue_key(queue), [])
151
152 def pop_flash(self, queue=""):
153 queue_key = self._get_flash_queue_key(queue)
154 messages = self.get(queue_key, [])
155 self.pop(queue_key, None)
156 return messages
157
158 # CSRF Methods
159 def new_csrf_token(self):
160 self[self._csrf_token_key] = crypto.random_token()
161 return self[self._csrf_token_key]
162
163 def get_csrf_token(self):
164 token = self.get(self._csrf_token_key)
165 if token is None:
166 token = self.new_csrf_token()
167 return token
168
169
170 @implementer(ISessionFactory)
171 class SessionFactory:
172
173 cookie_name = "session_id"
174 max_age = 12 * 60 * 60 # 12 hours
175
176 def __init__(self, secret, url):
177 self.redis = redis.StrictRedis.from_url(url)
178 self.signer = crypto.TimestampSigner(secret, salt="session")
179
180 def __call__(self, request):
181 return self._process_request(request)
182
183 def _redis_key(self, session_id):
184 return "warehouse/session/data/{}".format(session_id)
185
186 def _process_request(self, request):
187 # Register a callback with the request so we can save the session once
188 # it's finished.
189 request.add_response_callback(self._process_response)
190
191 # Load our session ID from the request.
192 session_id = request.cookies.get(self.cookie_name)
193
194 # If we do not have a session ID then we'll just use a new empty
195 # session.
196 if session_id is None:
197 return Session()
198
199 # Check to make sure we have a valid session id
200 try:
201 session_id = self.signer.unsign(session_id, max_age=self.max_age)
202 session_id = session_id.decode("utf8")
203 except crypto.BadSignature:
204 return Session()
205
206 # Fetch the serialized data from redis
207 bdata = self.redis.get(self._redis_key(session_id))
208
209 # If the session didn't exist in redis, we'll give the user a new
210 # session.
211 if bdata is None:
212 return Session()
213
214 # De-serialize our session data
215 try:
216 data = msgpack.unpackb(bdata, encoding="utf8", use_list=True)
217 except (msgpack.exceptions.UnpackException,
218 msgpack.exceptions.ExtraData):
219 # If the session data was invalid we'll give the user a new session
220 return Session()
221
222 # If we were able to load existing session data, load it into a
223 # Session class
224 session = Session(data, session_id, False)
225
226 return session
227
228 def _process_response(self, request, response):
229 # If the request has an InvalidSession, then the view can't have
230 # accessed the session, and we can just skip all of this anyways.
231 if isinstance(request.session, InvalidSession):
232 return
233
234 # Check to see if the session has been marked to be deleted, if it has
235 # been then we'll delete it, and tell our response to delete the
236 # session cookie as well.
237 if request.session.invalidated:
238 for session_id in request.session.invalidated:
239 self.redis.delete(self._redis_key(session_id))
240
241 if not request.session.should_save():
242 response.delete_cookie(self.cookie_name)
243
244 # Check to see if the session has been marked to be saved, generally
245 # this means that the session data has been modified and thus we need
246 # to store the new data.
247 if request.session.should_save():
248 # Save our session in Redis
249 self.redis.setex(
250 self._redis_key(request.session.sid),
251 self.max_age,
252 msgpack.packb(
253 request.session,
254 encoding="utf8",
255 use_bin_type=True,
256 ),
257 )
258
259 # Send our session cookie to the client
260 response.set_cookie(
261 self.cookie_name,
262 self.signer.sign(request.session.sid.encode("utf8")),
263 max_age=self.max_age,
264 httponly=True,
265 secure=request.scheme == "https",
266 samesite=b"lax"
267 )
268
269
270 def session_view(view, info):
271 if info.options.get("uses_session"):
272 # If we're using the session, then we'll just return the original view
273 # with a small wrapper around it to ensure that it has a Vary: Cookie
274 # header.
275 return add_vary("Cookie")(view)
276 elif info.exception_only:
277 return view
278 else:
279 # If we're not using the session on this view, then we'll wrap the view
280 # with a wrapper that just ensures that the session cannot be used.
281 @functools.wraps(view)
282 def wrapped(context, request):
283 # This whole method is a little bit of an odd duck, we want to make
284 # sure that we don't actually *access* request.session, because
285 # doing so triggers the machinery to create a new session. So
286 # instead we will dig into the request object __dict__ to
287 # effectively do the same thing, just without triggering an access
288 # on request.session.
289
290 # Save the original session so that we can restore it once the
291 # inner views have been called.
292 nothing = object()
293 original_session = request.__dict__.get("session", nothing)
294
295 # This particular view hasn't been set to allow access to the
296 # session, so we'll just assign an InvalidSession to
297 # request.session
298 request.__dict__["session"] = InvalidSession()
299
300 try:
301 # Invoke the real view
302 return view(context, request)
303 finally:
304 # Restore the original session so that things like
305 # pyramid_debugtoolbar can access it.
306 if original_session is nothing:
307 del request.__dict__["session"]
308 else:
309 request.__dict__["session"] = original_session
310
311 return wrapped
312
313
314 session_view.options = {"uses_session"}
315
316
317 def includeme(config):
318 config.set_session_factory(
319 SessionFactory(
320 config.registry.settings["sessions.secret"],
321 config.registry.settings["sessions.url"],
322 ),
323 )
324
325 config.add_view_deriver(
326 session_view,
327 over="csrf_view",
328 under=viewderivers.INGRESS,
329 )
330
[end of warehouse/sessions.py]
</code>
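As a side note on the listing above: the heart of `SessionFactory` is a signed-cookie round-trip, sign the session id on the response path, `unsign` it with a max age on the request path, and fall back to a fresh session on any signature failure. The sketch below reproduces that round-trip in isolation. It assumes `warehouse.utils.crypto` exposes an itsdangerous-style `TimestampSigner` (an assumption, not confirmed by the listing), and the secret, salt and session id are placeholder values.

```python
# Standalone sketch of the cookie signing round-trip used by SessionFactory.
# Assumes an itsdangerous-style TimestampSigner; all values are placeholders.
from itsdangerous import BadSignature, TimestampSigner

signer = TimestampSigner(b"not-a-real-secret", salt="session")
max_age = 12 * 60 * 60  # mirrors SessionFactory.max_age

# Response path: sign the session id before handing it to set_cookie().
cookie_value = signer.sign(b"example-session-id")

# Request path: verify the signature and reject cookies older than max_age.
try:
    session_id = signer.unsign(cookie_value, max_age=max_age).decode("utf8")
except BadSignature:
    session_id = None  # treat the cookie as missing and start a new session

print(session_id)  # -> "example-session-id"
```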
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/sessions.py b/warehouse/sessions.py
--- a/warehouse/sessions.py
+++ b/warehouse/sessions.py
@@ -263,7 +263,6 @@
max_age=self.max_age,
httponly=True,
secure=request.scheme == "https",
- samesite=b"lax"
)
| {"golden_diff": "diff --git a/warehouse/sessions.py b/warehouse/sessions.py\n--- a/warehouse/sessions.py\n+++ b/warehouse/sessions.py\n@@ -263,7 +263,6 @@\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n- samesite=b\"lax\"\n )\n", "issue": "Set samesite=lax on session cookies\nThis is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport time\n\nimport msgpack\nimport msgpack.exceptions\nimport redis\n\nfrom pyramid import viewderivers\nfrom pyramid.interfaces import ISession, ISessionFactory\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary\nfrom warehouse.utils import crypto\n\n\ndef _invalid_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self._error_message()\n return wrapped\n\n\n@implementer(ISession)\nclass InvalidSession(dict):\n\n __contains__ = _invalid_method(dict.__contains__)\n __delitem__ = _invalid_method(dict.__delitem__)\n __getitem__ = _invalid_method(dict.__getitem__)\n __iter__ = _invalid_method(dict.__iter__)\n __len__ = _invalid_method(dict.__len__)\n __setitem__ = _invalid_method(dict.__setitem__)\n clear = _invalid_method(dict.clear)\n copy = _invalid_method(dict.copy)\n fromkeys = _invalid_method(dict.fromkeys)\n get = _invalid_method(dict.get)\n items = _invalid_method(dict.items)\n keys = _invalid_method(dict.keys)\n pop = _invalid_method(dict.pop)\n popitem = _invalid_method(dict.popitem)\n setdefault = _invalid_method(dict.setdefault)\n update = _invalid_method(dict.update)\n values = _invalid_method(dict.values)\n\n def _error_message(self):\n raise RuntimeError(\n \"Cannot use request.session in a view without uses_session=True.\"\n )\n\n def __getattr__(self, name):\n self._error_message()\n\n @property\n def created(self):\n self._error_message()\n\n\ndef _changed_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self.changed()\n return method(self, *args, **kwargs)\n return wrapped\n\n\n@implementer(ISession)\nclass Session(dict):\n\n _csrf_token_key = \"_csrf_token\"\n _flash_key = \"_flash_messages\"\n\n # A number of our methods need to be decorated so that they also call\n # self.changed()\n __delitem__ = _changed_method(dict.__delitem__)\n __setitem__ = _changed_method(dict.__setitem__)\n clear = _changed_method(dict.clear)\n pop = _changed_method(dict.pop)\n popitem = _changed_method(dict.popitem)\n setdefault = _changed_method(dict.setdefault)\n update = _changed_method(dict.update)\n\n def __init__(self, data=None, session_id=None, new=True):\n # Brand new sessions don't have any data, so we'll just create an empty\n # dictionary for them.\n if data is None:\n data = {}\n\n # Initialize our actual dictionary here.\n super().__init__(data)\n\n # We need to track the state of our Session.\n self._sid = session_id\n self._changed 
= False\n self.new = new\n self.created = int(time.time())\n\n # We'll track all of the IDs that have been invalidated here\n self.invalidated = set()\n\n @property\n def sid(self):\n if self._sid is None:\n self._sid = crypto.random_token()\n return self._sid\n\n def changed(self):\n self._changed = True\n\n def invalidate(self):\n self.clear()\n self.new = True\n self.created = int(time.time())\n self._changed = False\n\n # If the current session id isn't None we'll want to record it as one\n # of the ones that have been invalidated.\n if self._sid is not None:\n self.invalidated.add(self._sid)\n self._sid = None\n\n def should_save(self):\n return self._changed\n\n # Flash Messages Methods\n def _get_flash_queue_key(self, queue):\n return \".\".join(filter(None, [self._flash_key, queue]))\n\n def flash(self, msg, queue=\"\", allow_duplicate=True):\n queue_key = self._get_flash_queue_key(queue)\n\n # If we're not allowing duplicates check if this message is already\n # in the queue, and if it is just return immediately.\n if not allow_duplicate and msg in self[queue_key]:\n return\n\n self.setdefault(queue_key, []).append(msg)\n\n def peek_flash(self, queue=\"\"):\n return self.get(self._get_flash_queue_key(queue), [])\n\n def pop_flash(self, queue=\"\"):\n queue_key = self._get_flash_queue_key(queue)\n messages = self.get(queue_key, [])\n self.pop(queue_key, None)\n return messages\n\n # CSRF Methods\n def new_csrf_token(self):\n self[self._csrf_token_key] = crypto.random_token()\n return self[self._csrf_token_key]\n\n def get_csrf_token(self):\n token = self.get(self._csrf_token_key)\n if token is None:\n token = self.new_csrf_token()\n return token\n\n\n@implementer(ISessionFactory)\nclass SessionFactory:\n\n cookie_name = \"session_id\"\n max_age = 12 * 60 * 60 # 12 hours\n\n def __init__(self, secret, url):\n self.redis = redis.StrictRedis.from_url(url)\n self.signer = crypto.TimestampSigner(secret, salt=\"session\")\n\n def __call__(self, request):\n return self._process_request(request)\n\n def _redis_key(self, session_id):\n return \"warehouse/session/data/{}\".format(session_id)\n\n def _process_request(self, request):\n # Register a callback with the request so we can save the session once\n # it's finished.\n request.add_response_callback(self._process_response)\n\n # Load our session ID from the request.\n session_id = request.cookies.get(self.cookie_name)\n\n # If we do not have a session ID then we'll just use a new empty\n # session.\n if session_id is None:\n return Session()\n\n # Check to make sure we have a valid session id\n try:\n session_id = self.signer.unsign(session_id, max_age=self.max_age)\n session_id = session_id.decode(\"utf8\")\n except crypto.BadSignature:\n return Session()\n\n # Fetch the serialized data from redis\n bdata = self.redis.get(self._redis_key(session_id))\n\n # If the session didn't exist in redis, we'll give the user a new\n # session.\n if bdata is None:\n return Session()\n\n # De-serialize our session data\n try:\n data = msgpack.unpackb(bdata, encoding=\"utf8\", use_list=True)\n except (msgpack.exceptions.UnpackException,\n msgpack.exceptions.ExtraData):\n # If the session data was invalid we'll give the user a new session\n return Session()\n\n # If we were able to load existing session data, load it into a\n # Session class\n session = Session(data, session_id, False)\n\n return session\n\n def _process_response(self, request, response):\n # If the request has an InvalidSession, then the view can't have\n # accessed the session, and 
we can just skip all of this anyways.\n if isinstance(request.session, InvalidSession):\n return\n\n # Check to see if the session has been marked to be deleted, if it has\n # benn then we'll delete it, and tell our response to delete the\n # session cookie as well.\n if request.session.invalidated:\n for session_id in request.session.invalidated:\n self.redis.delete(self._redis_key(session_id))\n\n if not request.session.should_save():\n response.delete_cookie(self.cookie_name)\n\n # Check to see if the session has been marked to be saved, generally\n # this means that the session data has been modified and thus we need\n # to store the new data.\n if request.session.should_save():\n # Save our session in Redis\n self.redis.setex(\n self._redis_key(request.session.sid),\n self.max_age,\n msgpack.packb(\n request.session,\n encoding=\"utf8\",\n use_bin_type=True,\n ),\n )\n\n # Send our session cookie to the client\n response.set_cookie(\n self.cookie_name,\n self.signer.sign(request.session.sid.encode(\"utf8\")),\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n samesite=b\"lax\"\n )\n\n\ndef session_view(view, info):\n if info.options.get(\"uses_session\"):\n # If we're using the session, then we'll just return the original view\n # with a small wrapper around it to ensure that it has a Vary: Cookie\n # header.\n return add_vary(\"Cookie\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using the session on this view, then we'll wrap the view\n # with a wrapper that just ensures that the session cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.session, because\n # doing so triggers the machinery to create a new session. So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, jsut without triggering an access\n # on request.session.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_session = request.__dict__.get(\"session\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # session, so we'll just assign an InvalidSession to\n # request.session\n request.__dict__[\"session\"] = InvalidSession()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_session is nothing:\n del request.__dict__[\"session\"]\n else:\n request.__dict__[\"session\"] = original_session\n\n return wrapped\n\n\nsession_view.options = {\"uses_session\"}\n\n\ndef includeme(config):\n config.set_session_factory(\n SessionFactory(\n config.registry.settings[\"sessions.secret\"],\n config.registry.settings[\"sessions.url\"],\n ),\n )\n\n config.add_view_deriver(\n session_view,\n over=\"csrf_view\",\n under=viewderivers.INGRESS,\n )\n", "path": "warehouse/sessions.py"}]} | 3,864 | 79 |
gh_patches_debug_58664 | rasdani/github-patches | git_diff | jazzband__pip-tools-12 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Python versions lower than 2.7, too
</issue>
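For context on why the version floor matters: `argparse` only joined the standard library in Python 2.7, so the `pip-review` and `pip-dump` scripts cannot import it on 2.6 unless it is declared as a dependency. Below is a hedged sketch of the relevant `setup.py` fragment; the pinned version simply mirrors the change shown in the diff further down and is illustrative rather than a recommendation.

```python
# setup.py fragment (illustrative): declare the argparse backport so the
# console scripts keep working on Python 2.6, where argparse is not bundled.
from setuptools import setup

setup(
    name='pip-tools',
    scripts=['bin/pip-review', 'bin/pip-dump'],
    install_requires=['argparse==1.2.1'],  # needed for Python 2.6
)
```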
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 import sys
5 from setuptools import setup
6
7
8 setup(
9 name='pip-tools',
10 version='0.2',
11 url='https://github.com/nvie/pip-tools/',
12 license='BSD',
13 author='Vincent Driessen',
14 author_email='[email protected]',
15 description=__doc__,
16 #packages=[],
17 scripts=['bin/pip-review', 'bin/pip-dump'],
18 #include_package_data=True,
19 zip_safe=False,
20 platforms='any',
21 #install_requires=[],
22 classifiers=[
23 # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
24 #'Development Status :: 1 - Planning',
25 #'Development Status :: 2 - Pre-Alpha',
26 #'Development Status :: 3 - Alpha',
27 'Development Status :: 4 - Beta',
28 #'Development Status :: 5 - Production/Stable',
29 #'Development Status :: 6 - Mature',
30 #'Development Status :: 7 - Inactive',
31 'Intended Audience :: Developers',
32 'Intended Audience :: System Administrators',
33 'License :: OSI Approved :: BSD License',
34 'Operating System :: OS Independent',
35 'Topic :: System :: Systems Administration',
36 ]
37 )
38
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
#include_package_data=True,
zip_safe=False,
platforms='any',
- #install_requires=[],
+ install_requires=['argparse==1.2.1'], # needed for python 2.6
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
#'Development Status :: 1 - Planning',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n- #install_requires=[],\n+ install_requires=['argparse==1.2.1'], # needed for python 2.6\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n", "issue": "Support Python versions lower than 2.7, too\n\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\nsetup(\n name='pip-tools',\n version='0.2',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n #install_requires=[],\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]} | 877 | 114 |
gh_patches_debug_21744 | rasdani/github-patches | git_diff | networkx__networkx-6558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
all_node_cuts returns incorrect cuts
The function `all_node_cuts` returns node cut-sets for K2 (the complete graph on two nodes), even though no such cut-sets exist.
### Current Behavior
Consider the following graph, consisting of two nodes connected by an edge:
```python
graph = nx.Graph()
graph.add_edge(1,2)
list(all_node_cuts(graph))
>>> [{1}, {2}]
```
This graph has no node cut-sets because removing any vertex does not increase the number of connected components.
### Expected Behavior
Return no node cut-sets:
```python
graph = nx.Graph()
graph.add_edge(1,2)
list(all_node_cuts(graph))
>>> []
```
### Steps to Reproduce
The above example
### Environment
Python version: 3.10.10
NetworkX version: 2.6.3
</issue>
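For reference, the claim in the report can be cross-checked without calling `all_node_cuts` at all: in a connected graph, a node cut-set of size 1 is exactly an articulation point, and K2 has none, so the expected result really is an empty list. A minimal sanity check using only standard NetworkX calls:

```python
import networkx as nx

G = nx.complete_graph(2)  # two nodes joined by a single edge (K2)

# K2 has node connectivity 1, but removing either node leaves a single,
# trivially connected node, so no separating vertex set exists.
print(nx.node_connectivity(G))          # 1
print(list(nx.articulation_points(G)))  # [] -> no size-1 cut-set exists
```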
<code>
[start of networkx/algorithms/connectivity/kcutsets.py]
1 """
2 Kanevsky all minimum node k cutsets algorithm.
3 """
4 import copy
5 from collections import defaultdict
6 from itertools import combinations
7 from operator import itemgetter
8
9 import networkx as nx
10 from networkx.algorithms.flow import (
11 build_residual_network,
12 edmonds_karp,
13 shortest_augmenting_path,
14 )
15
16 from .utils import build_auxiliary_node_connectivity
17
18 default_flow_func = edmonds_karp
19
20
21 __all__ = ["all_node_cuts"]
22
23
24 def all_node_cuts(G, k=None, flow_func=None):
25 r"""Returns all minimum k cutsets of an undirected graph G.
26
27 This implementation is based on Kanevsky's algorithm [1]_ for finding all
28 minimum-size node cut-sets of an undirected graph G; ie the set (or sets)
29 of nodes of cardinality equal to the node connectivity of G. Thus if
30 removed, would break G into two or more connected components.
31
32 Parameters
33 ----------
34 G : NetworkX graph
35 Undirected graph
36
37 k : Integer
38 Node connectivity of the input graph. If k is None, then it is
39 computed. Default value: None.
40
41 flow_func : function
42 Function to perform the underlying flow computations. Default value is
43 :func:`~networkx.algorithms.flow.edmonds_karp`. This function performs
44 better in sparse graphs with right tailed degree distributions.
45 :func:`~networkx.algorithms.flow.shortest_augmenting_path` will
46 perform better in denser graphs.
47
48
49 Returns
50 -------
51 cuts : a generator of node cutsets
52 Each node cutset has cardinality equal to the node connectivity of
53 the input graph.
54
55 Examples
56 --------
57 >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2
58 >>> G = nx.grid_2d_graph(5, 5)
59 >>> cutsets = list(nx.all_node_cuts(G))
60 >>> len(cutsets)
61 4
62 >>> all(2 == len(cutset) for cutset in cutsets)
63 True
64 >>> nx.node_connectivity(G)
65 2
66
67 Notes
68 -----
69 This implementation is based on the sequential algorithm for finding all
70 minimum-size separating vertex sets in a graph [1]_. The main idea is to
71 compute minimum cuts using local maximum flow computations among a set
72 of nodes of highest degree and all other non-adjacent nodes in the Graph.
73 Once we find a minimum cut, we add an edge between the high degree
74 node and the target node of the local maximum flow computation to make
75 sure that we will not find that minimum cut again.
76
77 See also
78 --------
79 node_connectivity
80 edmonds_karp
81 shortest_augmenting_path
82
83 References
84 ----------
85 .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex
86 sets in a graph. Networks 23(6), 533--541.
87 http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
88
89 """
90 if not nx.is_connected(G):
91 raise nx.NetworkXError("Input graph is disconnected.")
92
93 # Address some corner cases first.
94 # For complete Graphs
95 if nx.density(G) == 1:
96 for cut_set in combinations(G, len(G) - 1):
97 yield set(cut_set)
98 return
99 # Initialize data structures.
100 # Keep track of the cuts already computed so we do not repeat them.
101 seen = []
102 # Even-Tarjan reduction is what we call auxiliary digraph
103 # for node connectivity.
104 H = build_auxiliary_node_connectivity(G)
105 H_nodes = H.nodes # for speed
106 mapping = H.graph["mapping"]
107 # Keep a copy of original predecessors, H will be modified later.
108 # Shallow copy is enough.
109 original_H_pred = copy.copy(H._pred)
110 R = build_residual_network(H, "capacity")
111 kwargs = {"capacity": "capacity", "residual": R}
112 # Define default flow function
113 if flow_func is None:
114 flow_func = default_flow_func
115 if flow_func is shortest_augmenting_path:
116 kwargs["two_phase"] = True
117 # Begin the actual algorithm
118 # step 1: Find node connectivity k of G
119 if k is None:
120 k = nx.node_connectivity(G, flow_func=flow_func)
121 # step 2:
122 # Find k nodes with top degree, call it X:
123 X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
124 # Check if X is a k-node-cutset
125 if _is_separating_set(G, X):
126 seen.append(X)
127 yield X
128
129 for x in X:
130 # step 3: Compute local connectivity flow of x with all other
131 # non adjacent nodes in G
132 non_adjacent = set(G) - X - set(G[x])
133 for v in non_adjacent:
134 # step 4: compute maximum flow in an Even-Tarjan reduction H of G
135 # and step 5: build the associated residual network R
136 R = flow_func(H, f"{mapping[x]}B", f"{mapping[v]}A", **kwargs)
137 flow_value = R.graph["flow_value"]
138
139 if flow_value == k:
140 # Find the nodes incident to the flow.
141 E1 = flowed_edges = [
142 (u, w) for (u, w, d) in R.edges(data=True) if d["flow"] != 0
143 ]
144 VE1 = incident_nodes = {n for edge in E1 for n in edge}
145 # Remove saturated edges form the residual network.
146 # Note that reversed edges are introduced with capacity 0
147 # in the residual graph and they need to be removed too.
148 saturated_edges = [
149 (u, w, d)
150 for (u, w, d) in R.edges(data=True)
151 if d["capacity"] == d["flow"] or d["capacity"] == 0
152 ]
153 R.remove_edges_from(saturated_edges)
154 R_closure = nx.transitive_closure(R)
155 # step 6: shrink the strongly connected components of
156 # residual flow network R and call it L.
157 L = nx.condensation(R)
158 cmap = L.graph["mapping"]
159 inv_cmap = defaultdict(list)
160 for n, scc in cmap.items():
161 inv_cmap[scc].append(n)
162 # Find the incident nodes in the condensed graph.
163 VE1 = {cmap[n] for n in VE1}
164 # step 7: Compute all antichains of L;
165 # they map to closed sets in H.
166 # Any edge in H that links a closed set is part of a cutset.
167 for antichain in nx.antichains(L):
168 # Only antichains that are subsets of incident nodes counts.
169 # Lemma 8 in reference.
170 if not set(antichain).issubset(VE1):
171 continue
172 # Nodes in an antichain of the condensation graph of
173 # the residual network map to a closed set of nodes that
174 # define a node partition of the auxiliary digraph H
175 # through taking all of antichain's predecessors in the
176 # transitive closure.
177 S = set()
178 for scc in antichain:
179 S.update(inv_cmap[scc])
180 S_ancestors = set()
181 for n in S:
182 S_ancestors.update(R_closure._pred[n])
183 S.update(S_ancestors)
184 if f"{mapping[x]}B" not in S or f"{mapping[v]}A" in S:
185 continue
186 # Find the cutset that links the node partition (S,~S) in H
187 cutset = set()
188 for u in S:
189 cutset.update((u, w) for w in original_H_pred[u] if w not in S)
190 # The edges in H that form the cutset are internal edges
191 # (ie edges that represent a node of the original graph G)
192 if any(H_nodes[u]["id"] != H_nodes[w]["id"] for u, w in cutset):
193 continue
194 node_cut = {H_nodes[u]["id"] for u, _ in cutset}
195
196 if len(node_cut) == k:
197 # The cut is invalid if it includes internal edges of
198 # end nodes. The other half of Lemma 8 in ref.
199 if x in node_cut or v in node_cut:
200 continue
201 if node_cut not in seen:
202 yield node_cut
203 seen.append(node_cut)
204
205 # Add an edge (x, v) to make sure that we do not
206 # find this cutset again. This is equivalent
207 # of adding the edge in the input graph
208 # G.add_edge(x, v) and then regenerate H and R:
209 # Add edges to the auxiliary digraph.
210 # See build_residual_network for convention we used
211 # in residual graphs.
212 H.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1)
213 H.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1)
214 # Add edges to the residual network.
215 R.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1)
216 R.add_edge(f"{mapping[v]}A", f"{mapping[x]}B", capacity=0)
217 R.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1)
218 R.add_edge(f"{mapping[x]}A", f"{mapping[v]}B", capacity=0)
219
220 # Add again the saturated edges to reuse the residual network
221 R.add_edges_from(saturated_edges)
222
223
224 def _is_separating_set(G, cut):
225 """Assumes that the input graph is connected"""
226 if len(cut) == len(G) - 1:
227 return True
228
229 H = nx.restricted_view(G, cut, [])
230 if nx.is_connected(H):
231 return False
232 return True
233
[end of networkx/algorithms/connectivity/kcutsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py
--- a/networkx/algorithms/connectivity/kcutsets.py
+++ b/networkx/algorithms/connectivity/kcutsets.py
@@ -92,10 +92,11 @@
# Address some corner cases first.
# For complete Graphs
+
if nx.density(G) == 1:
- for cut_set in combinations(G, len(G) - 1):
- yield set(cut_set)
+ yield from ()
return
+
# Initialize data structures.
# Keep track of the cuts already computed so we do not repeat them.
seen = []
@@ -129,7 +130,7 @@
for x in X:
# step 3: Compute local connectivity flow of x with all other
# non adjacent nodes in G
- non_adjacent = set(G) - X - set(G[x])
+ non_adjacent = set(G) - {x} - set(G[x])
for v in non_adjacent:
# step 4: compute maximum flow in an Even-Tarjan reduction H of G
# and step 5: build the associated residual network R
| {"golden_diff": "diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py\n--- a/networkx/algorithms/connectivity/kcutsets.py\n+++ b/networkx/algorithms/connectivity/kcutsets.py\n@@ -92,10 +92,11 @@\n \n # Address some corner cases first.\n # For complete Graphs\n+\n if nx.density(G) == 1:\n- for cut_set in combinations(G, len(G) - 1):\n- yield set(cut_set)\n+ yield from ()\n return\n+\n # Initialize data structures.\n # Keep track of the cuts already computed so we do not repeat them.\n seen = []\n@@ -129,7 +130,7 @@\n for x in X:\n # step 3: Compute local connectivity flow of x with all other\n # non adjacent nodes in G\n- non_adjacent = set(G) - X - set(G[x])\n+ non_adjacent = set(G) - {x} - set(G[x])\n for v in non_adjacent:\n # step 4: compute maximum flow in an Even-Tarjan reduction H of G\n # and step 5: build the associated residual network R\n", "issue": "all_node_cuts returns incorrect cuts\nThe function `all_node_cuts` returns node cut-sets for the K2 (complete graph with two nodes), but there exist no.\r\n\r\n### Current Behavior\r\n\r\nConsider the following graph, consisting of two nodes connected by an edge:\r\n```python\r\ngraph = nx.Graph()\r\ngraph.add_edge(1,2)\r\nlist(all_node_cuts(graph)\r\n>>> [{1}, {2}]\r\n```\r\nThis graph has no node cut-sets because removing any vertex does not increase the number of connected components\r\n\r\n### Expected Behavior\r\n\r\nReturn no node cut-sets:\r\n```python\r\ngraph = nx.Graph()\r\ngraph.add_edge(1,2)\r\nlist(all_node_cuts(graph)\r\n>>> []\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\nThe above example\r\n\r\n### Environment\r\n\r\nPython version: 3.10.10\r\nNetworkX version: 2.6.3\r\n\n", "before_files": [{"content": "\"\"\"\nKanevsky all minimum node k cutsets algorithm.\n\"\"\"\nimport copy\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom operator import itemgetter\n\nimport networkx as nx\nfrom networkx.algorithms.flow import (\n build_residual_network,\n edmonds_karp,\n shortest_augmenting_path,\n)\n\nfrom .utils import build_auxiliary_node_connectivity\n\ndefault_flow_func = edmonds_karp\n\n\n__all__ = [\"all_node_cuts\"]\n\n\ndef all_node_cuts(G, k=None, flow_func=None):\n r\"\"\"Returns all minimum k cutsets of an undirected graph G.\n\n This implementation is based on Kanevsky's algorithm [1]_ for finding all\n minimum-size node cut-sets of an undirected graph G; ie the set (or sets)\n of nodes of cardinality equal to the node connectivity of G. Thus if\n removed, would break G into two or more connected components.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n k : Integer\n Node connectivity of the input graph. If k is None, then it is\n computed. Default value: None.\n\n flow_func : function\n Function to perform the underlying flow computations. Default value is\n :func:`~networkx.algorithms.flow.edmonds_karp`. 
This function performs\n better in sparse graphs with right tailed degree distributions.\n :func:`~networkx.algorithms.flow.shortest_augmenting_path` will\n perform better in denser graphs.\n\n\n Returns\n -------\n cuts : a generator of node cutsets\n Each node cutset has cardinality equal to the node connectivity of\n the input graph.\n\n Examples\n --------\n >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2\n >>> G = nx.grid_2d_graph(5, 5)\n >>> cutsets = list(nx.all_node_cuts(G))\n >>> len(cutsets)\n 4\n >>> all(2 == len(cutset) for cutset in cutsets)\n True\n >>> nx.node_connectivity(G)\n 2\n\n Notes\n -----\n This implementation is based on the sequential algorithm for finding all\n minimum-size separating vertex sets in a graph [1]_. The main idea is to\n compute minimum cuts using local maximum flow computations among a set\n of nodes of highest degree and all other non-adjacent nodes in the Graph.\n Once we find a minimum cut, we add an edge between the high degree\n node and the target node of the local maximum flow computation to make\n sure that we will not find that minimum cut again.\n\n See also\n --------\n node_connectivity\n edmonds_karp\n shortest_augmenting_path\n\n References\n ----------\n .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex\n sets in a graph. Networks 23(6), 533--541.\n http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract\n\n \"\"\"\n if not nx.is_connected(G):\n raise nx.NetworkXError(\"Input graph is disconnected.\")\n\n # Address some corner cases first.\n # For complete Graphs\n if nx.density(G) == 1:\n for cut_set in combinations(G, len(G) - 1):\n yield set(cut_set)\n return\n # Initialize data structures.\n # Keep track of the cuts already computed so we do not repeat them.\n seen = []\n # Even-Tarjan reduction is what we call auxiliary digraph\n # for node connectivity.\n H = build_auxiliary_node_connectivity(G)\n H_nodes = H.nodes # for speed\n mapping = H.graph[\"mapping\"]\n # Keep a copy of original predecessors, H will be modified later.\n # Shallow copy is enough.\n original_H_pred = copy.copy(H._pred)\n R = build_residual_network(H, \"capacity\")\n kwargs = {\"capacity\": \"capacity\", \"residual\": R}\n # Define default flow function\n if flow_func is None:\n flow_func = default_flow_func\n if flow_func is shortest_augmenting_path:\n kwargs[\"two_phase\"] = True\n # Begin the actual algorithm\n # step 1: Find node connectivity k of G\n if k is None:\n k = nx.node_connectivity(G, flow_func=flow_func)\n # step 2:\n # Find k nodes with top degree, call it X:\n X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}\n # Check if X is a k-node-cutset\n if _is_separating_set(G, X):\n seen.append(X)\n yield X\n\n for x in X:\n # step 3: Compute local connectivity flow of x with all other\n # non adjacent nodes in G\n non_adjacent = set(G) - X - set(G[x])\n for v in non_adjacent:\n # step 4: compute maximum flow in an Even-Tarjan reduction H of G\n # and step 5: build the associated residual network R\n R = flow_func(H, f\"{mapping[x]}B\", f\"{mapping[v]}A\", **kwargs)\n flow_value = R.graph[\"flow_value\"]\n\n if flow_value == k:\n # Find the nodes incident to the flow.\n E1 = flowed_edges = [\n (u, w) for (u, w, d) in R.edges(data=True) if d[\"flow\"] != 0\n ]\n VE1 = incident_nodes = {n for edge in E1 for n in edge}\n # Remove saturated edges form the residual network.\n # Note that reversed edges are introduced with capacity 0\n # in the residual graph and they need to be 
removed too.\n saturated_edges = [\n (u, w, d)\n for (u, w, d) in R.edges(data=True)\n if d[\"capacity\"] == d[\"flow\"] or d[\"capacity\"] == 0\n ]\n R.remove_edges_from(saturated_edges)\n R_closure = nx.transitive_closure(R)\n # step 6: shrink the strongly connected components of\n # residual flow network R and call it L.\n L = nx.condensation(R)\n cmap = L.graph[\"mapping\"]\n inv_cmap = defaultdict(list)\n for n, scc in cmap.items():\n inv_cmap[scc].append(n)\n # Find the incident nodes in the condensed graph.\n VE1 = {cmap[n] for n in VE1}\n # step 7: Compute all antichains of L;\n # they map to closed sets in H.\n # Any edge in H that links a closed set is part of a cutset.\n for antichain in nx.antichains(L):\n # Only antichains that are subsets of incident nodes counts.\n # Lemma 8 in reference.\n if not set(antichain).issubset(VE1):\n continue\n # Nodes in an antichain of the condensation graph of\n # the residual network map to a closed set of nodes that\n # define a node partition of the auxiliary digraph H\n # through taking all of antichain's predecessors in the\n # transitive closure.\n S = set()\n for scc in antichain:\n S.update(inv_cmap[scc])\n S_ancestors = set()\n for n in S:\n S_ancestors.update(R_closure._pred[n])\n S.update(S_ancestors)\n if f\"{mapping[x]}B\" not in S or f\"{mapping[v]}A\" in S:\n continue\n # Find the cutset that links the node partition (S,~S) in H\n cutset = set()\n for u in S:\n cutset.update((u, w) for w in original_H_pred[u] if w not in S)\n # The edges in H that form the cutset are internal edges\n # (ie edges that represent a node of the original graph G)\n if any(H_nodes[u][\"id\"] != H_nodes[w][\"id\"] for u, w in cutset):\n continue\n node_cut = {H_nodes[u][\"id\"] for u, _ in cutset}\n\n if len(node_cut) == k:\n # The cut is invalid if it includes internal edges of\n # end nodes. The other half of Lemma 8 in ref.\n if x in node_cut or v in node_cut:\n continue\n if node_cut not in seen:\n yield node_cut\n seen.append(node_cut)\n\n # Add an edge (x, v) to make sure that we do not\n # find this cutset again. This is equivalent\n # of adding the edge in the input graph\n # G.add_edge(x, v) and then regenerate H and R:\n # Add edges to the auxiliary digraph.\n # See build_residual_network for convention we used\n # in residual graphs.\n H.add_edge(f\"{mapping[x]}B\", f\"{mapping[v]}A\", capacity=1)\n H.add_edge(f\"{mapping[v]}B\", f\"{mapping[x]}A\", capacity=1)\n # Add edges to the residual network.\n R.add_edge(f\"{mapping[x]}B\", f\"{mapping[v]}A\", capacity=1)\n R.add_edge(f\"{mapping[v]}A\", f\"{mapping[x]}B\", capacity=0)\n R.add_edge(f\"{mapping[v]}B\", f\"{mapping[x]}A\", capacity=1)\n R.add_edge(f\"{mapping[x]}A\", f\"{mapping[v]}B\", capacity=0)\n\n # Add again the saturated edges to reuse the residual network\n R.add_edges_from(saturated_edges)\n\n\ndef _is_separating_set(G, cut):\n \"\"\"Assumes that the input graph is connected\"\"\"\n if len(cut) == len(G) - 1:\n return True\n\n H = nx.restricted_view(G, cut, [])\n if nx.is_connected(H):\n return False\n return True\n", "path": "networkx/algorithms/connectivity/kcutsets.py"}]} | 3,544 | 273 |
gh_patches_debug_8563 | rasdani/github-patches | git_diff | google__osv.dev-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"withdrawn" entries not getting exported correctly
Identified by @andrewpollock:
https://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94
withdrawn entries are marked as status = INVALID in our DB, so they're not included.
They should be included when we export.
</issue>
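To make the suggestion concrete, here is a hedged sketch of how the export loop could be relaxed so that withdrawn entries (stored with status `INVALID`) are still written out while genuinely unprocessed ones stay excluded. It is a fragment of `_export_ecosystem_to_bucket` from the listing below, not a drop-in replacement, and the exact status check should be confirmed against the `osv` models.

```python
# Fragment (hedged): skip only UNPROCESSED bugs instead of requiring
# PROCESSED, so withdrawn/INVALID entries are still exported.
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
    if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:
        continue  # still exclude private and unprocessed records

    file_path = os.path.join(tmp_dir, bug.id() + '.json')
    osv.write_vulnerability(
        bug.to_vulnerability(include_source=True), file_path)
    zip_file.write(file_path, os.path.basename(file_path))
```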
<code>
[start of docker/exporter/exporter.py]
1 #!/usr/bin/env python3
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """OSV Exporter."""
16 import argparse
17 import concurrent.futures
18 import logging
19 import os
20 import tempfile
21 import zipfile
22 from typing import List
23
24 from google.cloud import ndb
25 from google.cloud import storage
26 from google.cloud import logging as google_logging
27
28 import osv
29
30 DEFAULT_WORK_DIR = '/work'
31
32 DEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'
33 _EXPORT_WORKERS = 32
34 ECOSYSTEMS_FILE = 'ecosystems.txt'
35
36
37 class Exporter:
38 """Exporter."""
39
40 def __init__(self, work_dir, export_bucket):
41 self._work_dir = work_dir
42 self._export_bucket = export_bucket
43
44 def run(self):
45 """Run exporter."""
46 query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)
47 ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]
48
49 for ecosystem in ecosystems:
50 with tempfile.TemporaryDirectory() as tmp_dir:
51 self._export_ecosystem_to_bucket(ecosystem, tmp_dir)
52
53 with tempfile.TemporaryDirectory() as tmp_dir:
54 self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)
55
56 def upload_single(self, bucket, source_path, target_path):
57 """Upload a single file to a bucket."""
58 logging.info('Uploading %s', target_path)
59 try:
60 blob = bucket.blob(target_path)
61 blob.upload_from_filename(source_path)
62 except Exception as e:
63 logging.error('Failed to export: %s', e)
64
65 def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],
66 tmp_dir: str):
67 """Export an ecosystems.txt file with all of the ecosystem names.
68
69 See https://github.com/google/osv.dev/issues/619
70
71 Args:
72 ecosystems: the list of ecosystem names
73 tmp_dir: temporary directory for scratch
74 """
75
76 logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)
77 storage_client = storage.Client()
78 bucket = storage_client.get_bucket(self._export_bucket)
79 ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)
80 with open(ecosystems_file_path, "w") as ecosystems_file:
81 ecosystems_file.writelines([e + "\n" for e in ecosystems])
82
83 self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)
84
85 def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):
86 """Export ecosystem vulns to bucket."""
87 logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)
88 storage_client = storage.Client()
89 bucket = storage_client.get_bucket(self._export_bucket)
90
91 zip_path = os.path.join(tmp_dir, 'all.zip')
92 with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
93 for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
94 if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
95 continue
96
97 file_path = os.path.join(tmp_dir, bug.id() + '.json')
98 osv.write_vulnerability(
99 bug.to_vulnerability(include_source=True), file_path)
100 zip_file.write(file_path, os.path.basename(file_path))
101
102 with concurrent.futures.ThreadPoolExecutor(
103 max_workers=_EXPORT_WORKERS) as executor:
104 for filename in os.listdir(tmp_dir):
105 executor.submit(self.upload_single, bucket,
106 os.path.join(tmp_dir, filename),
107 f'{ecosystem}/{filename}')
108
109
110 def main():
111 logging.getLogger().setLevel(logging.INFO)
112 parser = argparse.ArgumentParser(description='Exporter')
113 parser.add_argument(
114 '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)
115 parser.add_argument(
116 '--bucket',
117 help='Bucket name to export to',
118 default=DEFAULT_EXPORT_BUCKET)
119 args = parser.parse_args()
120
121 tmp_dir = os.path.join(args.work_dir, 'tmp')
122 os.makedirs(tmp_dir, exist_ok=True)
123 os.environ['TMPDIR'] = tmp_dir
124
125 exporter = Exporter(args.work_dir, args.bucket)
126 exporter.run()
127
128
129 if __name__ == '__main__':
130 _ndb_client = ndb.Client()
131 logging_client = google_logging.Client()
132 logging_client.setup_logging()
133 with _ndb_client.context():
134 main()
135
[end of docker/exporter/exporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py
--- a/docker/exporter/exporter.py
+++ b/docker/exporter/exporter.py
@@ -91,7 +91,7 @@
zip_path = os.path.join(tmp_dir, 'all.zip')
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:
continue
file_path = os.path.join(tmp_dir, bug.id() + '.json')
| {"golden_diff": "diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py\n--- a/docker/exporter/exporter.py\n+++ b/docker/exporter/exporter.py\n@@ -91,7 +91,7 @@\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:\n continue\n \n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n", "issue": "\"withdrawn\" entries not getting exported correctly\nIdentified by @andrewpollock : \r\n\r\nhttps://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94\r\n\r\nwithdrawn entries are marked as status = INVALID in our DB, so they're not included.\r\n\r\nThey should be included when we export. \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"OSV Exporter.\"\"\"\nimport argparse\nimport concurrent.futures\nimport logging\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List\n\nfrom google.cloud import ndb\nfrom google.cloud import storage\nfrom google.cloud import logging as google_logging\n\nimport osv\n\nDEFAULT_WORK_DIR = '/work'\n\nDEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'\n_EXPORT_WORKERS = 32\nECOSYSTEMS_FILE = 'ecosystems.txt'\n\n\nclass Exporter:\n \"\"\"Exporter.\"\"\"\n\n def __init__(self, work_dir, export_bucket):\n self._work_dir = work_dir\n self._export_bucket = export_bucket\n\n def run(self):\n \"\"\"Run exporter.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]\n\n for ecosystem in ecosystems:\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_to_bucket(ecosystem, tmp_dir)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)\n\n def upload_single(self, bucket, source_path, target_path):\n \"\"\"Upload a single file to a bucket.\"\"\"\n logging.info('Uploading %s', target_path)\n try:\n blob = bucket.blob(target_path)\n blob.upload_from_filename(source_path)\n except Exception as e:\n logging.error('Failed to export: %s', e)\n\n def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],\n tmp_dir: str):\n \"\"\"Export an ecosystems.txt file with all of the ecosystem names.\n\n See https://github.com/google/osv.dev/issues/619\n\n Args:\n ecosystems: the list of ecosystem names\n tmp_dir: temporary directory for scratch\n \"\"\"\n\n logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)\n with open(ecosystems_file_path, \"w\") as ecosystems_file:\n ecosystems_file.writelines([e + \"\\n\" for e in 
ecosystems])\n\n self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)\n\n def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):\n \"\"\"Export ecosystem vulns to bucket.\"\"\"\n logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n continue\n\n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n osv.write_vulnerability(\n bug.to_vulnerability(include_source=True), file_path)\n zip_file.write(file_path, os.path.basename(file_path))\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=_EXPORT_WORKERS) as executor:\n for filename in os.listdir(tmp_dir):\n executor.submit(self.upload_single, bucket,\n os.path.join(tmp_dir, filename),\n f'{ecosystem}/{filename}')\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser(description='Exporter')\n parser.add_argument(\n '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)\n parser.add_argument(\n '--bucket',\n help='Bucket name to export to',\n default=DEFAULT_EXPORT_BUCKET)\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.work_dir, 'tmp')\n os.makedirs(tmp_dir, exist_ok=True)\n os.environ['TMPDIR'] = tmp_dir\n\n exporter = Exporter(args.work_dir, args.bucket)\n exporter.run()\n\n\nif __name__ == '__main__':\n _ndb_client = ndb.Client()\n logging_client = google_logging.Client()\n logging_client.setup_logging()\n with _ndb_client.context():\n main()\n", "path": "docker/exporter/exporter.py"}]} | 2,013 | 163 |
gh_patches_debug_24891 | rasdani/github-patches | git_diff | streamlink__streamlink-4140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.picarto: Plugin fails to open VODS but works just fine on popout player links
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Running the latest dev build, the Picarto plugin does not seem to open VODs.
Attempting to open https://picarto.tv/Grimsby/videos/197524 results in `[plugins.picarto][error] User is not online`.
But opening https://picarto.tv/videopopout/197524 works just fine.
### Debug log
```text
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.9.7
[cli][debug] Streamlink: 2.4.0+63.gbf269e2
[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)
[cli][debug] Arguments:
[cli][debug] url=https://picarto.tv/Grimsby/videos/197524
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --player="C:\mpv\mpv.exe"
[cli][debug] --output=asdd.mkv
[cli][debug] --rtmp-rtmpdump=rtmpdump.exe
[cli][debug] --rtmpdump=C:\Program Files (x86)\Streamlink\rtmpdump\rtmpdump.exe
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files (x86)\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin picarto for URL https://picarto.tv/Grimsby/videos/197524
[plugins.picarto][debug] Type=Live
[plugins.picarto][error] User is not online
```
</issue>
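The failure pattern suggests the plugin's URL matcher accepts the legacy `/videopopout/<id>` form but not the channel-page `/<user>/videos/<id>` form. Below is a small, self-contained sketch of a matcher covering both; the overall regex shape follows the fix shown in the diff further down, but the group names and anchoring here are illustrative assumptions rather than the plugin's actual code.

```python
import re

VOD_URL = re.compile(r"""
    https?://(?:www\.)?picarto\.tv/
    (?:
        videopopout/(?P<po_vod_id>\d+)   # legacy popout player link
        |
        [^/]+/videos/(?P<vod_id>\d+)     # channel page VOD link
    )$
""", re.VERBOSE)

for url in (
    "https://picarto.tv/videopopout/197524",
    "https://picarto.tv/Grimsby/videos/197524",
):
    m = VOD_URL.match(url)
    print(url, "->", m.group("po_vod_id") or m.group("vod_id"))
```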
<code>
[start of src/streamlink/plugins/picarto.py]
1 import logging
2 import re
3 from urllib.parse import urlparse
4
5 from streamlink.plugin import Plugin, pluginmatcher
6 from streamlink.plugin.api import validate
7 from streamlink.stream.hls import HLSStream
8
9 log = logging.getLogger(__name__)
10
11
12 @pluginmatcher(re.compile(r"""
13 https?://(?:www\.)?picarto\.tv/
14 (?:(?P<po>streampopout|videopopout)/)?
15 (?P<user>[^&?/]+)
16 (?:\?tab=videos&id=(?P<vod_id>\d+))?
17 """, re.VERBOSE))
18 class Picarto(Plugin):
19 API_URL_LIVE = "https://ptvintern.picarto.tv/api/channel/detail/{username}"
20 API_URL_VOD = "https://ptvintern.picarto.tv/ptvapi"
21 HLS_URL = "https://{netloc}/stream/hls/{file_name}/index.m3u8"
22
23 def get_live(self, username):
24 netloc = self.session.http.get(self.url, schema=validate.Schema(
25 validate.parse_html(),
26 validate.xml_xpath_string(".//script[contains(@src,'/stream/player.js')][1]/@src"),
27 validate.any(None, validate.transform(lambda src: urlparse(src).netloc))
28 ))
29 if not netloc:
30 log.error("Could not find server netloc")
31 return
32
33 channel, multistreams = self.session.http.get(self.API_URL_LIVE.format(username=username), schema=validate.Schema(
34 validate.parse_json(),
35 {
36 "channel": validate.any(None, {
37 "stream_name": str,
38 "title": str,
39 "online": bool,
40 "private": bool,
41 "categories": [{"label": str}],
42 }),
43 "getMultiStreams": validate.any(None, {
44 "multistream": bool,
45 "streams": [{
46 "name": str,
47 "online": bool,
48 }],
49 }),
50 },
51 validate.union_get("channel", "getMultiStreams")
52 ))
53 if not channel or not multistreams:
54 log.debug("Missing channel or streaming data")
55 return
56
57 log.trace(f"netloc={netloc!r}")
58 log.trace(f"channel={channel!r}")
59 log.trace(f"multistreams={multistreams!r}")
60
61 if not channel["online"]:
62 log.error("User is not online")
63 return
64
65 if channel["private"]:
66 log.info("This is a private stream")
67 return
68
69 self.author = username
70 self.category = channel["categories"][0]["label"]
71 self.title = channel["title"]
72
73 hls_url = self.HLS_URL.format(
74 netloc=netloc,
75 file_name=channel["stream_name"]
76 )
77
78 return HLSStream.parse_variant_playlist(self.session, hls_url)
79
80 def get_vod(self, vod_id):
81 data = {
82 'query': (
83 'query ($videoId: ID!) {\n'
84 ' video(id: $videoId) {\n'
85 ' id\n'
86 ' title\n'
87 ' file_name\n'
88 ' video_recording_image_url\n'
89 ' channel {\n'
90 ' name\n'
91 ' }'
92 ' }\n'
93 '}\n'
94 ),
95 'variables': {'videoId': vod_id},
96 }
97 vod_data = self.session.http.post(self.API_URL_VOD, json=data, schema=validate.Schema(
98 validate.parse_json(),
99 {"data": {
100 "video": validate.any(None, {
101 "id": str,
102 "title": str,
103 "file_name": str,
104 "video_recording_image_url": str,
105 "channel": {"name": str},
106 }),
107 }},
108 validate.get(("data", "video"))
109 ))
110
111 if not vod_data:
112 log.debug("Missing video data")
113 return
114
115 log.trace(f"vod_data={vod_data!r}")
116
117 self.author = vod_data["channel"]["name"]
118 self.category = "VOD"
119 self.title = vod_data["title"]
120
121 netloc = urlparse(vod_data["video_recording_image_url"]).netloc
122 hls_url = self.HLS_URL.format(
123 netloc=netloc,
124 file_name=vod_data["file_name"]
125 )
126
127 return HLSStream.parse_variant_playlist(self.session, hls_url)
128
129 def _get_streams(self):
130 m = self.match.groupdict()
131
132 if (m['po'] == 'streampopout' or not m['po']) and m['user'] and not m['vod_id']:
133 log.debug('Type=Live')
134 return self.get_live(m['user'])
135 elif m['po'] == 'videopopout' or (m['user'] and m['vod_id']):
136 log.debug('Type=VOD')
137 vod_id = m['vod_id'] if m['vod_id'] else m['user']
138 return self.get_vod(vod_id)
139
140
141 __plugin__ = Picarto
142
[end of src/streamlink/plugins/picarto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/picarto.py b/src/streamlink/plugins/picarto.py
--- a/src/streamlink/plugins/picarto.py
+++ b/src/streamlink/plugins/picarto.py
@@ -11,9 +11,15 @@
@pluginmatcher(re.compile(r"""
https?://(?:www\.)?picarto\.tv/
- (?:(?P<po>streampopout|videopopout)/)?
- (?P<user>[^&?/]+)
- (?:\?tab=videos&id=(?P<vod_id>\d+))?
+ (?:
+ streampopout/(?P<po_user>[^/]+)/public
+ |
+ videopopout/(?P<po_vod_id>\d+)
+ |
+ [^/]+/videos/(?P<vod_id>\d+)
+ |
+ (?P<user>[^/?&]+)
+ )$
""", re.VERBOSE))
class Picarto(Plugin):
API_URL_LIVE = "https://ptvintern.picarto.tv/api/channel/detail/{username}"
@@ -129,13 +135,12 @@
def _get_streams(self):
m = self.match.groupdict()
- if (m['po'] == 'streampopout' or not m['po']) and m['user'] and not m['vod_id']:
- log.debug('Type=Live')
- return self.get_live(m['user'])
- elif m['po'] == 'videopopout' or (m['user'] and m['vod_id']):
+ if m['po_vod_id'] or m['vod_id']:
log.debug('Type=VOD')
- vod_id = m['vod_id'] if m['vod_id'] else m['user']
- return self.get_vod(vod_id)
+ return self.get_vod(m['po_vod_id'] or m['vod_id'])
+ elif m['po_user'] or m['user']:
+ log.debug('Type=Live')
+ return self.get_live(m['po_user'] or m['user'])
__plugin__ = Picarto
| {"golden_diff": "diff --git a/src/streamlink/plugins/picarto.py b/src/streamlink/plugins/picarto.py\n--- a/src/streamlink/plugins/picarto.py\n+++ b/src/streamlink/plugins/picarto.py\n@@ -11,9 +11,15 @@\n \n @pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?picarto\\.tv/\n- (?:(?P<po>streampopout|videopopout)/)?\n- (?P<user>[^&?/]+)\n- (?:\\?tab=videos&id=(?P<vod_id>\\d+))?\n+ (?:\n+ streampopout/(?P<po_user>[^/]+)/public\n+ |\n+ videopopout/(?P<po_vod_id>\\d+)\n+ |\n+ [^/]+/videos/(?P<vod_id>\\d+)\n+ |\n+ (?P<user>[^/?&]+)\n+ )$\n \"\"\", re.VERBOSE))\n class Picarto(Plugin):\n API_URL_LIVE = \"https://ptvintern.picarto.tv/api/channel/detail/{username}\"\n@@ -129,13 +135,12 @@\n def _get_streams(self):\n m = self.match.groupdict()\n \n- if (m['po'] == 'streampopout' or not m['po']) and m['user'] and not m['vod_id']:\n- log.debug('Type=Live')\n- return self.get_live(m['user'])\n- elif m['po'] == 'videopopout' or (m['user'] and m['vod_id']):\n+ if m['po_vod_id'] or m['vod_id']:\n log.debug('Type=VOD')\n- vod_id = m['vod_id'] if m['vod_id'] else m['user']\n- return self.get_vod(vod_id)\n+ return self.get_vod(m['po_vod_id'] or m['vod_id'])\n+ elif m['po_user'] or m['user']:\n+ log.debug('Type=Live')\n+ return self.get_live(m['po_user'] or m['user'])\n \n \n __plugin__ = Picarto\n", "issue": "plugins.picarto: Plugin fails to open VODS but works just fine on popout player links\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest build from the master branch\r\n\r\n### Description\r\n\r\nRunning lateast dev build, The picarto plugin does not seem to want to open Vods.\r\nAttempting to open https://picarto.tv/Grimsby/videos/197524 results in [plugins.picarto][error] User is not online.\r\nBut opening https://picarto.tv/videopopout/197524 works just fine.\r\n\r\n### Debug log\r\n\r\n```text\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.9.7\r\n[cli][debug] Streamlink: 2.4.0+63.gbf269e2\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://picarto.tv/Grimsby/videos/197524\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=\"C:\\mpv\\mpv.exe\"\r\n[cli][debug] --output=asdd.mkv\r\n[cli][debug] --rtmp-rtmpdump=rtmpdump.exe\r\n[cli][debug] --rtmpdump=C:\\Program Files (x86)\\Streamlink\\rtmpdump\\rtmpdump.exe\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files (x86)\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin picarto for URL https://picarto.tv/Grimsby/videos/197524\r\n[plugins.picarto][debug] Type=Live\r\n[plugins.picarto][error] User is not online\r\n```\r\n\n", "before_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?picarto\\.tv/\n (?:(?P<po>streampopout|videopopout)/)?\n 
(?P<user>[^&?/]+)\n (?:\\?tab=videos&id=(?P<vod_id>\\d+))?\n\"\"\", re.VERBOSE))\nclass Picarto(Plugin):\n API_URL_LIVE = \"https://ptvintern.picarto.tv/api/channel/detail/{username}\"\n API_URL_VOD = \"https://ptvintern.picarto.tv/ptvapi\"\n HLS_URL = \"https://{netloc}/stream/hls/{file_name}/index.m3u8\"\n\n def get_live(self, username):\n netloc = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(@src,'/stream/player.js')][1]/@src\"),\n validate.any(None, validate.transform(lambda src: urlparse(src).netloc))\n ))\n if not netloc:\n log.error(\"Could not find server netloc\")\n return\n\n channel, multistreams = self.session.http.get(self.API_URL_LIVE.format(username=username), schema=validate.Schema(\n validate.parse_json(),\n {\n \"channel\": validate.any(None, {\n \"stream_name\": str,\n \"title\": str,\n \"online\": bool,\n \"private\": bool,\n \"categories\": [{\"label\": str}],\n }),\n \"getMultiStreams\": validate.any(None, {\n \"multistream\": bool,\n \"streams\": [{\n \"name\": str,\n \"online\": bool,\n }],\n }),\n },\n validate.union_get(\"channel\", \"getMultiStreams\")\n ))\n if not channel or not multistreams:\n log.debug(\"Missing channel or streaming data\")\n return\n\n log.trace(f\"netloc={netloc!r}\")\n log.trace(f\"channel={channel!r}\")\n log.trace(f\"multistreams={multistreams!r}\")\n\n if not channel[\"online\"]:\n log.error(\"User is not online\")\n return\n\n if channel[\"private\"]:\n log.info(\"This is a private stream\")\n return\n\n self.author = username\n self.category = channel[\"categories\"][0][\"label\"]\n self.title = channel[\"title\"]\n\n hls_url = self.HLS_URL.format(\n netloc=netloc,\n file_name=channel[\"stream_name\"]\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n def get_vod(self, vod_id):\n data = {\n 'query': (\n 'query ($videoId: ID!) {\\n'\n ' video(id: $videoId) {\\n'\n ' id\\n'\n ' title\\n'\n ' file_name\\n'\n ' video_recording_image_url\\n'\n ' channel {\\n'\n ' name\\n'\n ' }'\n ' }\\n'\n '}\\n'\n ),\n 'variables': {'videoId': vod_id},\n }\n vod_data = self.session.http.post(self.API_URL_VOD, json=data, schema=validate.Schema(\n validate.parse_json(),\n {\"data\": {\n \"video\": validate.any(None, {\n \"id\": str,\n \"title\": str,\n \"file_name\": str,\n \"video_recording_image_url\": str,\n \"channel\": {\"name\": str},\n }),\n }},\n validate.get((\"data\", \"video\"))\n ))\n\n if not vod_data:\n log.debug(\"Missing video data\")\n return\n\n log.trace(f\"vod_data={vod_data!r}\")\n\n self.author = vod_data[\"channel\"][\"name\"]\n self.category = \"VOD\"\n self.title = vod_data[\"title\"]\n\n netloc = urlparse(vod_data[\"video_recording_image_url\"]).netloc\n hls_url = self.HLS_URL.format(\n netloc=netloc,\n file_name=vod_data[\"file_name\"]\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n def _get_streams(self):\n m = self.match.groupdict()\n\n if (m['po'] == 'streampopout' or not m['po']) and m['user'] and not m['vod_id']:\n log.debug('Type=Live')\n return self.get_live(m['user'])\n elif m['po'] == 'videopopout' or (m['user'] and m['vod_id']):\n log.debug('Type=VOD')\n vod_id = m['vod_id'] if m['vod_id'] else m['user']\n return self.get_vod(vod_id)\n\n\n__plugin__ = Picarto\n", "path": "src/streamlink/plugins/picarto.py"}]} | 2,495 | 480 |
gh_patches_debug_6226 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1847 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'split_by_domain'
```
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 73, in _wrap_send
span.service = _extract_service_name(instance, span, hostname=hostname)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 30, in _extract_service_name
if cfg['split_by_domain'] and hostname:
KeyError: 'split_by_domain'
```
Happens on python 3.6 and 3.7
</issue>
<code>
[start of ddtrace/contrib/requests/session.py]
1 import requests
2
3 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
4
5 from .connection import _wrap_send
6
7
8 class TracedSession(requests.Session):
9 """TracedSession is a requests' Session that is already traced.
10 You can use it if you want a finer grained control for your
11 HTTP clients.
12 """
13
14 pass
15
16
17 # always patch our `TracedSession` when imported
18 _w(TracedSession, "send", _wrap_send)
19
[end of ddtrace/contrib/requests/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py
--- a/ddtrace/contrib/requests/session.py
+++ b/ddtrace/contrib/requests/session.py
@@ -2,6 +2,8 @@
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+from ddtrace import config, Pin
+
from .connection import _wrap_send
@@ -16,3 +18,4 @@
# always patch our `TracedSession` when imported
_w(TracedSession, "send", _wrap_send)
+Pin(_config=config.requests).onto(TracedSession)
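As a follow-up usage note, here is a hedged sketch of how the patched session is exercised; the import path and the `split_by_domain` key mirror the traceback and the diff above, but treat the exact API surface as an assumption:

```python
from ddtrace import config
from ddtrace.contrib.requests import TracedSession  # assumed to be exported next to session.py

# With the Pin attached at import time, the integration settings resolve on a
# plain TracedSession instead of raising KeyError: 'split_by_domain'.
config.requests['split_by_domain'] = True

session = TracedSession()
session.get("https://example.com/")  # traced; the service name is split by domain
```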
| {"golden_diff": "diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py\n--- a/ddtrace/contrib/requests/session.py\n+++ b/ddtrace/contrib/requests/session.py\n@@ -2,6 +2,8 @@\n \n from ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n \n+from ddtrace import config, Pin\n+\n from .connection import _wrap_send\n \n \n@@ -16,3 +18,4 @@\n \n # always patch our `TracedSession` when imported\n _w(TracedSession, \"send\", _wrap_send)\n+Pin(_config=config.requests).onto(TracedSession)\n", "issue": "KeyError: 'split_by_domain' \n```\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 581, in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 533, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 73, in _wrap_send\r\n span.service = _extract_service_name(instance, span, hostname=hostname)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 30, in _extract_service_name\r\n if cfg['split_by_domain'] and hostname:\r\nKeyError: 'split_by_domain'\r\n```\r\n\r\nHappens on python 3.6 and 3.7\n", "before_files": [{"content": "import requests\n\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .connection import _wrap_send\n\n\nclass TracedSession(requests.Session):\n \"\"\"TracedSession is a requests' Session that is already traced.\n You can use it if you want a finer grained control for your\n HTTP clients.\n \"\"\"\n\n pass\n\n\n# always patch our `TracedSession` when imported\n_w(TracedSession, \"send\", _wrap_send)\n", "path": "ddtrace/contrib/requests/session.py"}]} | 950 | 140 |
gh_patches_debug_21677 | rasdani/github-patches | git_diff | plotly__plotly.py-4562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
With newer versions of orjson, users need to specify the json engine explicitly (bug?)
Hey
I found out I get an
`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`
if I don't specify this
`plotly.io.json.config.default_engine = 'orjson'`
when using orjson v3.6.6 (latest as of 25jan2022)
Also, an additional note for anyone who hits this issue: you don't need to uninstall orjson if you don't want to use it; just set the engine to 'json' explicitly.

I'm using orjson because of its performance claims, although I ran some tests switching between the two engines and they seem to yield the same results: with go.Candlestick, 10000 candlesticks and some 4-5 indicators, creating the plot takes ~0.8 s in each case. My purpose is to improve the Dash server performance, but it seems to make no difference (the web page still renders more slowly than the ticker even with 600 candles).
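For anyone who wants to pin the engine explicitly, here is a minimal sketch of the workaround mentioned above (the `plotly.io.json.config.default_engine` attribute is the one quoted in this issue):

```python
import plotly.io as pio

# Choose the JSON engine explicitly instead of relying on auto-detection:
# "orjson" if it is installed and importable, or "json" for the stdlib engine.
pio.json.config.default_engine = "json"
```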
</issue>
<code>
[start of packages/python/plotly/_plotly_utils/optional_imports.py]
1 """
2 Stand-alone module to provide information about whether optional deps exist.
3
4 """
5 from importlib import import_module
6 import logging
7 import sys
8
9 logger = logging.getLogger(__name__)
10 _not_importable = set()
11
12
13 def get_module(name, should_load=True):
14 """
15 Return module or None. Absolute import is required.
16
17 :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
18 :raise: (ImportError) Only when exc_msg is defined.
19 :return: (module|None) If import succeeds, the module will be returned.
20
21 """
22 if name in sys.modules:
23 return sys.modules[name]
24 if not should_load:
25 return None
26 if name not in _not_importable:
27 try:
28 return import_module(name)
29 except ImportError:
30 _not_importable.add(name)
31 except Exception:
32 _not_importable.add(name)
33 msg = f"Error importing optional module {name}"
34 logger.exception(msg)
35
[end of packages/python/plotly/_plotly_utils/optional_imports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py
--- a/packages/python/plotly/_plotly_utils/optional_imports.py
+++ b/packages/python/plotly/_plotly_utils/optional_imports.py
@@ -2,6 +2,7 @@
Stand-alone module to provide information about whether optional deps exist.
"""
+
from importlib import import_module
import logging
import sys
@@ -19,10 +20,9 @@
:return: (module|None) If import succeeds, the module will be returned.
"""
- if name in sys.modules:
- return sys.modules[name]
if not should_load:
- return None
+ return sys.modules.get(name, None)
+
if name not in _not_importable:
try:
return import_module(name)
@@ -32,3 +32,5 @@
_not_importable.add(name)
msg = f"Error importing optional module {name}"
logger.exception(msg)
+
+ return None
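A small usage sketch of the patched helper follows; the import path matches the file in the diff and assumes a plotly source checkout or install where `_plotly_utils` is importable:

```python
from _plotly_utils.optional_imports import get_module

# should_load=False only peeks at sys.modules; should_load=True attempts a real
# import, remembers failures, and now explicitly returns None when the optional
# dependency cannot be imported.
numpy_mod = get_module("numpy")
orjson_mod = get_module("orjson", should_load=False)
print(numpy_mod is not None, orjson_mod)
```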
| {"golden_diff": "diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py\n--- a/packages/python/plotly/_plotly_utils/optional_imports.py\n+++ b/packages/python/plotly/_plotly_utils/optional_imports.py\n@@ -2,6 +2,7 @@\n Stand-alone module to provide information about whether optional deps exist.\n \n \"\"\"\n+\n from importlib import import_module\n import logging\n import sys\n@@ -19,10 +20,9 @@\n :return: (module|None) If import succeeds, the module will be returned.\n \n \"\"\"\n- if name in sys.modules:\n- return sys.modules[name]\n if not should_load:\n- return None\n+ return sys.modules.get(name, None)\n+\n if name not in _not_importable:\n try:\n return import_module(name)\n@@ -32,3 +32,5 @@\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n+\n+ return None\n", "issue": "With newer versions of orjson, users need to specify the json engine explicitly (bug?)\nHey\r\n\r\nI found out I get an\r\n`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`\r\nif I don't specify this\r\n`plotly.io.json.config.default_engine = 'orjson'`\r\nwhen using orjson v3.6.6 (latest as of 25jan2022)\r\n\r\nAlso, additional note for whoever might have this issue: you don't need to uninstall orjson if you don't want to use it. just set the engine to 'json' explicitly. \r\n\r\nI'm using orjson because of the performance claims, although I ran some tests switching between the 2 engines and they seem to yield the same results: using go.Candlestick with 10000 candlesticks and some 4-5 indicators, getting ~0.8sec in each case for creating the plot. My purpose is to improve the dash server performace, but it seems it makes no difference (the web page still renders slower than the ticker even with 600 candles)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nStand-alone module to provide information about whether optional deps exist.\n\n\"\"\"\nfrom importlib import import_module\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n_not_importable = set()\n\n\ndef get_module(name, should_load=True):\n \"\"\"\n Return module or None. Absolute import is required.\n\n :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.\n :raise: (ImportError) Only when exc_msg is defined.\n :return: (module|None) If import succeeds, the module will be returned.\n\n \"\"\"\n if name in sys.modules:\n return sys.modules[name]\n if not should_load:\n return None\n if name not in _not_importable:\n try:\n return import_module(name)\n except ImportError:\n _not_importable.add(name)\n except Exception:\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n", "path": "packages/python/plotly/_plotly_utils/optional_imports.py"}]} | 1,059 | 240 |
gh_patches_debug_49037 | rasdani/github-patches | git_diff | facebookresearch__hydra-2677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Fix failing tests
Several tests are broken on main
</issue>
<code>
[start of examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import os
3
4 from omegaconf import DictConfig
5
6 import hydra
7
8
9 @hydra.main(version_base=None)
10 def my_app(_cfg: DictConfig) -> None:
11 print(f"Working directory : {os.getcwd()}")
12 print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
13
14
15 if __name__ == "__main__":
16 my_app()
17
[end of examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
@@ -9,7 +9,9 @@
@hydra.main(version_base=None)
def my_app(_cfg: DictConfig) -> None:
print(f"Working directory : {os.getcwd()}")
- print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
+ print(
+ f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
+ )
if __name__ == "__main__":
| {"golden_diff": "diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n@@ -9,7 +9,9 @@\n @hydra.main(version_base=None)\n def my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n- print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n+ print(\n+ f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n+ )\n \n \n if __name__ == \"__main__\":\n", "issue": "[Bug] Fix failing tests\nSeveral tests are broken on main\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}]} | 708 | 197 |
gh_patches_debug_58538 | rasdani/github-patches | git_diff | ansible__molecule-2270 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
molecule does not honour --destroy=never and prunes ephemeral data anyway (appeared after #1739)
# Issue Type
- Bug report
# Molecule and Ansible details
```
ansible --version && molecule --version
ansible 2.7.10
config file = None
configured module search path = [u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
molecule, version 2.22
```
Molecule installation method (one of):
- pip
Ansible installation method (one of):
- pip
Detail any linters or test runners used:
N/A
# Desired Behaviour
When running _molecule test --destroy=never_, do not prune Molecule's ephemeral data, even when the _destroy_ stage is part of the scenario.
# Actual Behaviour
Molecule configuration excerpt:
```
scenario:
name: default
test_sequence:
- dependency
- syntax
- lint
- create
- converge
- idempotence
- verify
- destroy
```
_molecule test --destroy=never --all_ does not honour _--destroy=never_ and prunes ephemeral data when _destroy_ is a part of a scenario in the molecule configuration.
Output excerpt:
```
Verifier completed successfully.
--> Scenario: 'default'
--> Action: 'destroy'
Skipping, '--destroy=never' requested.
--> Pruning extra files from scenario ephemeral directory
```
Described behaviour contradicts the purpose of NEVER destroying an environment parameter regardless of what stages were defined in the scenario.
In fact, the environment is left intact while the ephemeral data is gone, so the environment becomes unmanaged and it is no longer possible to destroy it later.
_molecule test --destroy=never --all_
_molecule destroy --all_ <-- will not destroy the environment
Previous molecule version works as expected:
```
ansible 2.7.10
config file = None
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
molecule, version 2.20.1
```
Output excerpt
```
Verifier completed successfully.
--> Scenario: 'default'
--> Action: 'destroy'
Skipping, '--destroy=never' requested.
```
Pruning based on presence of _destroy_ stage in the molecule configuration appeared after https://github.com/ansible/molecule/pull/1739
</issue>
<code>
[start of molecule/command/base.py]
1 # Copyright (c) 2015-2018 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 import abc
22 import collections
23 import glob
24 import os
25
26 import six
27
28 import molecule.command
29 import molecule.scenarios
30 from molecule import config
31 from molecule import logger
32 from molecule import util
33
34 LOG = logger.get_logger(__name__)
35 MOLECULE_GLOB = os.environ.get('MOLECULE_GLOB', 'molecule/*/molecule.yml')
36 MOLECULE_DEFAULT_SCENARIO_NAME = 'default'
37
38
39 @six.add_metaclass(abc.ABCMeta)
40 class Base(object):
41 """
42 An abstract base class used to define the command interface.
43 """
44
45 def __init__(self, c):
46 """
47 Base initializer for all :ref:`Command` classes.
48
49 :param c: An instance of a Molecule config.
50 :returns: None
51 """
52 self._config = c
53 self._setup()
54
55 @abc.abstractmethod
56 def execute(self): # pragma: no cover
57 pass
58
59 def print_info(self):
60 msg = "Scenario: '{}'".format(self._config.scenario.name)
61 LOG.info(msg)
62 msg = "Action: '{}'".format(util.underscore(self.__class__.__name__))
63 LOG.info(msg)
64
65 def _setup(self):
66 """
67 Prepare Molecule's provisioner and returns None.
68
69 :return: None
70 """
71 self._config.provisioner.write_config()
72 self._config.provisioner.manage_inventory()
73
74
75 def execute_cmdline_scenarios(scenario_name, args, command_args, ansible_args=()):
76 """
77 Execute scenario sequences based on parsed command-line arguments.
78
79 This is useful for subcommands that run scenario sequences, which
80 excludes subcommands such as ``list``, ``login``, and ``matrix``.
81
82 ``args`` and ``command_args`` are combined using :func:`get_configs`
83 to generate the scenario(s) configuration.
84
85 :param scenario_name: Name of scenario to run, or ``None`` to run all.
86 :param args: ``args`` dict from ``click`` command context
87 :param command_args: dict of command argumentss, including the target
88 subcommand to execute
89 :returns: None
90
91 """
92 scenarios = molecule.scenarios.Scenarios(
93 get_configs(args, command_args, ansible_args), scenario_name
94 )
95 scenarios.print_matrix()
96 for scenario in scenarios:
97 try:
98 execute_scenario(scenario)
99 except SystemExit:
100 # if the command has a 'destroy' arg, like test does,
101 # handle that behavior here.
102 if command_args.get('destroy') == 'always':
103 msg = (
104 'An error occurred during the {} sequence action: '
105 "'{}'. Cleaning up."
106 ).format(scenario.config.subcommand, scenario.config.action)
107 LOG.warning(msg)
108 execute_subcommand(scenario.config, 'cleanup')
109 execute_subcommand(scenario.config, 'destroy')
110 # always prune ephemeral dir if destroying on failure
111 scenario.prune()
112 if scenario.config.is_parallel:
113 scenario._remove_scenario_state_directory()
114 util.sysexit()
115 else:
116 raise
117
118
119 def execute_subcommand(config, subcommand):
120 command_module = getattr(molecule.command, subcommand)
121 command = getattr(command_module, util.camelize(subcommand))
122 # knowledge of the current action is used by some provisioners
123 # to ensure they behave correctly during certain sequence steps,
124 # particulary the setting of ansible options in create/destroy,
125 # and is also used for reporting in execute_cmdline_scenarios
126 config.action = subcommand
127
128 return command(config).execute()
129
130
131 def execute_scenario(scenario):
132 """
133 Execute each command in the given scenario's configured sequence.
134
135 :param scenario: The scenario to execute.
136 :returns: None
137
138 """
139
140 for action in scenario.sequence:
141 execute_subcommand(scenario.config, action)
142
143 # pruning only if a 'destroy' step was in the sequence allows for normal
144 # debugging by manually stepping through a scenario sequence
145 if 'destroy' in scenario.sequence:
146 scenario.prune()
147
148 if scenario.config.is_parallel:
149 scenario._remove_scenario_state_directory()
150
151
152 def get_configs(args, command_args, ansible_args=()):
153 """
154 Glob the current directory for Molecule config files, instantiate config
155 objects, and returns a list.
156
157 :param args: A dict of options, arguments and commands from the CLI.
158 :param command_args: A dict of options passed to the subcommand from
159 the CLI.
160 :param ansible_args: An optional tuple of arguments provided to the
161 `ansible-playbook` command.
162 :return: list
163 """
164 configs = [
165 config.Config(
166 molecule_file=util.abs_path(c),
167 args=args,
168 command_args=command_args,
169 ansible_args=ansible_args,
170 )
171 for c in glob.glob(MOLECULE_GLOB)
172 ]
173 _verify_configs(configs)
174
175 return configs
176
177
178 def _verify_configs(configs):
179 """
180 Verify a Molecule config was found and returns None.
181
182 :param configs: A list containing absolute paths to Molecule config files.
183 :return: None
184 """
185 if configs:
186 scenario_names = [c.scenario.name for c in configs]
187 for scenario_name, n in collections.Counter(scenario_names).items():
188 if n > 1:
189 msg = ("Duplicate scenario name '{}' found. " 'Exiting.').format(
190 scenario_name
191 )
192 util.sysexit_with_message(msg)
193
194 else:
195 msg = "'{}' glob failed. Exiting.".format(MOLECULE_GLOB)
196 util.sysexit_with_message(msg)
197
198
199 def _get_subcommand(string):
200 return string.split('.')[-1]
201
[end of molecule/command/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/molecule/command/base.py b/molecule/command/base.py
--- a/molecule/command/base.py
+++ b/molecule/command/base.py
@@ -140,9 +140,10 @@
for action in scenario.sequence:
execute_subcommand(scenario.config, action)
- # pruning only if a 'destroy' step was in the sequence allows for normal
- # debugging by manually stepping through a scenario sequence
- if 'destroy' in scenario.sequence:
+ if (
+ 'destroy' in scenario.sequence
+ and scenario.config.command_args.get('destroy') != 'never'
+ ):
scenario.prune()
if scenario.config.is_parallel:
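To make the new guard concrete, here is a self-contained sketch of the decision it encodes (the helper name is illustrative and not part of molecule):

```python
def should_prune(sequence, command_args):
    """Prune ephemeral data only when a destroy step is in the sequence and
    the user did not request --destroy=never."""
    return 'destroy' in sequence and command_args.get('destroy') != 'never'

assert should_prune(['create', 'converge', 'destroy'], {'destroy': 'always'})
assert not should_prune(['create', 'converge', 'destroy'], {'destroy': 'never'})
assert not should_prune(['create', 'converge'], {})
```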
| {"golden_diff": "diff --git a/molecule/command/base.py b/molecule/command/base.py\n--- a/molecule/command/base.py\n+++ b/molecule/command/base.py\n@@ -140,9 +140,10 @@\n for action in scenario.sequence:\n execute_subcommand(scenario.config, action)\n \n- # pruning only if a 'destroy' step was in the sequence allows for normal\n- # debugging by manually stepping through a scenario sequence\n- if 'destroy' in scenario.sequence:\n+ if (\n+ 'destroy' in scenario.sequence\n+ and scenario.config.command_args.get('destroy') != 'never'\n+ ):\n scenario.prune()\n \n if scenario.config.is_parallel:\n", "issue": "molecule does not honour --destroy=never and prunes ephemeral data anyway (appeared after #1739)\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version && molecule --version\r\nansible 2.7.10\r\n config file = None\r\n configured module search path = [u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]\r\nmolecule, version 2.22\r\n```\r\n\r\nMolecule installation method (one of):\r\n\r\n- pip\r\n\r\nAnsible installation method (one of):\r\n\r\n- pip\r\n\r\nDetail any linters or test runners used:\r\n\r\nN/A\r\n\r\n# Desired Behaviour\r\n\r\nWhen running _molecule test --destroy=never_ do not prune molecule ephemeral data, even when _destroy_ stage is a part of a scenario.\r\n\r\n# Actual Behaviour\r\n\r\nMolecule configuration excerpt:\r\n```\r\nscenario:\r\n name: default\r\n test_sequence:\r\n - dependency\r\n - syntax\r\n - lint\r\n - create\r\n - converge\r\n - idempotence\r\n - verify\r\n - destroy\r\n```\r\n\r\n_molecule test --destroy=never --all_ does not honour _--destroy=never_ and prunes ephemeral data when _destroy_ is a part of a scenario in the molecule configuration.\r\n\r\nOutput excerpt:\r\n```\r\nVerifier completed successfully.\r\n--> Scenario: 'default'\r\n--> Action: 'destroy'\r\nSkipping, '--destroy=never' requested.\r\n--> Pruning extra files from scenario ephemeral directory\r\n```\r\n\r\nDescribed behaviour contradicts the purpose of NEVER destroying an environment parameter regardless of what stages were defined in the scenario.\r\nIn fact, the environment is left intact, though ephemeral data is gone, hence environment becomes unattended and it's not possible to destroy it later.\r\n\r\n_molecule test --destroy=never --all_\r\n_molecule destroy --all_ <-- will not destroy the environment\r\n\r\n\r\nPrevious molecule version works as expected:\r\n```\r\nansible 2.7.10\r\n config file = None\r\n configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Jun 20 2019, 20:27:34) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]\r\nmolecule, version 2.20.1\r\n```\r\n\r\nOutput excerpt\r\n```\r\nVerifier completed successfully.\r\n--> Scenario: 'default'\r\n--> Action: 'destroy'\r\nSkipping, '--destroy=never' requested.\r\n```\r\n\r\nPruning based on presence of _destroy_ stage in the molecule configuration appeared after https://github.com/ansible/molecule/pull/1739\r\n\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby 
granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport abc\nimport collections\nimport glob\nimport os\n\nimport six\n\nimport molecule.command\nimport molecule.scenarios\nfrom molecule import config\nfrom molecule import logger\nfrom molecule import util\n\nLOG = logger.get_logger(__name__)\nMOLECULE_GLOB = os.environ.get('MOLECULE_GLOB', 'molecule/*/molecule.yml')\nMOLECULE_DEFAULT_SCENARIO_NAME = 'default'\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Base(object):\n \"\"\"\n An abstract base class used to define the command interface.\n \"\"\"\n\n def __init__(self, c):\n \"\"\"\n Base initializer for all :ref:`Command` classes.\n\n :param c: An instance of a Molecule config.\n :returns: None\n \"\"\"\n self._config = c\n self._setup()\n\n @abc.abstractmethod\n def execute(self): # pragma: no cover\n pass\n\n def print_info(self):\n msg = \"Scenario: '{}'\".format(self._config.scenario.name)\n LOG.info(msg)\n msg = \"Action: '{}'\".format(util.underscore(self.__class__.__name__))\n LOG.info(msg)\n\n def _setup(self):\n \"\"\"\n Prepare Molecule's provisioner and returns None.\n\n :return: None\n \"\"\"\n self._config.provisioner.write_config()\n self._config.provisioner.manage_inventory()\n\n\ndef execute_cmdline_scenarios(scenario_name, args, command_args, ansible_args=()):\n \"\"\"\n Execute scenario sequences based on parsed command-line arguments.\n\n This is useful for subcommands that run scenario sequences, which\n excludes subcommands such as ``list``, ``login``, and ``matrix``.\n\n ``args`` and ``command_args`` are combined using :func:`get_configs`\n to generate the scenario(s) configuration.\n\n :param scenario_name: Name of scenario to run, or ``None`` to run all.\n :param args: ``args`` dict from ``click`` command context\n :param command_args: dict of command argumentss, including the target\n subcommand to execute\n :returns: None\n\n \"\"\"\n scenarios = molecule.scenarios.Scenarios(\n get_configs(args, command_args, ansible_args), scenario_name\n )\n scenarios.print_matrix()\n for scenario in scenarios:\n try:\n execute_scenario(scenario)\n except SystemExit:\n # if the command has a 'destroy' arg, like test does,\n # handle that behavior here.\n if command_args.get('destroy') == 'always':\n msg = (\n 'An error occurred during the {} sequence action: '\n \"'{}'. 
Cleaning up.\"\n ).format(scenario.config.subcommand, scenario.config.action)\n LOG.warning(msg)\n execute_subcommand(scenario.config, 'cleanup')\n execute_subcommand(scenario.config, 'destroy')\n # always prune ephemeral dir if destroying on failure\n scenario.prune()\n if scenario.config.is_parallel:\n scenario._remove_scenario_state_directory()\n util.sysexit()\n else:\n raise\n\n\ndef execute_subcommand(config, subcommand):\n command_module = getattr(molecule.command, subcommand)\n command = getattr(command_module, util.camelize(subcommand))\n # knowledge of the current action is used by some provisioners\n # to ensure they behave correctly during certain sequence steps,\n # particulary the setting of ansible options in create/destroy,\n # and is also used for reporting in execute_cmdline_scenarios\n config.action = subcommand\n\n return command(config).execute()\n\n\ndef execute_scenario(scenario):\n \"\"\"\n Execute each command in the given scenario's configured sequence.\n\n :param scenario: The scenario to execute.\n :returns: None\n\n \"\"\"\n\n for action in scenario.sequence:\n execute_subcommand(scenario.config, action)\n\n # pruning only if a 'destroy' step was in the sequence allows for normal\n # debugging by manually stepping through a scenario sequence\n if 'destroy' in scenario.sequence:\n scenario.prune()\n\n if scenario.config.is_parallel:\n scenario._remove_scenario_state_directory()\n\n\ndef get_configs(args, command_args, ansible_args=()):\n \"\"\"\n Glob the current directory for Molecule config files, instantiate config\n objects, and returns a list.\n\n :param args: A dict of options, arguments and commands from the CLI.\n :param command_args: A dict of options passed to the subcommand from\n the CLI.\n :param ansible_args: An optional tuple of arguments provided to the\n `ansible-playbook` command.\n :return: list\n \"\"\"\n configs = [\n config.Config(\n molecule_file=util.abs_path(c),\n args=args,\n command_args=command_args,\n ansible_args=ansible_args,\n )\n for c in glob.glob(MOLECULE_GLOB)\n ]\n _verify_configs(configs)\n\n return configs\n\n\ndef _verify_configs(configs):\n \"\"\"\n Verify a Molecule config was found and returns None.\n\n :param configs: A list containing absolute paths to Molecule config files.\n :return: None\n \"\"\"\n if configs:\n scenario_names = [c.scenario.name for c in configs]\n for scenario_name, n in collections.Counter(scenario_names).items():\n if n > 1:\n msg = (\"Duplicate scenario name '{}' found. \" 'Exiting.').format(\n scenario_name\n )\n util.sysexit_with_message(msg)\n\n else:\n msg = \"'{}' glob failed. Exiting.\".format(MOLECULE_GLOB)\n util.sysexit_with_message(msg)\n\n\ndef _get_subcommand(string):\n return string.split('.')[-1]\n", "path": "molecule/command/base.py"}]} | 3,207 | 150 |
gh_patches_debug_38113 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2339 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AZ needs a new vote scraper
the old bill scraper included Votes, but the new one doesn't
it might be best to wait and do this as part of the pupa version
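For orientation, here is a hedged sketch of the floor-action endpoint that the fix at the end of this record relies on; the parameter names come from that patch, and the numeric IDs below are purely hypothetical:

```python
import requests

resp = requests.get(
    'https://apps.azleg.gov/api/BillStatusFloorAction',
    params={
        'billStatusId': 68408,       # hypothetical internal bill id
        'billStatusActionId': 1,     # hypothetical floor-action id
        'includeVotes': 'true',
    },
)
print(resp.json())  # expected: a list of actions with Ayes/Nays and per-member Votes
```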
</issue>
<code>
[start of openstates/az/bills.py]
1 import json
2 import datetime
3
4 from pupa.scrape import Scraper, Bill
5 from . import utils
6 from . import session_metadata
7
8 from lxml import html
9
10
11 BASE_URL = 'https://www.azleg.gov/'
12
13
14 class AZBillScraper(Scraper):
15 chamber_map = {'lower': 'H', 'upper': 'S'}
16 chamber_map_rev = {'H': 'upper', 'S': 'lower', 'G': 'executive', 'SS': 'executive'}
17 chamber_map_rev_eng = {'H': 'House', 'S': 'Senate', 'G': 'Governor',
18 'SS': 'Secretary of State'}
19
20 def scrape_bill(self, chamber, session, bill_id, session_id):
21 bill_json_url = 'https://apps.azleg.gov/api/Bill/?billNumber={}&sessionId={}&' \
22 'legislativeBody={}'.format(bill_id, session_id, self.chamber_map[chamber])
23 response = self.get(bill_json_url)
24 page = json.loads(response.content.decode('utf-8'))
25
26 if not page:
27 self.warning('null page for %s', bill_id)
28 return
29
30 bill_title = page['ShortTitle']
31 bill_id = page['Number']
32 internal_id = page['BillId']
33 bill_type = self.get_bill_type(bill_id)
34 bill = Bill(
35 bill_id,
36 legislative_session=session,
37 chamber=chamber,
38 title=bill_title,
39 classification=bill_type,
40 )
41
42 self.scrape_actions(bill, page, chamber)
43 self.scrape_versions_and_documents(bill, internal_id)
44 self.scrape_sponsors(bill, internal_id)
45 self.scrape_subjects(bill, internal_id)
46
47 bill_url = 'https://apps.azleg.gov/BillStatus/BillOverview/{}?SessionId={}'.format(
48 internal_id, session_id)
49 bill.add_source(bill_url)
50
51 self.sort_bill_actions(bill)
52
53 yield bill
54
55 def scrape_versions_and_documents(self, bill, internal_id):
56 # Careful, this sends XML to a browser but JSON to machines
57 # https://apps.azleg.gov/api/DocType/?billStatusId=68408
58 versions_url = 'https://apps.azleg.gov/api/DocType/?billStatusId={}'.format(internal_id)
59 page = json.loads(self.get(versions_url).content.decode('utf-8'))
60 for document_set in page:
61 type_ = document_set['DocumentGroupName']
62 for doc in document_set['Documents']:
63 media_type = 'text/html' if doc['HtmlPath'] else 'application/pdf'
64 url = doc['HtmlPath'] or doc['PdfPath']
65 if not url:
66 self.warning("No PDF or HTML version found for %s" % doc['DocumentName'])
67 # Sometimes the URL is just a relative path; make it absolute
68 if not url.startswith('http'):
69 url = 'https://apps.azleg.gov{}'.format(url)
70
71 if type_ == 'Bill Versions':
72 bill.add_version_link(
73 note=doc['DocumentName'],
74 url=url,
75 media_type=media_type
76 )
77 else:
78 bill.add_document_link(
79 note=doc['DocumentName'],
80 url=url,
81 media_type=media_type
82 )
83
84 def scrape_sponsors(self, bill, internal_id):
85 # Careful, this sends XML to a browser but JSON to machines
86 # https://apps.azleg.gov/api/BillSponsor/?id=68398
87 sponsors_url = 'https://apps.azleg.gov/api/BillSponsor/?id={}'.format(internal_id)
88 page = json.loads(self.get(sponsors_url).content.decode('utf-8'))
89 for sponsor in page:
90 if 'Prime' in sponsor['SponsorType']:
91 sponsor_type = 'primary'
92 else:
93 sponsor_type = 'cosponsor'
94
95 # Some older bills don't have the FullName key
96 if 'FullName' in sponsor['Legislator']:
97 sponsor_name = sponsor['Legislator']['FullName']
98 else:
99 sponsor_name = "{} {}".format(
100 sponsor['Legislator']['FirstName'],
101 sponsor['Legislator']['LastName'],
102 )
103 bill.add_sponsorship(
104 classification=str(sponsor_type),
105 name=sponsor_name,
106 entity_type='person',
107 primary=sponsor_type == 'primary'
108 )
109
110 def scrape_subjects(self, bill, internal_id):
111 # https://apps.azleg.gov/api/Keyword/?billStatusId=68149
112 subjects_url = 'https://apps.azleg.gov/api/Keyword/?billStatusId={}'.format(internal_id)
113 page = json.loads(self.get(subjects_url).content.decode('utf-8'))
114 for subject in page:
115 bill.add_subject(subject['Name'])
116
117 def scrape_actions(self, bill, page, self_chamber):
118 """
119 Scrape the actions for a given bill
120
121 AZ No longer provides a full list, just a series of keys and dates.
122 So map that backwards using action_map
123 """
124 for status in page['BillStatusAction']:
125 if status['Action'] in utils.status_action_map:
126 category = utils.status_action_map[status['Action']]
127 if status['Committee']['TypeName'] == 'Floor':
128 categories = [category]
129 if status['Committee']['CommitteeShortName'] == 'THIRD':
130 categories.append('reading-3')
131 elif status['Committee']['TypeName'] == 'Standing':
132 categories = ['committee-{}'.format(category)]
133 else:
134 raise ValueError(
135 'Unexpected committee type: {}'.format(status['Committee']['TypeName']))
136 action_date = datetime.datetime.strptime(
137 status['ReportDate'], '%Y-%m-%dT%H:%M:%S').strftime('%Y-%m-%d')
138 bill.add_action(
139 description=status['Action'],
140 chamber={
141 'S': 'upper',
142 'H': 'lower',
143 }[status['Committee']['LegislativeBody']],
144 date=action_date,
145 classification=categories,
146 )
147 for action in utils.action_map:
148 if page[action] and utils.action_map[action]['name'] != '':
149 try:
150 action_date = datetime.datetime.strptime(
151 page[action], '%Y-%m-%dT%H:%M:%S').strftime('%Y-%m-%d')
152
153 bill.add_action(
154 chamber=self.actor_from_action(bill, action, self_chamber),
155 description=utils.action_map[action]['name'],
156 date=action_date,
157 classification=utils.action_map[action]['action'],
158 )
159 except (ValueError, TypeError):
160 self.info("Invalid Action Time {} for {}".format(page[action], action))
161
162 # Governor Signs and Vetos get different treatment
163 if page['GovernorAction'] == 'Signed':
164 action_date = page['GovernorActionDate'].split('T')[0]
165 bill.add_action(
166 chamber='executive',
167 description='Signed by Governor',
168 date=action_date,
169 classification='executive-signature',
170 )
171
172 if page['GovernorAction'] == 'Vetoed':
173 action_date = page['GovernorActionDate'].split('T')[0]
174 bill.add_action(
175 chamber='executive',
176 description='Vetoed by Governor',
177 date=action_date,
178 classification='executive-veto',
179 )
180
181 # Transmit to (X) has its own data structure as well
182 for transmit in page['BodyTransmittedTo']:
183 action_date = transmit['TransmitDate'].split('T')[0]
184 # upper, lower, executive
185 action_actor = self.chamber_map_rev[transmit['LegislativeBody']]
186 # house, senate, governor
187 body_text = self.chamber_map_rev_eng[transmit['LegislativeBody']]
188
189 action_text = 'Transmit to {}'.format(body_text)
190
191 if action_actor == 'executive':
192 action_type = 'executive-receipt'
193 else:
194 action_type = None
195
196 bill.add_action(
197 chamber=action_actor,
198 description=action_text,
199 date=action_date,
200 classification=action_type
201 )
202
203 def actor_from_action(self, bill, action, self_chamber):
204 """
205 Determine the actor from the action key
206 If the action_map = 'chamber', return the bill's home chamber
207 """
208 action_map = utils.action_chamber_map
209 for key in action_map:
210 if key in action:
211 if action_map[key] == 'chamber':
212 return self_chamber
213 else:
214 return action_map[key]
215
216 def scrape(self, chamber=None, session=None):
217 if not session:
218 session = self.latest_session()
219 self.info('no session specified, using %s', session)
220 session_id = session_metadata.session_id_meta_data[session]
221
222 # Get the bills page to start the session
223 req = self.get('https://www.azleg.gov/bills/')
224
225 session_form_url = 'https://www.azleg.gov/azlegwp/setsession.php'
226 form = {
227 'sessionID': session_id
228 }
229 req = self.post(url=session_form_url, data=form, cookies=req.cookies, allow_redirects=True)
230
231 bill_list_url = 'https://www.azleg.gov/bills/'
232
233 page = self.get(bill_list_url, cookies=req.cookies).content
234 # There's an errant close-comment that browsers handle
235 # but LXML gets really confused.
236 page = page.replace(b'--!>', b'-->')
237 page = html.fromstring(page)
238
239 bill_rows = []
240 chambers = [chamber] if chamber else ['upper', 'lower']
241 for chamber in chambers:
242 if chamber == 'lower':
243 bill_rows = page.xpath('//div[@name="HBTable"]//tbody//tr')
244 else:
245 bill_rows = page.xpath('//div[@name="SBTable"]//tbody//tr')
246 for row in bill_rows:
247 bill_id = row.xpath('td/a/text()')[0]
248 yield from self.scrape_bill(chamber, session, bill_id, session_id)
249
250 # TODO: MBTable - Non-bill Misc Motions?
251
252 def sort_bill_actions(self, bill):
253 actions = bill.actions
254 actions_list = []
255 out_of_order = []
256 new_list = []
257 if not actions:
258 return bill
259 action_date = actions[0]['date']
260 actions[0]['description'] = actions[0]['description'].lower()
261 actions_list.append(actions[0])
262 # seperate the actions that are out of order
263 for action in actions[1:]:
264 if action['date'] < action_date:
265 out_of_order.append(action)
266 else:
267 actions_list.append(action)
268 action_date = action['date']
269 action['description'] = action['description'].lower()
270 action_date = actions_list[0]['date']
271
272 for action in actions_list:
273 # this takes care of the actions in beween
274 for act in out_of_order:
275 if act['date'] < action_date:
276 o_index = out_of_order.index(act)
277 new_list.append(out_of_order.pop(o_index))
278 if act['date'] >= action_date and act['date'] < action['date']:
279 o_index = out_of_order.index(act)
280 new_list.append(out_of_order.pop(o_index))
281 new_list.append(action)
282
283 for act in out_of_order:
284 if act['date'] == action['date']:
285 o_index = out_of_order.index(act)
286 new_list.append(out_of_order.pop(o_index))
287
288 if out_of_order != []:
289 self.info("Unable to sort " + bill.identifier)
290 else:
291 bill.actions = new_list
292
293 def get_bill_type(self, bill_id):
294 for key in utils.bill_types:
295 if key in bill_id.lower():
296 return utils.bill_types[key]
297 return None
298
[end of openstates/az/bills.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/az/bills.py b/openstates/az/bills.py
--- a/openstates/az/bills.py
+++ b/openstates/az/bills.py
@@ -1,12 +1,12 @@
import json
import datetime
-from pupa.scrape import Scraper, Bill
+from lxml import html
+from pupa.scrape import Scraper, Bill, VoteEvent
+
from . import utils
from . import session_metadata
-from lxml import html
-
BASE_URL = 'https://www.azleg.gov/'
@@ -43,6 +43,7 @@
self.scrape_versions_and_documents(bill, internal_id)
self.scrape_sponsors(bill, internal_id)
self.scrape_subjects(bill, internal_id)
+ yield from self.scrape_votes(bill, page)
bill_url = 'https://apps.azleg.gov/BillStatus/BillOverview/{}?SessionId={}'.format(
internal_id, session_id)
@@ -213,6 +214,52 @@
else:
return action_map[key]
+ def scrape_votes(self, bill, page):
+ base_url = 'https://apps.azleg.gov/api/BillStatusFloorAction'
+ for header in page['FloorHeaders']:
+ params = {
+ 'billStatusId': page['BillId'],
+ 'billStatusActionId': header['BillStatusActionId'],
+ 'includeVotes': 'true',
+ }
+ resp = self.get(base_url, params=params)
+ actions = resp.json
+ for action in actions:
+ if action['Action'] == 'No Action':
+ continue
+ action_date = datetime.datetime.strptime(action['ReportDate'], '%Y-%m-%dT%H:%M:%S')
+ vote = VoteEvent(
+ chamber={
+ 'S': 'upper',
+ 'H': 'lower',
+ }[header['LegislativeBody']],
+ motion_text=action['Action'],
+ classification='passage',
+ result=(
+ 'pass'
+ if action['UnanimouslyAdopted'] or action['Ayes'] > action['Nays']
+ else 'fail'
+ ),
+ start_date=action_date.strftime('%Y-%m-%d'),
+ bill=bill,
+ )
+ vote.add_source(resp.url)
+ vote.set_count('yes', action['Ayes'] or 0)
+ vote.set_count('no', action['Nays'] or 0)
+ vote.set_count('other', (action['Present'] or 0))
+ vote.set_count('absent', (action['Absent'] or 0))
+ vote.set_count('excused', (action['Excused'] or 0))
+                vote.set_count('not voting', (action['NotVoting'] or 0))
+
+ for v in action['Votes']:
+ vote_type = {
+ 'Y': 'yes',
+ 'N': 'no',
+ }.get(v['Vote'], 'other')
+ vote.vote(vote_type, v['Legislator']['FullName'])
+
+ yield vote
+
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
| {"golden_diff": "diff --git a/openstates/az/bills.py b/openstates/az/bills.py\n--- a/openstates/az/bills.py\n+++ b/openstates/az/bills.py\n@@ -1,12 +1,12 @@\n import json\n import datetime\n \n-from pupa.scrape import Scraper, Bill\n+from lxml import html\n+from pupa.scrape import Scraper, Bill, VoteEvent\n+\n from . import utils\n from . import session_metadata\n \n-from lxml import html\n-\n \n BASE_URL = 'https://www.azleg.gov/'\n \n@@ -43,6 +43,7 @@\n self.scrape_versions_and_documents(bill, internal_id)\n self.scrape_sponsors(bill, internal_id)\n self.scrape_subjects(bill, internal_id)\n+ yield from self.scrape_votes(bill, page)\n \n bill_url = 'https://apps.azleg.gov/BillStatus/BillOverview/{}?SessionId={}'.format(\n internal_id, session_id)\n@@ -213,6 +214,52 @@\n else:\n return action_map[key]\n \n+ def scrape_votes(self, bill, page):\n+ base_url = 'https://apps.azleg.gov/api/BillStatusFloorAction'\n+ for header in page['FloorHeaders']:\n+ params = {\n+ 'billStatusId': page['BillId'],\n+ 'billStatusActionId': header['BillStatusActionId'],\n+ 'includeVotes': 'true',\n+ }\n+ resp = self.get(base_url, params=params)\n+ actions = resp.json\n+ for action in actions:\n+ if action['Action'] == 'No Action':\n+ continue\n+ action_date = datetime.datetime.strptime(action['ReportDate'], '%Y-%m-%dT%H:%M:%S')\n+ vote = VoteEvent(\n+ chamber={\n+ 'S': 'upper',\n+ 'H': 'lower',\n+ }[header['LegislativeBody']],\n+ motion_text=action['Action'],\n+ classification='passage',\n+ result=(\n+ 'pass'\n+ if action['UnanimouslyAdopted'] or action['Ayes'] > action['Nays']\n+ else 'fail'\n+ ),\n+ start_date=action_date.strftime('%Y-%m-%d'),\n+ bill=bill,\n+ )\n+ vote.add_source(resp.url)\n+ vote.set_count('yes', action['Ayes'] or 0)\n+ vote.set_count('no', action['Nays'] or 0)\n+ vote.set_count('other', (action['Present'] or 0))\n+ vote.set_count('absent', (action['Absent'] or 0))\n+ vote.set_count('excused', (action['Excused'] or 0))\n+ vote.set_count('not voting' (action['NotVoting'] or 0))\n+\n+ for v in action['Votes']:\n+ vote_type = {\n+ 'Y': 'yes',\n+ 'N': 'no',\n+ }.get(v['Vote'], 'other')\n+ vote.vote(vote_type, v['Legislator']['FullName'])\n+\n+ yield vote\n+\n def scrape(self, chamber=None, session=None):\n if not session:\n session = self.latest_session()\n", "issue": "AZ needs a new vote scraper\nold bill scraper included Votes, new one doesn't\n\nmight be best to wait and do this as part of pupa version\n", "before_files": [{"content": "import json\nimport datetime\n\nfrom pupa.scrape import Scraper, Bill\nfrom . import utils\nfrom . 
import session_metadata\n\nfrom lxml import html\n\n\nBASE_URL = 'https://www.azleg.gov/'\n\n\nclass AZBillScraper(Scraper):\n chamber_map = {'lower': 'H', 'upper': 'S'}\n chamber_map_rev = {'H': 'upper', 'S': 'lower', 'G': 'executive', 'SS': 'executive'}\n chamber_map_rev_eng = {'H': 'House', 'S': 'Senate', 'G': 'Governor',\n 'SS': 'Secretary of State'}\n\n def scrape_bill(self, chamber, session, bill_id, session_id):\n bill_json_url = 'https://apps.azleg.gov/api/Bill/?billNumber={}&sessionId={}&' \\\n 'legislativeBody={}'.format(bill_id, session_id, self.chamber_map[chamber])\n response = self.get(bill_json_url)\n page = json.loads(response.content.decode('utf-8'))\n\n if not page:\n self.warning('null page for %s', bill_id)\n return\n\n bill_title = page['ShortTitle']\n bill_id = page['Number']\n internal_id = page['BillId']\n bill_type = self.get_bill_type(bill_id)\n bill = Bill(\n bill_id,\n legislative_session=session,\n chamber=chamber,\n title=bill_title,\n classification=bill_type,\n )\n\n self.scrape_actions(bill, page, chamber)\n self.scrape_versions_and_documents(bill, internal_id)\n self.scrape_sponsors(bill, internal_id)\n self.scrape_subjects(bill, internal_id)\n\n bill_url = 'https://apps.azleg.gov/BillStatus/BillOverview/{}?SessionId={}'.format(\n internal_id, session_id)\n bill.add_source(bill_url)\n\n self.sort_bill_actions(bill)\n\n yield bill\n\n def scrape_versions_and_documents(self, bill, internal_id):\n # Careful, this sends XML to a browser but JSON to machines\n # https://apps.azleg.gov/api/DocType/?billStatusId=68408\n versions_url = 'https://apps.azleg.gov/api/DocType/?billStatusId={}'.format(internal_id)\n page = json.loads(self.get(versions_url).content.decode('utf-8'))\n for document_set in page:\n type_ = document_set['DocumentGroupName']\n for doc in document_set['Documents']:\n media_type = 'text/html' if doc['HtmlPath'] else 'application/pdf'\n url = doc['HtmlPath'] or doc['PdfPath']\n if not url:\n self.warning(\"No PDF or HTML version found for %s\" % doc['DocumentName'])\n # Sometimes the URL is just a relative path; make it absolute\n if not url.startswith('http'):\n url = 'https://apps.azleg.gov{}'.format(url)\n\n if type_ == 'Bill Versions':\n bill.add_version_link(\n note=doc['DocumentName'],\n url=url,\n media_type=media_type\n )\n else:\n bill.add_document_link(\n note=doc['DocumentName'],\n url=url,\n media_type=media_type\n )\n\n def scrape_sponsors(self, bill, internal_id):\n # Careful, this sends XML to a browser but JSON to machines\n # https://apps.azleg.gov/api/BillSponsor/?id=68398\n sponsors_url = 'https://apps.azleg.gov/api/BillSponsor/?id={}'.format(internal_id)\n page = json.loads(self.get(sponsors_url).content.decode('utf-8'))\n for sponsor in page:\n if 'Prime' in sponsor['SponsorType']:\n sponsor_type = 'primary'\n else:\n sponsor_type = 'cosponsor'\n\n # Some older bills don't have the FullName key\n if 'FullName' in sponsor['Legislator']:\n sponsor_name = sponsor['Legislator']['FullName']\n else:\n sponsor_name = \"{} {}\".format(\n sponsor['Legislator']['FirstName'],\n sponsor['Legislator']['LastName'],\n )\n bill.add_sponsorship(\n classification=str(sponsor_type),\n name=sponsor_name,\n entity_type='person',\n primary=sponsor_type == 'primary'\n )\n\n def scrape_subjects(self, bill, internal_id):\n # https://apps.azleg.gov/api/Keyword/?billStatusId=68149\n subjects_url = 'https://apps.azleg.gov/api/Keyword/?billStatusId={}'.format(internal_id)\n page = json.loads(self.get(subjects_url).content.decode('utf-8'))\n for subject 
in page:\n bill.add_subject(subject['Name'])\n\n def scrape_actions(self, bill, page, self_chamber):\n \"\"\"\n Scrape the actions for a given bill\n\n AZ No longer provides a full list, just a series of keys and dates.\n So map that backwards using action_map\n \"\"\"\n for status in page['BillStatusAction']:\n if status['Action'] in utils.status_action_map:\n category = utils.status_action_map[status['Action']]\n if status['Committee']['TypeName'] == 'Floor':\n categories = [category]\n if status['Committee']['CommitteeShortName'] == 'THIRD':\n categories.append('reading-3')\n elif status['Committee']['TypeName'] == 'Standing':\n categories = ['committee-{}'.format(category)]\n else:\n raise ValueError(\n 'Unexpected committee type: {}'.format(status['Committee']['TypeName']))\n action_date = datetime.datetime.strptime(\n status['ReportDate'], '%Y-%m-%dT%H:%M:%S').strftime('%Y-%m-%d')\n bill.add_action(\n description=status['Action'],\n chamber={\n 'S': 'upper',\n 'H': 'lower',\n }[status['Committee']['LegislativeBody']],\n date=action_date,\n classification=categories,\n )\n for action in utils.action_map:\n if page[action] and utils.action_map[action]['name'] != '':\n try:\n action_date = datetime.datetime.strptime(\n page[action], '%Y-%m-%dT%H:%M:%S').strftime('%Y-%m-%d')\n\n bill.add_action(\n chamber=self.actor_from_action(bill, action, self_chamber),\n description=utils.action_map[action]['name'],\n date=action_date,\n classification=utils.action_map[action]['action'],\n )\n except (ValueError, TypeError):\n self.info(\"Invalid Action Time {} for {}\".format(page[action], action))\n\n # Governor Signs and Vetos get different treatment\n if page['GovernorAction'] == 'Signed':\n action_date = page['GovernorActionDate'].split('T')[0]\n bill.add_action(\n chamber='executive',\n description='Signed by Governor',\n date=action_date,\n classification='executive-signature',\n )\n\n if page['GovernorAction'] == 'Vetoed':\n action_date = page['GovernorActionDate'].split('T')[0]\n bill.add_action(\n chamber='executive',\n description='Vetoed by Governor',\n date=action_date,\n classification='executive-veto',\n )\n\n # Transmit to (X) has its own data structure as well\n for transmit in page['BodyTransmittedTo']:\n action_date = transmit['TransmitDate'].split('T')[0]\n # upper, lower, executive\n action_actor = self.chamber_map_rev[transmit['LegislativeBody']]\n # house, senate, governor\n body_text = self.chamber_map_rev_eng[transmit['LegislativeBody']]\n\n action_text = 'Transmit to {}'.format(body_text)\n\n if action_actor == 'executive':\n action_type = 'executive-receipt'\n else:\n action_type = None\n\n bill.add_action(\n chamber=action_actor,\n description=action_text,\n date=action_date,\n classification=action_type\n )\n\n def actor_from_action(self, bill, action, self_chamber):\n \"\"\"\n Determine the actor from the action key\n If the action_map = 'chamber', return the bill's home chamber\n \"\"\"\n action_map = utils.action_chamber_map\n for key in action_map:\n if key in action:\n if action_map[key] == 'chamber':\n return self_chamber\n else:\n return action_map[key]\n\n def scrape(self, chamber=None, session=None):\n if not session:\n session = self.latest_session()\n self.info('no session specified, using %s', session)\n session_id = session_metadata.session_id_meta_data[session]\n\n # Get the bills page to start the session\n req = self.get('https://www.azleg.gov/bills/')\n\n session_form_url = 'https://www.azleg.gov/azlegwp/setsession.php'\n form = {\n 'sessionID': 
session_id\n }\n req = self.post(url=session_form_url, data=form, cookies=req.cookies, allow_redirects=True)\n\n bill_list_url = 'https://www.azleg.gov/bills/'\n\n page = self.get(bill_list_url, cookies=req.cookies).content\n # There's an errant close-comment that browsers handle\n # but LXML gets really confused.\n page = page.replace(b'--!>', b'-->')\n page = html.fromstring(page)\n\n bill_rows = []\n chambers = [chamber] if chamber else ['upper', 'lower']\n for chamber in chambers:\n if chamber == 'lower':\n bill_rows = page.xpath('//div[@name=\"HBTable\"]//tbody//tr')\n else:\n bill_rows = page.xpath('//div[@name=\"SBTable\"]//tbody//tr')\n for row in bill_rows:\n bill_id = row.xpath('td/a/text()')[0]\n yield from self.scrape_bill(chamber, session, bill_id, session_id)\n\n # TODO: MBTable - Non-bill Misc Motions?\n\n def sort_bill_actions(self, bill):\n actions = bill.actions\n actions_list = []\n out_of_order = []\n new_list = []\n if not actions:\n return bill\n action_date = actions[0]['date']\n actions[0]['description'] = actions[0]['description'].lower()\n actions_list.append(actions[0])\n # seperate the actions that are out of order\n for action in actions[1:]:\n if action['date'] < action_date:\n out_of_order.append(action)\n else:\n actions_list.append(action)\n action_date = action['date']\n action['description'] = action['description'].lower()\n action_date = actions_list[0]['date']\n\n for action in actions_list:\n # this takes care of the actions in beween\n for act in out_of_order:\n if act['date'] < action_date:\n o_index = out_of_order.index(act)\n new_list.append(out_of_order.pop(o_index))\n if act['date'] >= action_date and act['date'] < action['date']:\n o_index = out_of_order.index(act)\n new_list.append(out_of_order.pop(o_index))\n new_list.append(action)\n\n for act in out_of_order:\n if act['date'] == action['date']:\n o_index = out_of_order.index(act)\n new_list.append(out_of_order.pop(o_index))\n\n if out_of_order != []:\n self.info(\"Unable to sort \" + bill.identifier)\n else:\n bill.actions = new_list\n\n def get_bill_type(self, bill_id):\n for key in utils.bill_types:\n if key in bill_id.lower():\n return utils.bill_types[key]\n return None\n", "path": "openstates/az/bills.py"}]} | 3,924 | 721 |
gh_patches_debug_23936 | rasdani/github-patches | git_diff | biolab__orange3-text-413 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Topic Modeling: HDP fails
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.6.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.21.dev
##### Expected behavior
HDP works.
##### Actual behavior
HDP crashes the widget.
##### Steps to reproduce the behavior
Corpus - Topic Modeling. Select HDP.
##### Additional info (worksheets, data, screenshots, ...)
Upon checking the code, the model seems to be fitted several times instead of once. The first result is empty, hence the widget fails. Yet to debug further. 
</issue>
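The note above points at the model being fit repeatedly and the first result coming back empty. A hedged way to probe the gensim side of that hunch is sketched below; the toy corpus and the expectation that `HdpModel` treats `show_topics(-1, ...)` differently from `LdaModel` are assumptions to check, not facts taken from the report.

```python
# Hedged probe of the gensim call the wrapper relies on; nothing here is taken
# from the report except the suspicion that HDP behaves differently from LDA.
from gensim.corpora import Dictionary
from gensim.models import HdpModel, LdaModel

texts = [
    ["human", "interface", "computer"],
    ["survey", "user", "computer", "system", "response", "time"],
    ["graph", "trees", "minors", "survey"],
]
dictionary = Dictionary(texts)
bow = [dictionary.doc2bow(text) for text in texts]

lda = LdaModel(bow, id2word=dictionary, num_topics=2)
hdp = HdpModel(bow, id2word=dictionary)

# If the widget only breaks for HDP, these two lengths would be expected to differ.
print(len(lda.show_topics(-1, 5, formatted=False)))
print(len(hdp.show_topics(-1, 5, formatted=False)))
```

If the HDP call really does come back empty or with an unexpected number of topics, passing an explicit topic count instead of `-1` in the wrapper would be one direction to look at.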
<code>
[start of orangecontrib/text/topics/topics.py]
1 from gensim import matutils
2 import numpy as np
3 from gensim.corpora import Dictionary
4
5 from Orange.data import StringVariable, ContinuousVariable, Domain
6 from Orange.data.table import Table
7 from orangecontrib.text.corpus import Corpus
8 from orangecontrib.text.util import chunkable
9
10
11 MAX_WORDS = 1000
12
13
14 class Topic(Table):
15 """ Dummy wrapper for Table so signals can distinguish Topic from Data.
16 """
17
18 def __new__(cls, *args, **kwargs):
19 """ Bypass Table.__new__. """
20 return object.__new__(Topic)
21
22
23 class GensimWrapper:
24 name = NotImplemented
25 Model = NotImplemented
26 num_topics = NotImplemented
27     has_negative_weights = False  # whether words can negatively contribute to a topic
28
29 def __init__(self, **kwargs):
30 for k, v in kwargs.items():
31 setattr(self, k, v)
32 self.kwargs = kwargs
33 self.model = None
34 self.topic_names = []
35 self.n_words = 0
36 self.running = False
37
38 def fit(self, corpus, **kwargs):
39 """ Train the model with the corpus.
40
41 Args:
42 corpus (Corpus): A corpus to learn topics from.
43 """
44 if not len(corpus.dictionary):
45 return None
46 self.reset_model(corpus)
47 self.running = True
48 self.update(corpus.ngrams_corpus, **kwargs)
49 self.n_words = len(corpus.dictionary)
50 self.topic_names = ['Topic {}'.format(i+1)
51 for i in range(self.num_topics)]
52 self.running = False
53
54 def dummy_method(self, *args, **kwargs):
55 pass
56
57 def reset_model(self, corpus):
58 # prevent model from updating
59 _update = self.Model.update
60 self.Model.update = self.dummy_method
61 self.id2word = Dictionary(corpus.ngrams_iterator(include_postags=True), prune_at=None)
62 self.model = self.Model(corpus=corpus,
63 id2word=self.id2word, **self.kwargs)
64 self.Model.update = _update
65
66 @chunkable
67 def update(self, documents):
68 self.model.update(documents)
69
70 def transform(self, corpus):
71 """ Create a table with topics representation. """
72 topics = self.model[corpus.ngrams_corpus]
73 matrix = matutils.corpus2dense(topics, num_docs=len(corpus),
74 num_terms=self.num_topics).T
75
76 corpus.extend_attributes(matrix[:, :len(self.topic_names)], self.topic_names)
77 return corpus
78
79 def fit_transform(self, corpus, **kwargs):
80 self.fit(corpus, **kwargs)
81 return self.transform(corpus)
82
83 def get_topics_table_by_id(self, topic_id):
84 """ Transform topics from gensim model to table. """
85 words = self._topics_words(MAX_WORDS)
86 weights = self._topics_weights(MAX_WORDS)
87 if topic_id >= len(words):
88 raise ValueError("Too large topic ID.")
89
90 num_words = len(words[topic_id])
91
92 data = np.zeros((num_words, 2), dtype=object)
93 data[:, 0] = words[topic_id]
94 data[:, 1] = weights[topic_id]
95
96 metas = [StringVariable(self.topic_names[topic_id]),
97 ContinuousVariable("Topic {} weights".format(topic_id + 1))]
98 metas[-1]._out_format = '%.2e'
99
100 domain = Domain([], metas=metas)
101 t = Topic.from_numpy(domain,
102 X=np.zeros((num_words, 0)),
103 metas=data)
104 t.W = data[:, 1]
105 t.name = 'Topic {}'.format(topic_id + 1)
106 return t
107
108 def get_all_topics_table(self):
109 """ Transform all topics from gensim model to table. """
110 all_words = self._topics_words(self.n_words)
111 all_weights = self._topics_weights(self.n_words)
112 sorted_words = sorted(all_words[0])
113 n_topics = len(all_words)
114
115 X = []
116 for words, weights in zip(all_words, all_weights):
117 weights = [we for wo, we in sorted(zip(words, weights))]
118 X.append(weights)
119 X = np.array(X).T
120
121 # take only first n_topics; e.g. when user requested 10, but gensim
122 # returns only 9 — when the rank is lower than num_topics requested
123 attrs = [ContinuousVariable(n)
124 for n in self.topic_names[:n_topics]]
125
126 t = Table.from_numpy(Domain(attrs, metas=[StringVariable('Word')]),
127 X=X, metas=np.array(sorted_words)[:, None])
128 t.name = 'All topics'
129 return t
130
131 def get_top_words_by_id(self, topic_id, num_of_words=10):
132 topics = self._topics_words(num_of_words=num_of_words)
133 weights = self._topics_weights(num_of_words=num_of_words)
134 if not 0 <= topic_id < self.num_topics:
135 raise ValueError("Invalid {}".format(topic_id))
136 elif topic_id >= len(topics):
137 return [], []
138 return topics[topic_id], weights[topic_id]
139
140 def _topics_words(self, num_of_words):
141 """ Returns list of list of topic words. """
142 x = self.model.show_topics(-1, num_of_words, formatted=False)
143 # `show_topics` method return a list of `(topic_number, topic)` tuples,
144 # where `topic` is a list of `(word, probability)` tuples.
145 return [[i[0] for i in topic[1]] for topic in x]
146
147 def _topics_weights(self, num_of_words):
148 """ Returns list of list of topic weights. """
149 topics = self.model.show_topics(-1, num_of_words, formatted=False)
150 # `show_topics` method return a list of `(topic_number, topic)` tuples,
151 # where `topic` is a list of `(word, probability)` tuples.
152 return [[i[1] for i in t[1]] for t in topics]
153
[end of orangecontrib/text/topics/topics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/orangecontrib/text/topics/topics.py b/orangecontrib/text/topics/topics.py
--- a/orangecontrib/text/topics/topics.py
+++ b/orangecontrib/text/topics/topics.py
@@ -139,14 +139,15 @@
def _topics_words(self, num_of_words):
""" Returns list of list of topic words. """
- x = self.model.show_topics(-1, num_of_words, formatted=False)
+ x = self.model.show_topics(self.num_topics, num_of_words, formatted=False)
# `show_topics` method return a list of `(topic_number, topic)` tuples,
# where `topic` is a list of `(word, probability)` tuples.
return [[i[0] for i in topic[1]] for topic in x]
def _topics_weights(self, num_of_words):
""" Returns list of list of topic weights. """
- topics = self.model.show_topics(-1, num_of_words, formatted=False)
+ topics = self.model.show_topics(self.num_topics, num_of_words,
+ formatted=False)
# `show_topics` method return a list of `(topic_number, topic)` tuples,
# where `topic` is a list of `(word, probability)` tuples.
return [[i[1] for i in t[1]] for t in topics]
| {"golden_diff": "diff --git a/orangecontrib/text/topics/topics.py b/orangecontrib/text/topics/topics.py\n--- a/orangecontrib/text/topics/topics.py\n+++ b/orangecontrib/text/topics/topics.py\n@@ -139,14 +139,15 @@\n \n def _topics_words(self, num_of_words):\n \"\"\" Returns list of list of topic words. \"\"\"\n- x = self.model.show_topics(-1, num_of_words, formatted=False)\n+ x = self.model.show_topics(self.num_topics, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[0] for i in topic[1]] for topic in x]\n \n def _topics_weights(self, num_of_words):\n \"\"\" Returns list of list of topic weights. \"\"\"\n- topics = self.model.show_topics(-1, num_of_words, formatted=False)\n+ topics = self.model.show_topics(self.num_topics, num_of_words,\n+ formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[1] for i in t[1]] for t in topics]\n", "issue": "Topic Modeling: HDP fails\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.6.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.21.dev\r\n\r\n##### Expected behavior\r\nHDP works.\r\n\r\n\r\n##### Actual behavior\r\nHDP crashes the widget.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nCorpus - Topic Modeling. Select HDP.\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\nUpon checking the code, model seems to be fit several times instead of once. The first result is empty, hence the widget fails. Yet to debug further. \r\n\r\n\n", "before_files": [{"content": "from gensim import matutils\nimport numpy as np\nfrom gensim.corpora import Dictionary\n\nfrom Orange.data import StringVariable, ContinuousVariable, Domain\nfrom Orange.data.table import Table\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.util import chunkable\n\n\nMAX_WORDS = 1000\n\n\nclass Topic(Table):\n \"\"\" Dummy wrapper for Table so signals can distinguish Topic from Data.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\" Bypass Table.__new__. 
\"\"\"\n return object.__new__(Topic)\n\n\nclass GensimWrapper:\n name = NotImplemented\n Model = NotImplemented\n num_topics = NotImplemented\n has_negative_weights = False # whether words can negatively contibute to a topic\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n self.kwargs = kwargs\n self.model = None\n self.topic_names = []\n self.n_words = 0\n self.running = False\n\n def fit(self, corpus, **kwargs):\n \"\"\" Train the model with the corpus.\n\n Args:\n corpus (Corpus): A corpus to learn topics from.\n \"\"\"\n if not len(corpus.dictionary):\n return None\n self.reset_model(corpus)\n self.running = True\n self.update(corpus.ngrams_corpus, **kwargs)\n self.n_words = len(corpus.dictionary)\n self.topic_names = ['Topic {}'.format(i+1)\n for i in range(self.num_topics)]\n self.running = False\n\n def dummy_method(self, *args, **kwargs):\n pass\n\n def reset_model(self, corpus):\n # prevent model from updating\n _update = self.Model.update\n self.Model.update = self.dummy_method\n self.id2word = Dictionary(corpus.ngrams_iterator(include_postags=True), prune_at=None)\n self.model = self.Model(corpus=corpus,\n id2word=self.id2word, **self.kwargs)\n self.Model.update = _update\n\n @chunkable\n def update(self, documents):\n self.model.update(documents)\n\n def transform(self, corpus):\n \"\"\" Create a table with topics representation. \"\"\"\n topics = self.model[corpus.ngrams_corpus]\n matrix = matutils.corpus2dense(topics, num_docs=len(corpus),\n num_terms=self.num_topics).T\n\n corpus.extend_attributes(matrix[:, :len(self.topic_names)], self.topic_names)\n return corpus\n\n def fit_transform(self, corpus, **kwargs):\n self.fit(corpus, **kwargs)\n return self.transform(corpus)\n\n def get_topics_table_by_id(self, topic_id):\n \"\"\" Transform topics from gensim model to table. \"\"\"\n words = self._topics_words(MAX_WORDS)\n weights = self._topics_weights(MAX_WORDS)\n if topic_id >= len(words):\n raise ValueError(\"Too large topic ID.\")\n\n num_words = len(words[topic_id])\n\n data = np.zeros((num_words, 2), dtype=object)\n data[:, 0] = words[topic_id]\n data[:, 1] = weights[topic_id]\n\n metas = [StringVariable(self.topic_names[topic_id]),\n ContinuousVariable(\"Topic {} weights\".format(topic_id + 1))]\n metas[-1]._out_format = '%.2e'\n\n domain = Domain([], metas=metas)\n t = Topic.from_numpy(domain,\n X=np.zeros((num_words, 0)),\n metas=data)\n t.W = data[:, 1]\n t.name = 'Topic {}'.format(topic_id + 1)\n return t\n\n def get_all_topics_table(self):\n \"\"\" Transform all topics from gensim model to table. \"\"\"\n all_words = self._topics_words(self.n_words)\n all_weights = self._topics_weights(self.n_words)\n sorted_words = sorted(all_words[0])\n n_topics = len(all_words)\n\n X = []\n for words, weights in zip(all_words, all_weights):\n weights = [we for wo, we in sorted(zip(words, weights))]\n X.append(weights)\n X = np.array(X).T\n\n # take only first n_topics; e.g. 
when user requested 10, but gensim\n # returns only 9 \u2014 when the rank is lower than num_topics requested\n attrs = [ContinuousVariable(n)\n for n in self.topic_names[:n_topics]]\n\n t = Table.from_numpy(Domain(attrs, metas=[StringVariable('Word')]),\n X=X, metas=np.array(sorted_words)[:, None])\n t.name = 'All topics'\n return t\n\n def get_top_words_by_id(self, topic_id, num_of_words=10):\n topics = self._topics_words(num_of_words=num_of_words)\n weights = self._topics_weights(num_of_words=num_of_words)\n if not 0 <= topic_id < self.num_topics:\n raise ValueError(\"Invalid {}\".format(topic_id))\n elif topic_id >= len(topics):\n return [], []\n return topics[topic_id], weights[topic_id]\n\n def _topics_words(self, num_of_words):\n \"\"\" Returns list of list of topic words. \"\"\"\n x = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[0] for i in topic[1]] for topic in x]\n\n def _topics_weights(self, num_of_words):\n \"\"\" Returns list of list of topic weights. \"\"\"\n topics = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[1] for i in t[1]] for t in topics]\n", "path": "orangecontrib/text/topics/topics.py"}]} | 2,360 | 288 |
gh_patches_debug_66238 | rasdani/github-patches | git_diff | deepchecks__deepchecks-728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] resources/suite_output.html file is missing when installing not via git
**Describe the bug**
can't use save_as_html because suite_output.html file is missing
**To Reproduce**
pip install deepchecks
suite_result.save_as_html()
**Expected behavior**
save as html
**Environment (please complete the following information):**
- OS: linux
- Python Version: 3.7
- Deepchecks Version: 0.3.1
</issue>
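One plausible packaging-side direction, sketched under the assumption that the HTML template lives inside the package tree; the `deepchecks.base` path below is a guess, not taken from this listing.

```python
# Hedged sketch: declare non-Python resources so pip installs include them.
# The sub-package holding suite_output.html is assumed, not confirmed here.
from setuptools import find_packages, setup

setup(
    name="deepchecks",
    packages=find_packages(),
    include_package_data=True,  # pulls in package files matched by MANIFEST.in
    package_data={"deepchecks.base": ["resources/*.html"]},
)
```

Together with a `recursive-include deepchecks *.html` line in `MANIFEST.in`, source distributions built outside the git checkout would still carry the template.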
<code>
[start of setup.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """
12
13 |build| |Documentation Status| |pkgVersion| |pyVersions|
14 |Maintainability| |Coverage Status|
15
16 .. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png
17 :target: https://github.com/deepchecks/deepchecks
18
19 Deepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.
20 This includes checks related to various types of issues, such as model performance, data integrity,
21 distribution mismatches, and more.
22
23 What Do You Need in Order to Start Validating?
24 ----------------------------------------------
25
26 Depending on your phase and what you wish to validate, you'll need a
27 subset of the following:
28
29 - Raw data (before pre-processing such as OHE, string processing,
30 etc.), with optional labels
31
32 - The model's training data with labels
33
34 - Test data (which the model isn't exposed to) with labels
35
36 - A model compatible with scikit-learn API that you wish to validate
37 (e.g. RandomForest, XGBoost)
38
39 Deepchecks validation accompanies you from the initial phase when you
40 have only raw data, through the data splits, and to the final stage of
41 having a trained model that you wish to evaluate. Accordingly, each
42 phase requires different assets for the validation. See more about
43 typical usage scenarios and the built-in suites in the
44 `docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.
45
46 Installation
47 ------------
48
49 Using pip
50 ~~~~~~~~~
51
52 .. code:: bash
53
54 pip install deepchecks #--upgrade --user
55
56 Using conda
57 ~~~~~~~~~~~
58
59 .. code:: bash
60
61 conda install -c deepchecks deepchecks
62
63 .. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg
64 .. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest
65 :target: https://docs.deepchecks.com/en/latest/?badge=latest
66 .. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks
67 .. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks
68 .. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability
69 :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability
70 .. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main
71 :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main
72
73 """
74
75 import setuptools
76 from setuptools import setup
77 from distutils.util import convert_path
78 import os
79
80 main_ns = {}
81 DOCLINES = (__doc__ or '').split("\n")
82
83 with open(os.path.join('./', 'VERSION')) as version_file:
84 VER = version_file.read().strip()
85
86 requirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'
87 install_requires = []
88 if os.path.isfile(requirementPath):
89 with open(requirementPath) as f:
90 install_requires = f.read().splitlines()
91
92
93
94
95 setup(
96 name='deepchecks',
97 version=VER,
98 packages=setuptools.find_packages(),
99 install_requires=install_requires,
100 license_files = ('LICENSE', ),
101 description = DOCLINES[0],
102 long_description="\n".join(DOCLINES[2:]),
103 author = 'deepchecks',
104 author_email = '[email protected]',
105 url = 'https://github.com/deepchecks/deepchecks',
106 download_url = "https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz".format(VER),
107 keywords = ['Software Development', 'Machine Learning'],
108 include_package_data=True,
109 classifiers = [
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Topic :: Software Development',
113 'Topic :: Scientific/Engineering',
114 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
115 'Programming Language :: Python :: 3',
116 'Programming Language :: Python :: 3.6',
117 'Programming Language :: Python :: 3.7',
118 'Programming Language :: Python :: 3.8',
119 'Programming Language :: Python :: 3.9',
120 'Programming Language :: Python :: 3.10',
121 ],
122 )
123
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,6 @@
import setuptools
from setuptools import setup
-from distutils.util import convert_path
import os
main_ns = {}
@@ -89,9 +88,6 @@
with open(requirementPath) as f:
install_requires = f.read().splitlines()
-
-
-
setup(
name='deepchecks',
version=VER,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,7 +74,6 @@\n \n import setuptools\n from setuptools import setup\n-from distutils.util import convert_path\n import os\n \n main_ns = {}\n@@ -89,9 +88,6 @@\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n \n-\n-\n-\n setup(\n name='deepchecks',\n version=VER,\n", "issue": "[BUG] resources/suite_output.html file is missing when installing not via git\n**Describe the bug**\r\ncan't use save_as_html because suite_output.html file is missing\r\n\r\n**To Reproduce**\r\npip install deepchecks\r\nsuite_result.save_as_html()\r\n\r\n**Expected behavior**\r\nsave as html\r\n\r\n**Environment (please complete the following information):**\r\n - OS: linux\r\n - Python Version: 3.7\r\n - Deepchecks Version: 0.3.1\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. 
|Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nfrom distutils.util import convert_path\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\n\n\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}]} | 1,981 | 105 |
gh_patches_debug_4106 | rasdani/github-patches | git_diff | hylang__hy-1955 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make hy-history location configurable
How about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.
</issue>
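A minimal sketch of the proposed behaviour, with the variable name and the fallback path taken from the proposal above; the helper function is just one convenient way to slot it into the existing readline setup.

```python
# Sketch only: resolve the history file, preferring an HY_HISTORY override.
import os


def history_path() -> str:
    return os.environ.get("HY_HISTORY", os.path.expanduser("~/.hy-history"))


# Inside the completion() context manager this would replace the hard-coded
# os.path.expanduser("~/.hy-history") before read_history_file/write_history_file.
```

One open question is whether an empty `HY_HISTORY` should be treated as unset; as written, `os.environ.get` would return the empty string.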
<code>
[start of hy/completer.py]
1 # Copyright 2021 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9 import builtins
10
11 import hy.macros
12 import hy.compiler
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if docomplete:
28 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
29 readline_bind = "bind ^I rl_complete"
30 else:
31 readline_bind = "tab: complete"
32
33
34 class Completer(object):
35
36 def __init__(self, namespace={}):
37 if not isinstance(namespace, dict):
38 raise TypeError('namespace must be a dictionary')
39 self.namespace = namespace
40 self.path = [hy.compiler._special_form_compilers,
41 builtins.__dict__,
42 namespace]
43
44 self.tag_path = []
45
46 namespace.setdefault('__macros__', {})
47 namespace.setdefault('__tags__', {})
48
49 self.path.append(namespace['__macros__'])
50 self.tag_path.append(namespace['__tags__'])
51
52 def attr_matches(self, text):
53 # Borrowed from IPython's completer
54 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
55
56 if m:
57 expr, attr = m.group(1, 3)
58 attr = attr.replace("-", "_")
59 expr = expr.replace("-", "_")
60 else:
61 return []
62
63 try:
64 obj = eval(expr, self.namespace)
65 words = dir(obj)
66 except Exception:
67 return []
68
69 n = len(attr)
70 matches = []
71 for w in words:
72 if w[:n] == attr:
73 matches.append("{}.{}".format(
74 expr.replace("_", "-"), w.replace("_", "-")))
75 return matches
76
77 def global_matches(self, text):
78 matches = []
79 for p in self.path:
80 for k in p.keys():
81 if isinstance(k, str):
82 k = k.replace("_", "-")
83 if k.startswith(text):
84 matches.append(k)
85 return matches
86
87 def tag_matches(self, text):
88 text = text[1:]
89 matches = []
90 for p in self.tag_path:
91 for k in p.keys():
92 if isinstance(k, str):
93 if k.startswith(text):
94 matches.append("#{}".format(k))
95 return matches
96
97 def complete(self, text, state):
98 if text.startswith("#"):
99 matches = self.tag_matches(text)
100 elif "." in text:
101 matches = self.attr_matches(text)
102 else:
103 matches = self.global_matches(text)
104 try:
105 return matches[state]
106 except IndexError:
107 return None
108
109
110 @contextlib.contextmanager
111 def completion(completer=None):
112 delims = "()[]{} "
113 if not completer:
114 completer = Completer()
115
116 if docomplete:
117 readline.set_completer(completer.complete)
118 readline.set_completer_delims(delims)
119
120 history = os.path.expanduser("~/.hy-history")
121 readline.parse_and_bind("set blink-matching-paren on")
122
123 try:
124 readline.read_history_file(history)
125 except IOError:
126 pass
127
128 readline.parse_and_bind(readline_bind)
129
130 try:
131 yield
132 finally:
133 if docomplete:
134 try:
135 readline.write_history_file(history)
136 except IOError:
137 pass
138
[end of hy/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -117,7 +117,8 @@
readline.set_completer(completer.complete)
readline.set_completer_delims(delims)
- history = os.path.expanduser("~/.hy-history")
+ history = os.environ.get(
+ "HY_HISTORY", os.path.expanduser("~/.hy-history"))
readline.parse_and_bind("set blink-matching-paren on")
try:
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -117,7 +117,8 @@\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n \n- history = os.path.expanduser(\"~/.hy-history\")\n+ history = os.environ.get(\n+ \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n \n try:\n", "issue": "Make hy-history location configurable\nHow about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.\n", "before_files": [{"content": "# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._special_form_compilers,\n builtins.__dict__,\n namespace]\n\n self.tag_path = []\n\n namespace.setdefault('__macros__', {})\n namespace.setdefault('__tags__', {})\n\n self.path.append(namespace['__macros__'])\n self.tag_path.append(namespace['__tags__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, str):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except IOError:\n pass\n", "path": 
"hy/completer.py"}]} | 1,644 | 127 |
gh_patches_debug_28925 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise error when using Scalars (or non strawberry types) with Union
If we do any of the following:
```python
X = strawberry.Union("X", (int, str))
@strawberry.type
class A:
field: Union[int, str]
@strawberry.type
class A:
field: Union[NonStrawberryTypeA, NonStrawberryTypeB]
```
we should get an error informing that:
1. you can't use unions with scalars (not supported by GraphQL)
2. you can only use union with strawberry types
</issue>
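A hedged sketch of what the requested validation could look like; the `SCALAR_TYPES` tuple, the `_type_definition` attribute check, and the exception name are assumptions about Strawberry's internals rather than its actual API.

```python
# Sketch only: names below are assumptions, not Strawberry's real API surface.
from typing import Tuple, Type, TypeVar

SCALAR_TYPES = (int, str, float, bool, bytes)  # assumed built-in scalar set


class InvalidUnionType(Exception):
    """Raised when a union is built from a scalar or a non-Strawberry type."""


def validate_union_types(name: str, types: Tuple[Type, ...]) -> None:
    if not types:
        raise TypeError(f"No types passed to union {name!r}")
    for type_ in types:
        if type_ in SCALAR_TYPES:
            raise InvalidUnionType(
                f"Scalar type `{type_.__name__}` cannot be used in a GraphQL Union"
            )
        if not isinstance(type_, TypeVar) and not hasattr(type_, "_type_definition"):
            raise InvalidUnionType(
                f"Union type `{type_.__name__}` is not a Strawberry type"
            )
```

In this sketch the check would run at the top of `union()` before the `StrawberryUnion` is created; covering `typing.Union` annotations as well would need the same check wherever those annotations are resolved.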
<code>
[start of strawberry/union.py]
1 from typing import NoReturn, Optional, Tuple, Type
2
3
4 class StrawberryUnion:
5 def __init__(
6 self, name: str, types: Tuple[Type, ...], description: Optional[str] = None
7 ):
8 self.name = name
9 self._types = types
10 self.description = description
11
12 @property
13 def types(self) -> Tuple[Type, ...]:
14 from .types.type_resolver import _resolve_generic_type
15
16 types = tuple(
17 _resolve_generic_type(t, self.name)
18 for t in self._types
19 if t is not None.__class__
20 )
21
22 return types
23
24 def __call__(self, *_args, **_kwargs) -> NoReturn:
25 """Do not use.
26
27 Used to bypass
28 https://github.com/python/cpython/blob/5efb1a77e75648012f8b52960c8637fc296a5c6d/Lib/typing.py#L148-L149
29 """
30 raise ValueError("Cannot use union type directly")
31
32
33 def union(
34 name: str, types: Tuple[Type, ...], *, description: str = None
35 ) -> StrawberryUnion:
36 """Creates a new named Union type.
37
38 Example usages:
39
40 >>> strawberry.union("Some Thing", (int, str))
41
42 >>> @strawberry.type
43 ... class A: ...
44 >>> @strawberry.type
45 ... class B: ...
46 >>> strawberry.union("Name", (A, Optional[B]))
47 """
48
49 union_definition = StrawberryUnion(name=name, types=types, description=description)
50
51 return union_definition
52
[end of strawberry/union.py]
[start of strawberry/exceptions.py]
1 # TODO: add links to docs
2
3 from typing import List, Set
4
5 from graphql import GraphQLObjectType
6
7
8 class NotAnEnum(Exception):
9 def __init__(self):
10 message = "strawberry.enum can only be used with subclasses of Enum"
11
12 super().__init__(message)
13
14
15 class MissingReturnAnnotationError(Exception):
16 """The field is missing the return annotation"""
17
18 def __init__(self, field_name: str):
19 message = (
20 f'Return annotation missing for field "{field_name}", '
21 "did you forget to add it?"
22 )
23
24 super().__init__(message)
25
26
27 class MissingArgumentsAnnotationsError(Exception):
28 """The field is missing the annotation for one or more arguments"""
29
30 def __init__(self, field_name: str, arguments: Set[str]):
31 arguments_list: List[str] = sorted(list(arguments))
32
33 if len(arguments_list) == 1:
34 argument = f'argument "{arguments_list[0]}"'
35 else:
36 head = ", ".join(arguments_list[:-1])
37 argument = f'arguments "{head}" and "{arguments_list[-1]}"'
38
39 message = (
40 f"Missing annotation for {argument} "
41 f'in field "{field_name}", did you forget to add it?'
42 )
43
44 super().__init__(message)
45
46
47 class WrongReturnTypeForUnion(Exception):
48 """The Union type cannot be resolved because it's not a field"""
49
50 def __init__(self, field_name: str, result_type: str):
51 message = (
52 f'The type "{result_type}" cannot be resolved for the field "{field_name}" '
53 ", are you using a strawberry.field?"
54 )
55
56 super().__init__(message)
57
58
59 class UnallowedReturnTypeForUnion(Exception):
60 """The return type is not in the list of Union types"""
61
62 def __init__(
63 self, field_name: str, result_type: str, allowed_types: Set[GraphQLObjectType]
64 ):
65 formatted_allowed_types = [type_.name for type_ in allowed_types]
66
67 message = (
68 f'The type "{result_type}" of the field "{field_name}" '
69 f'is not in the list of the types of the union: "{formatted_allowed_types}"'
70 )
71
72 super().__init__(message)
73
74
75 class MissingTypesForGenericError(Exception):
76 """Raised when a generic types was used without passing any type."""
77
78 def __init__(self, field_name: str, annotation):
79 message = (
80 f'The type "{annotation.__name__}" of the field "{field_name}" '
81 f"is generic, but no type has been passed"
82 )
83
84 super().__init__(message)
85
86
87 class UnsupportedTypeError(Exception):
88 def __init__(self, annotation):
89 message = f"{annotation} conversion is not supported"
90
91 super().__init__(message)
92
93
94 class MissingFieldAnnotationError(Exception):
95 def __init__(self, field_name: str):
96 message = (
97 f'Unable to determine the type of field "{field_name}". Either '
98 f"annotate it directly, or provide a typed resolver using "
99 f"@strawberry.field."
100 )
101
102 super().__init__(message)
103
104
105 class PrivateStrawberryFieldError(Exception):
106 def __init__(self, field_name: str, type_name: str):
107 message = (
108 f"Field {field_name} on type {type_name} cannot be both "
109 "private and a strawberry.field"
110 )
111
112 super().__init__(message)
113
114
115 class MultipleStrawberryArgumentsError(Exception):
116 def __init__(self, field_name: str, argument_name: str):
117 message = (
118 f"Annotation for argument `{argument_name}` on field "
119 f"`{field_name}` cannot have multiple `strawberry.argument`s"
120 )
121
122 super().__init__(message)
123
124
125 class ScalarAlreadyRegisteredError(Exception):
126 def __init__(self, scalar_name: str):
127 message = f"Scalar `{scalar_name}` has already been registered"
128
129 super().__init__(message)
130
131
132 class WrongNumberOfResultsReturned(Exception):
133 def __init__(self, expected: int, received: int):
134 message = (
135 "Received wrong number of results in dataloader, "
136 f"expected: {expected}, received: {received}"
137 )
138
139 super().__init__(message)
140
[end of strawberry/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/exceptions.py b/strawberry/exceptions.py
--- a/strawberry/exceptions.py
+++ b/strawberry/exceptions.py
@@ -72,6 +72,12 @@
super().__init__(message)
+class InvalidUnionType(Exception):
+ """The union is constructed with an invalid type"""
+
+ pass
+
+
class MissingTypesForGenericError(Exception):
"""Raised when a generic types was used without passing any type."""
diff --git a/strawberry/union.py b/strawberry/union.py
--- a/strawberry/union.py
+++ b/strawberry/union.py
@@ -1,4 +1,7 @@
-from typing import NoReturn, Optional, Tuple, Type
+from typing import NoReturn, Optional, Tuple, Type, TypeVar
+
+from .exceptions import InvalidUnionType
+from .scalars import SCALAR_TYPES
class StrawberryUnion:
@@ -37,8 +40,6 @@
Example usages:
- >>> strawberry.union("Some Thing", (int, str))
-
>>> @strawberry.type
... class A: ...
>>> @strawberry.type
@@ -46,6 +47,21 @@
>>> strawberry.union("Name", (A, Optional[B]))
"""
+ # Validate types
+ if len(types) == 0:
+ raise TypeError("No types passed to `union`")
+
+ for _type in types:
+ if _type in SCALAR_TYPES:
+ raise InvalidUnionType(
+ f"Scalar type `{_type.__name__}` cannot be used in a GraphQL Union"
+ )
+
+ if not isinstance(_type, TypeVar) and not hasattr(_type, "_type_definition"):
+ raise InvalidUnionType(
+ f"Union type `{_type.__name__}` is not a Strawberry type"
+ )
+
union_definition = StrawberryUnion(name=name, types=types, description=description)
return union_definition
| {"golden_diff": "diff --git a/strawberry/exceptions.py b/strawberry/exceptions.py\n--- a/strawberry/exceptions.py\n+++ b/strawberry/exceptions.py\n@@ -72,6 +72,12 @@\n super().__init__(message)\n \n \n+class InvalidUnionType(Exception):\n+ \"\"\"The union is constructed with an invalid type\"\"\"\n+\n+ pass\n+\n+\n class MissingTypesForGenericError(Exception):\n \"\"\"Raised when a generic types was used without passing any type.\"\"\"\n \ndiff --git a/strawberry/union.py b/strawberry/union.py\n--- a/strawberry/union.py\n+++ b/strawberry/union.py\n@@ -1,4 +1,7 @@\n-from typing import NoReturn, Optional, Tuple, Type\n+from typing import NoReturn, Optional, Tuple, Type, TypeVar\n+\n+from .exceptions import InvalidUnionType\n+from .scalars import SCALAR_TYPES\n \n \n class StrawberryUnion:\n@@ -37,8 +40,6 @@\n \n Example usages:\n \n- >>> strawberry.union(\"Some Thing\", (int, str))\n-\n >>> @strawberry.type\n ... class A: ...\n >>> @strawberry.type\n@@ -46,6 +47,21 @@\n >>> strawberry.union(\"Name\", (A, Optional[B]))\n \"\"\"\n \n+ # Validate types\n+ if len(types) == 0:\n+ raise TypeError(\"No types passed to `union`\")\n+\n+ for _type in types:\n+ if _type in SCALAR_TYPES:\n+ raise InvalidUnionType(\n+ f\"Scalar type `{_type.__name__}` cannot be used in a GraphQL Union\"\n+ )\n+\n+ if not isinstance(_type, TypeVar) and not hasattr(_type, \"_type_definition\"):\n+ raise InvalidUnionType(\n+ f\"Union type `{_type.__name__}` is not a Strawberry type\"\n+ )\n+\n union_definition = StrawberryUnion(name=name, types=types, description=description)\n \n return union_definition\n", "issue": "Raise error when using Scalars (or non strawberry types) with Union\nIf we do something any of the following:\r\n\r\n```python\r\nX = strawberry.Union(\"X\", (int, str))\r\n\r\[email protected]\r\nclass A:\r\n field: Union[int, str]\r\n\r\n\r\[email protected]\r\nclass A:\r\n field: Union[NonStrawberryTypeA, NonStrawberryTypeB]\r\n```\r\n\r\nwe should get an error informing that:\r\n\r\n1. you can't use unions with scalars (not supported by GraphQL)\r\n2. you can only use union with strawberry types\n", "before_files": [{"content": "from typing import NoReturn, Optional, Tuple, Type\n\n\nclass StrawberryUnion:\n def __init__(\n self, name: str, types: Tuple[Type, ...], description: Optional[str] = None\n ):\n self.name = name\n self._types = types\n self.description = description\n\n @property\n def types(self) -> Tuple[Type, ...]:\n from .types.type_resolver import _resolve_generic_type\n\n types = tuple(\n _resolve_generic_type(t, self.name)\n for t in self._types\n if t is not None.__class__\n )\n\n return types\n\n def __call__(self, *_args, **_kwargs) -> NoReturn:\n \"\"\"Do not use.\n\n Used to bypass\n https://github.com/python/cpython/blob/5efb1a77e75648012f8b52960c8637fc296a5c6d/Lib/typing.py#L148-L149\n \"\"\"\n raise ValueError(\"Cannot use union type directly\")\n\n\ndef union(\n name: str, types: Tuple[Type, ...], *, description: str = None\n) -> StrawberryUnion:\n \"\"\"Creates a new named Union type.\n\n Example usages:\n\n >>> strawberry.union(\"Some Thing\", (int, str))\n\n >>> @strawberry.type\n ... class A: ...\n >>> @strawberry.type\n ... 
class B: ...\n >>> strawberry.union(\"Name\", (A, Optional[B]))\n \"\"\"\n\n union_definition = StrawberryUnion(name=name, types=types, description=description)\n\n return union_definition\n", "path": "strawberry/union.py"}, {"content": "# TODO: add links to docs\n\nfrom typing import List, Set\n\nfrom graphql import GraphQLObjectType\n\n\nclass NotAnEnum(Exception):\n def __init__(self):\n message = \"strawberry.enum can only be used with subclasses of Enum\"\n\n super().__init__(message)\n\n\nclass MissingReturnAnnotationError(Exception):\n \"\"\"The field is missing the return annotation\"\"\"\n\n def __init__(self, field_name: str):\n message = (\n f'Return annotation missing for field \"{field_name}\", '\n \"did you forget to add it?\"\n )\n\n super().__init__(message)\n\n\nclass MissingArgumentsAnnotationsError(Exception):\n \"\"\"The field is missing the annotation for one or more arguments\"\"\"\n\n def __init__(self, field_name: str, arguments: Set[str]):\n arguments_list: List[str] = sorted(list(arguments))\n\n if len(arguments_list) == 1:\n argument = f'argument \"{arguments_list[0]}\"'\n else:\n head = \", \".join(arguments_list[:-1])\n argument = f'arguments \"{head}\" and \"{arguments_list[-1]}\"'\n\n message = (\n f\"Missing annotation for {argument} \"\n f'in field \"{field_name}\", did you forget to add it?'\n )\n\n super().__init__(message)\n\n\nclass WrongReturnTypeForUnion(Exception):\n \"\"\"The Union type cannot be resolved because it's not a field\"\"\"\n\n def __init__(self, field_name: str, result_type: str):\n message = (\n f'The type \"{result_type}\" cannot be resolved for the field \"{field_name}\" '\n \", are you using a strawberry.field?\"\n )\n\n super().__init__(message)\n\n\nclass UnallowedReturnTypeForUnion(Exception):\n \"\"\"The return type is not in the list of Union types\"\"\"\n\n def __init__(\n self, field_name: str, result_type: str, allowed_types: Set[GraphQLObjectType]\n ):\n formatted_allowed_types = [type_.name for type_ in allowed_types]\n\n message = (\n f'The type \"{result_type}\" of the field \"{field_name}\" '\n f'is not in the list of the types of the union: \"{formatted_allowed_types}\"'\n )\n\n super().__init__(message)\n\n\nclass MissingTypesForGenericError(Exception):\n \"\"\"Raised when a generic types was used without passing any type.\"\"\"\n\n def __init__(self, field_name: str, annotation):\n message = (\n f'The type \"{annotation.__name__}\" of the field \"{field_name}\" '\n f\"is generic, but no type has been passed\"\n )\n\n super().__init__(message)\n\n\nclass UnsupportedTypeError(Exception):\n def __init__(self, annotation):\n message = f\"{annotation} conversion is not supported\"\n\n super().__init__(message)\n\n\nclass MissingFieldAnnotationError(Exception):\n def __init__(self, field_name: str):\n message = (\n f'Unable to determine the type of field \"{field_name}\". 
Either '\n f\"annotate it directly, or provide a typed resolver using \"\n f\"@strawberry.field.\"\n )\n\n super().__init__(message)\n\n\nclass PrivateStrawberryFieldError(Exception):\n def __init__(self, field_name: str, type_name: str):\n message = (\n f\"Field {field_name} on type {type_name} cannot be both \"\n \"private and a strawberry.field\"\n )\n\n super().__init__(message)\n\n\nclass MultipleStrawberryArgumentsError(Exception):\n def __init__(self, field_name: str, argument_name: str):\n message = (\n f\"Annotation for argument `{argument_name}` on field \"\n f\"`{field_name}` cannot have multiple `strawberry.argument`s\"\n )\n\n super().__init__(message)\n\n\nclass ScalarAlreadyRegisteredError(Exception):\n def __init__(self, scalar_name: str):\n message = f\"Scalar `{scalar_name}` has already been registered\"\n\n super().__init__(message)\n\n\nclass WrongNumberOfResultsReturned(Exception):\n def __init__(self, expected: int, received: int):\n message = (\n \"Received wrong number of results in dataloader, \"\n f\"expected: {expected}, received: {received}\"\n )\n\n super().__init__(message)\n", "path": "strawberry/exceptions.py"}]} | 2,374 | 452 |
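As a quick illustration of the validation the strawberry patch above introduces, here is a minimal standalone sketch. It is not the real strawberry implementation: the `SCALAR_TYPES` tuple and the `_type_definition` marker attribute are assumptions modelled on the names that appear in the diff.

```python
from typing import Tuple, Type, TypeVar

# Assumed stand-ins for strawberry internals (see note above): scalar types that
# may not appear in a GraphQL union, and the attribute that marks a strawberry type.
SCALAR_TYPES: Tuple[type, ...] = (int, float, str, bool, bytes)


class InvalidUnionType(Exception):
    """The union is constructed with an invalid type."""


def validate_union_types(types: Tuple[Type, ...]) -> None:
    if len(types) == 0:
        raise TypeError("No types passed to `union`")

    for _type in types:
        # GraphQL unions cannot contain scalar members.
        if _type in SCALAR_TYPES:
            raise InvalidUnionType(
                f"Scalar type `{_type.__name__}` cannot be used in a GraphQL Union"
            )
        # Anything that is neither a TypeVar nor a strawberry-decorated type is rejected.
        if not isinstance(_type, TypeVar) and not hasattr(_type, "_type_definition"):
            raise InvalidUnionType(
                f"Union type `{_type.__name__}` is not a Strawberry type"
            )
```

For example, `validate_union_types((int, str))` raises `InvalidUnionType`, which is the behaviour the golden diff adds for scalar members.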
gh_patches_debug_58411 | rasdani/github-patches | git_diff | web2py__web2py-1871 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
contrib/pg8000 is old and causes weird postgres errors
Please update the contrib/pg8000 driver to the current version.
Otherwise errors like Broken Pipe, OperationalError, etc. occur,
- at least for postgres 9.6,
- especially for long-running tasks (i.e. the scheduler), where they are not properly handled (scheduler workers will restart and the earlier run remains as RUNNING).
related links:
https://github.com/mfenniak/pg8000/issues/73
https://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU
..in copy into issues: web2py/web2py, web2py/pydal
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 from gluon.fileutils import tar, untar, read_file, write_file
5 import tarfile
6 import sys
7
8
9 def tar(file, filelist, expression='^.+$'):
10 """
11 tars dir/files into file, only tars file that match expression
12 """
13
14 tar = tarfile.TarFile(file, 'w')
15 try:
16 for element in filelist:
17 try:
18 for file in listdir(element, expression, add_dirs=True):
19 tar.add(os.path.join(element, file), file, False)
20 except:
21 tar.add(element)
22 finally:
23 tar.close()
24
25
26 def start():
27 if 'sdist' in sys.argv:
28 tar('gluon/env.tar', ['applications', 'VERSION',
29 'extras/icons/splashlogo.gif'])
30
31 setup(name='web2py',
32 version=read_file("VERSION").split()[1],
33 description="""full-stack framework for rapid development and prototyping
34 of secure database-driven web-based applications, written and
35 programmable in Python.""",
36 long_description="""
37 Everything in one package with no dependencies. Development, deployment,
38 debugging, testing, database administration and maintenance of applications can
39 be done via the provided web interface. web2py has no configuration files,
40 requires no installation, can run off a USB drive. web2py uses Python for the
41 Model, the Views and the Controllers, has a built-in ticketing system to manage
42 errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
43 MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a
44 Database Abstraction Layer. web2py includes libraries to handle
45 HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
46 ready, capable of upload/download streaming of very large files, and always
47 backward compatible.
48 """,
49 author='Massimo Di Pierro',
50 author_email='[email protected]',
51 license='http://web2py.com/examples/default/license',
52 classifiers=["Development Status :: 5 - Production/Stable"],
53 url='http://web2py.com',
54 platforms='Windows, Linux, Mac, Unix,Windows Mobile',
55 packages=['gluon',
56 'gluon/contrib',
57 'gluon/contrib/gateways',
58 'gluon/contrib/login_methods',
59 'gluon/contrib/markdown',
60 'gluon/contrib/markmin',
61 'gluon/contrib/memcache',
62 'gluon/contrib/fpdf',
63 'gluon/contrib/pymysql',
64 'gluon/contrib/pyrtf',
65 'gluon/contrib/pysimplesoap',
66 'gluon/contrib/pg8000',
67 'gluon/contrib/plural_rules',
68 'gluon/contrib/minify',
69 'gluon/contrib/pyaes',
70 'gluon/contrib/pyuca',
71 'gluon/tests',
72 ],
73 package_data={'gluon': ['env.tar']},
74 # scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],
75 )
76
77 if __name__ == '__main__':
78 #print "web2py does not require installation and"
79 #print "you should just start it with:"
80 #print
81 #print "$ python web2py.py"
82 #print
83 #print "are you sure you want to install it anyway (y/n)?"
84 #s = raw_input('>')
85 #if s.lower()[:1]=='y':
86 start()
87
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,6 @@
'gluon/contrib/pymysql',
'gluon/contrib/pyrtf',
'gluon/contrib/pysimplesoap',
- 'gluon/contrib/pg8000',
'gluon/contrib/plural_rules',
'gluon/contrib/minify',
'gluon/contrib/pyaes',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,6 @@\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n- 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n", "issue": "contrib/pg8000 is old and causes weird postgres errors\nPlease update the contrib/pg8000 driver to the current version.\r\nOtherwise errors like Broken Pipe, Operationalerror,.. occur,\r\n- at least for postgres 9.6,\r\n- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).\r\n\r\nrelated links:\r\nhttps://github.com/mfenniak/pg8000/issues/73\r\nhttps://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU\r\n\r\n..in copy into issues: web2py/web2py, web2py/pydal\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. 
Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}]} | 1,643 | 113 |
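The web2py patch above only stops bundling `gluon/contrib/pg8000` in the distribution; it does not itself install a replacement driver. One way to check at runtime which pg8000 copy an environment actually resolves (an illustrative snippet, not part of the patch) is:

```python
# Report where the pg8000 driver is imported from, so a vendored copy under
# gluon/contrib can be told apart from an upstream `pip install pg8000`.
import importlib

try:
    pg8000 = importlib.import_module("pg8000")
    print("pg8000 resolved from:", pg8000.__file__)
except ImportError:
    print("pg8000 not installed; install the upstream driver, e.g. `pip install pg8000`")
```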
gh_patches_debug_8874 | rasdani/github-patches | git_diff | Chia-Network__chia-blockchain-17197 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(Bug) Unable to run Chia using Windows on AMD K10 architecture (AMD Phenom)
### What happened?
Hello, I would like someone to help me. This is how the Chia app works for me on 1.8.2. Since I want a stronger version, when I install 2.0 or 2.0.1 I have a problem. The problem is the following: when entering the application, it always says "Loading keyring status...". What should I do? Thank you.
### Version
2.0.0 - 2.0.1
### What platform are you using?
Windows
### What ui mode are you using?
GUI
### Relevant log output
```shell
Loading keyring status
```
</issue>
<code>
[start of setup.py]
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from setuptools import find_packages, setup
7
8 dependencies = [
9 "aiofiles==23.2.1", # Async IO for files
10 "anyio==4.0.0",
11 "boto3==1.29.4", # AWS S3 for DL s3 plugin
12 "chiavdf==1.1.0", # timelord and vdf verification
13 "chiabip158==1.3", # bip158-style wallet filters
14 "chiapos==2.0.3", # proof of space
15 "clvm==0.9.8",
16 "clvm_tools==0.4.7", # Currying, Program.to, other conveniences
17 "chia_rs==0.2.13",
18 "clvm-tools-rs==0.1.39", # Rust implementation of clvm_tools' compiler
19 "aiohttp==3.9.1", # HTTP server for full node rpc
20 "aiosqlite==0.19.0", # asyncio wrapper for sqlite, to store blocks
21 "bitstring==4.1.2", # Binary data management library
22 "colorama==0.4.6", # Colorizes terminal output
23 "colorlog==6.7.0", # Adds color to logs
24 "concurrent-log-handler==0.9.24", # Concurrently log and rotate logs
25 "cryptography==41.0.5", # Python cryptography library for TLS - keyring conflict
26 "filelock==3.13.1", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)
27 "keyring==24.3.0", # Store keys in MacOS Keychain, Windows Credential Locker
28 "PyYAML==6.0.1", # Used for config file format
29 "setproctitle==1.3.3", # Gives the chia processes readable names
30 "sortedcontainers==2.4.0", # For maintaining sorted mempools
31 "click==8.1.3", # For the CLI
32 "dnspython==2.4.2", # Query DNS seeds
33 "watchdog==2.2.0", # Filesystem event watching - watches keyring.yaml
34 "dnslib==0.9.23", # dns lib
35 "typing-extensions==4.8.0", # typing backports like Protocol and TypedDict
36 "zstd==1.5.5.1",
37 "packaging==23.2",
38 "psutil==5.9.4",
39 ]
40
41 upnp_dependencies = [
42 "miniupnpc==2.2.2", # Allows users to open ports on their router
43 ]
44
45 dev_dependencies = [
46 "build==1.0.3",
47 "coverage==7.3.2",
48 "diff-cover==8.0.1",
49 "pre-commit==3.5.0",
50 "py3createtorrent==1.1.0",
51 "pylint==3.0.2",
52 "pytest==7.4.3",
53 "pytest-cov==4.1.0",
54 "pytest-mock==3.12.0",
55 "pytest-xdist==3.5.0",
56 "pyupgrade==3.15.0",
57 "twine==4.0.2",
58 "isort==5.12.0",
59 "flake8==6.1.0",
60 "mypy==1.7.0",
61 "black==23.11.0",
62 "lxml==4.9.3",
63 "aiohttp_cors==0.7.0", # For blackd
64 "pyinstaller==5.13.0",
65 "types-aiofiles==23.2.0.0",
66 "types-cryptography==3.3.23.2",
67 "types-pyyaml==6.0.12.12",
68 "types-setuptools==68.2.0.1",
69 ]
70
71 legacy_keyring_dependencies = [
72 "keyrings.cryptfile==1.3.9",
73 ]
74
75 kwargs = dict(
76 name="chia-blockchain",
77 author="Mariano Sorgente",
78 author_email="[email protected]",
79 description="Chia blockchain full node, farmer, timelord, and wallet.",
80 url="https://chia.net/",
81 license="Apache License",
82 python_requires=">=3.8.1, <4",
83 keywords="chia blockchain node",
84 install_requires=dependencies,
85 extras_require=dict(
86 dev=dev_dependencies,
87 upnp=upnp_dependencies,
88 legacy_keyring=legacy_keyring_dependencies,
89 ),
90 packages=find_packages(include=["build_scripts", "chia", "chia.*", "mozilla-ca"]),
91 entry_points={
92 "console_scripts": [
93 "chia = chia.cmds.chia:main",
94 "chia_daemon = chia.daemon.server:main",
95 "chia_wallet = chia.server.start_wallet:main",
96 "chia_full_node = chia.server.start_full_node:main",
97 "chia_harvester = chia.server.start_harvester:main",
98 "chia_farmer = chia.server.start_farmer:main",
99 "chia_introducer = chia.server.start_introducer:main",
100 "chia_crawler = chia.seeder.start_crawler:main",
101 "chia_seeder = chia.seeder.dns_server:main",
102 "chia_timelord = chia.server.start_timelord:main",
103 "chia_timelord_launcher = chia.timelord.timelord_launcher:main",
104 "chia_full_node_simulator = chia.simulator.start_simulator:main",
105 "chia_data_layer = chia.server.start_data_layer:main",
106 "chia_data_layer_http = chia.data_layer.data_layer_server:main",
107 "chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server",
108 ]
109 },
110 package_data={
111 "": ["*.clsp", "*.clsp.hex", "*.clvm", "*.clib", "py.typed"],
112 "chia.util": ["initial-*.yaml", "english.txt"],
113 "chia.ssl": ["chia_ca.crt", "chia_ca.key", "dst_root_ca.pem"],
114 "mozilla-ca": ["cacert.pem"],
115 },
116 long_description=open("README.md").read(),
117 long_description_content_type="text/markdown",
118 zip_safe=False,
119 project_urls={
120 "Source": "https://github.com/Chia-Network/chia-blockchain/",
121 "Changelog": "https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md",
122 },
123 )
124
125 if "setup_file" in sys.modules:
126 # include dev deps in regular deps when run in snyk
127 dependencies.extend(dev_dependencies)
128
129 if len(os.environ.get("CHIA_SKIP_SETUP", "")) < 1:
130 setup(**kwargs) # type: ignore
131
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@
"chiapos==2.0.3", # proof of space
"clvm==0.9.8",
"clvm_tools==0.4.7", # Currying, Program.to, other conveniences
- "chia_rs==0.2.13",
+ "chia_rs==0.2.15",
"clvm-tools-rs==0.1.39", # Rust implementation of clvm_tools' compiler
"aiohttp==3.9.1", # HTTP server for full node rpc
"aiosqlite==0.19.0", # asyncio wrapper for sqlite, to store blocks
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \"chiapos==2.0.3\", # proof of space\n \"clvm==0.9.8\",\n \"clvm_tools==0.4.7\", # Currying, Program.to, other conveniences\n- \"chia_rs==0.2.13\",\n+ \"chia_rs==0.2.15\",\n \"clvm-tools-rs==0.1.39\", # Rust implementation of clvm_tools' compiler\n \"aiohttp==3.9.1\", # HTTP server for full node rpc\n \"aiosqlite==0.19.0\", # asyncio wrapper for sqlite, to store blocks\n", "issue": "(Bug) Unable to run Chia using Windows on AMD K10 architecture (AMD Phenom)\n### What happened?\n\nhello, I would like someone to help me. this is how the chia app works for me on 1.8.2. since I want a stronger version, when I install 2.0 or 2.0.1 I have a problem. the problem is the following. When entering the application, it always says Loading keyring status.... what should I do? thank you\n\n### Version\n\n2.0.0 - 2.0.1\n\n### What platform are you using?\n\nWindows\n\n### What ui mode are you using?\n\nGUI\n\n### Relevant log output\n\n```shell\nLoading keyring status\n```\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\ndependencies = [\n \"aiofiles==23.2.1\", # Async IO for files\n \"anyio==4.0.0\",\n \"boto3==1.29.4\", # AWS S3 for DL s3 plugin\n \"chiavdf==1.1.0\", # timelord and vdf verification\n \"chiabip158==1.3\", # bip158-style wallet filters\n \"chiapos==2.0.3\", # proof of space\n \"clvm==0.9.8\",\n \"clvm_tools==0.4.7\", # Currying, Program.to, other conveniences\n \"chia_rs==0.2.13\",\n \"clvm-tools-rs==0.1.39\", # Rust implementation of clvm_tools' compiler\n \"aiohttp==3.9.1\", # HTTP server for full node rpc\n \"aiosqlite==0.19.0\", # asyncio wrapper for sqlite, to store blocks\n \"bitstring==4.1.2\", # Binary data management library\n \"colorama==0.4.6\", # Colorizes terminal output\n \"colorlog==6.7.0\", # Adds color to logs\n \"concurrent-log-handler==0.9.24\", # Concurrently log and rotate logs\n \"cryptography==41.0.5\", # Python cryptography library for TLS - keyring conflict\n \"filelock==3.13.1\", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)\n \"keyring==24.3.0\", # Store keys in MacOS Keychain, Windows Credential Locker\n \"PyYAML==6.0.1\", # Used for config file format\n \"setproctitle==1.3.3\", # Gives the chia processes readable names\n \"sortedcontainers==2.4.0\", # For maintaining sorted mempools\n \"click==8.1.3\", # For the CLI\n \"dnspython==2.4.2\", # Query DNS seeds\n \"watchdog==2.2.0\", # Filesystem event watching - watches keyring.yaml\n \"dnslib==0.9.23\", # dns lib\n \"typing-extensions==4.8.0\", # typing backports like Protocol and TypedDict\n \"zstd==1.5.5.1\",\n \"packaging==23.2\",\n \"psutil==5.9.4\",\n]\n\nupnp_dependencies = [\n \"miniupnpc==2.2.2\", # Allows users to open ports on their router\n]\n\ndev_dependencies = [\n \"build==1.0.3\",\n \"coverage==7.3.2\",\n \"diff-cover==8.0.1\",\n \"pre-commit==3.5.0\",\n \"py3createtorrent==1.1.0\",\n \"pylint==3.0.2\",\n \"pytest==7.4.3\",\n \"pytest-cov==4.1.0\",\n \"pytest-mock==3.12.0\",\n \"pytest-xdist==3.5.0\",\n \"pyupgrade==3.15.0\",\n \"twine==4.0.2\",\n \"isort==5.12.0\",\n \"flake8==6.1.0\",\n \"mypy==1.7.0\",\n \"black==23.11.0\",\n \"lxml==4.9.3\",\n \"aiohttp_cors==0.7.0\", # For blackd\n \"pyinstaller==5.13.0\",\n \"types-aiofiles==23.2.0.0\",\n \"types-cryptography==3.3.23.2\",\n \"types-pyyaml==6.0.12.12\",\n 
\"types-setuptools==68.2.0.1\",\n]\n\nlegacy_keyring_dependencies = [\n \"keyrings.cryptfile==1.3.9\",\n]\n\nkwargs = dict(\n name=\"chia-blockchain\",\n author=\"Mariano Sorgente\",\n author_email=\"[email protected]\",\n description=\"Chia blockchain full node, farmer, timelord, and wallet.\",\n url=\"https://chia.net/\",\n license=\"Apache License\",\n python_requires=\">=3.8.1, <4\",\n keywords=\"chia blockchain node\",\n install_requires=dependencies,\n extras_require=dict(\n dev=dev_dependencies,\n upnp=upnp_dependencies,\n legacy_keyring=legacy_keyring_dependencies,\n ),\n packages=find_packages(include=[\"build_scripts\", \"chia\", \"chia.*\", \"mozilla-ca\"]),\n entry_points={\n \"console_scripts\": [\n \"chia = chia.cmds.chia:main\",\n \"chia_daemon = chia.daemon.server:main\",\n \"chia_wallet = chia.server.start_wallet:main\",\n \"chia_full_node = chia.server.start_full_node:main\",\n \"chia_harvester = chia.server.start_harvester:main\",\n \"chia_farmer = chia.server.start_farmer:main\",\n \"chia_introducer = chia.server.start_introducer:main\",\n \"chia_crawler = chia.seeder.start_crawler:main\",\n \"chia_seeder = chia.seeder.dns_server:main\",\n \"chia_timelord = chia.server.start_timelord:main\",\n \"chia_timelord_launcher = chia.timelord.timelord_launcher:main\",\n \"chia_full_node_simulator = chia.simulator.start_simulator:main\",\n \"chia_data_layer = chia.server.start_data_layer:main\",\n \"chia_data_layer_http = chia.data_layer.data_layer_server:main\",\n \"chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server\",\n ]\n },\n package_data={\n \"\": [\"*.clsp\", \"*.clsp.hex\", \"*.clvm\", \"*.clib\", \"py.typed\"],\n \"chia.util\": [\"initial-*.yaml\", \"english.txt\"],\n \"chia.ssl\": [\"chia_ca.crt\", \"chia_ca.key\", \"dst_root_ca.pem\"],\n \"mozilla-ca\": [\"cacert.pem\"],\n },\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n zip_safe=False,\n project_urls={\n \"Source\": \"https://github.com/Chia-Network/chia-blockchain/\",\n \"Changelog\": \"https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md\",\n },\n)\n\nif \"setup_file\" in sys.modules:\n # include dev deps in regular deps when run in snyk\n dependencies.extend(dev_dependencies)\n\nif len(os.environ.get(\"CHIA_SKIP_SETUP\", \"\")) < 1:\n setup(**kwargs) # type: ignore\n", "path": "setup.py"}]} | 2,550 | 182 |
gh_patches_debug_14675 | rasdani/github-patches | git_diff | Netflix__lemur-739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Web-UI] Active button for authority is not working
I have a list of authorities created on Lemur. I want to disable creation of new certificates using one of the authorities. Clicking the **active** button against that authority should make the authority inactive, which is not the case.
Even a PUT call to `http://example.com/api/1/authorities/5` with the active key's value set to false returns true in the response. Please find the request and response for the API in the file given below.
[active.txt](https://github.com/Netflix/lemur/files/878813/active.txt)
</issue>
<code>
[start of lemur/authorities/service.py]
1 """
2 .. module: lemur.authorities.service
3 :platform: Unix
4 :synopsis: This module contains all of the services level functions used to
5 administer authorities in Lemur
6 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
7 :license: Apache, see LICENSE for more details.
8 .. moduleauthor:: Kevin Glisson <[email protected]>
9
10 """
11 from lemur import database
12 from lemur.extensions import metrics
13 from lemur.authorities.models import Authority
14 from lemur.roles import service as role_service
15
16 from lemur.certificates.service import upload
17
18
19 def update(authority_id, description=None, owner=None, active=None, roles=None):
20 """
21 Update an authority with new values.
22
23 :param authority_id:
24 :param roles: roles that are allowed to use this authority
25 :return:
26 """
27 authority = get(authority_id)
28
29 if roles:
30 authority.roles = roles
31
32 if active:
33 authority.active = active
34
35 authority.description = description
36 authority.owner = owner
37 return database.update(authority)
38
39
40 def mint(**kwargs):
41 """
42 Creates the authority based on the plugin provided.
43 """
44 issuer = kwargs['plugin']['plugin_object']
45 values = issuer.create_authority(kwargs)
46
47 # support older plugins
48 if len(values) == 3:
49 body, chain, roles = values
50 private_key = None
51 elif len(values) == 4:
52 body, private_key, chain, roles = values
53
54 roles = create_authority_roles(roles, kwargs['owner'], kwargs['plugin']['plugin_object'].title, kwargs['creator'])
55 return body, private_key, chain, roles
56
57
58 def create_authority_roles(roles, owner, plugin_title, creator):
59 """
60 Creates all of the necessary authority roles.
61 :param creator:
62 :param roles:
63 :return:
64 """
65 role_objs = []
66 for r in roles:
67 role = role_service.get_by_name(r['name'])
68 if not role:
69 role = role_service.create(
70 r['name'],
71 password=r['password'],
72 description="Auto generated role for {0}".format(plugin_title),
73 username=r['username'])
74
75 # the user creating the authority should be able to administer it
76 if role.username == 'admin':
77 creator.roles.append(role)
78
79 role_objs.append(role)
80
81 # create an role for the owner and assign it
82 owner_role = role_service.get_by_name(owner)
83 if not owner_role:
84 owner_role = role_service.create(
85 owner,
86 description="Auto generated role based on owner: {0}".format(owner)
87 )
88
89 role_objs.append(owner_role)
90 return role_objs
91
92
93 def create(**kwargs):
94 """
95 Creates a new authority.
96 """
97 body, private_key, chain, roles = mint(**kwargs)
98
99 kwargs['creator'].roles = list(set(list(kwargs['creator'].roles) + roles))
100
101 kwargs['body'] = body
102 kwargs['private_key'] = private_key
103 kwargs['chain'] = chain
104
105 if kwargs.get('roles'):
106 kwargs['roles'] += roles
107 else:
108 kwargs['roles'] = roles
109
110 cert = upload(**kwargs)
111 kwargs['authority_certificate'] = cert
112
113 authority = Authority(**kwargs)
114 authority = database.create(authority)
115 kwargs['creator'].authorities.append(authority)
116
117 metrics.send('authority_created', 'counter', 1, metric_tags=dict(owner=authority.owner))
118 return authority
119
120
121 def get_all():
122 """
123 Get all authorities that are currently in Lemur.
124
125 :rtype : List
126 :return:
127 """
128 query = database.session_query(Authority)
129 return database.find_all(query, Authority, {}).all()
130
131
132 def get(authority_id):
133 """
134 Retrieves an authority given it's ID
135
136 :param authority_id:
137 :return:
138 """
139 return database.get(Authority, authority_id)
140
141
142 def get_by_name(authority_name):
143 """
144 Retrieves an authority given it's name.
145
146 :param authority_name:
147 :return:
148 """
149 return database.get(Authority, authority_name, field='name')
150
151
152 def get_authority_role(ca_name, creator=None):
153 """
154 Attempts to get the authority role for a given ca uses current_user
155 as a basis for accomplishing that.
156
157 :param ca_name:
158 """
159 if creator:
160 if creator.is_admin:
161 return role_service.get_by_name("{0}_admin".format(ca_name))
162 return role_service.get_by_name("{0}_operator".format(ca_name))
163
164
165 def render(args):
166 """
167 Helper that helps us render the REST Api responses.
168 :param args:
169 :return:
170 """
171 query = database.session_query(Authority)
172 filt = args.pop('filter')
173
174 if filt:
175 terms = filt.split(';')
176 if 'active' in filt: # this is really weird but strcmp seems to not work here??
177 query = query.filter(Authority.active == terms[1])
178 else:
179 query = database.filter(query, Authority, terms)
180
181 # we make sure that a user can only use an authority they either own are a member of - admins can see all
182 if not args['user'].is_admin:
183 authority_ids = []
184 for authority in args['user'].authorities:
185 authority_ids.append(authority.id)
186
187 for role in args['user'].roles:
188 for authority in role.authorities:
189 authority_ids.append(authority.id)
190 query = query.filter(Authority.id.in_(authority_ids))
191
192 return database.sort_and_page(query, Authority, args)
193
[end of lemur/authorities/service.py]
[start of lemur/authorities/schemas.py]
1 """
2 .. module: lemur.authorities.schemas
3 :platform: unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <[email protected]>
7 """
8 from flask import current_app
9
10 from marshmallow import fields, validates_schema, pre_load
11 from marshmallow import validate
12 from marshmallow.exceptions import ValidationError
13
14 from lemur.schemas import PluginInputSchema, PluginOutputSchema, ExtensionSchema, AssociatedAuthoritySchema, AssociatedRoleSchema
15 from lemur.users.schemas import UserNestedOutputSchema
16 from lemur.common.schema import LemurInputSchema, LemurOutputSchema
17 from lemur.common import validators, missing
18
19 from lemur.common.fields import ArrowDateTime
20
21
22 class AuthorityInputSchema(LemurInputSchema):
23 name = fields.String(required=True)
24 owner = fields.Email(required=True)
25 description = fields.String()
26 common_name = fields.String(required=True, validate=validators.sensitive_domain)
27
28 validity_start = ArrowDateTime()
29 validity_end = ArrowDateTime()
30 validity_years = fields.Integer()
31
32 # certificate body fields
33 organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))
34 organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))
35 location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))
36 country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))
37 state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))
38
39 plugin = fields.Nested(PluginInputSchema)
40
41 # signing related options
42 type = fields.String(validate=validate.OneOf(['root', 'subca']), missing='root')
43 parent = fields.Nested(AssociatedAuthoritySchema)
44 signing_algorithm = fields.String(validate=validate.OneOf(['sha256WithRSA', 'sha1WithRSA']), missing='sha256WithRSA')
45 key_type = fields.String(validate=validate.OneOf(['RSA2048', 'RSA4096']), missing='RSA2048')
46 key_name = fields.String()
47 sensitivity = fields.String(validate=validate.OneOf(['medium', 'high']), missing='medium')
48 serial_number = fields.Integer()
49 first_serial = fields.Integer(missing=1)
50
51 extensions = fields.Nested(ExtensionSchema)
52
53 roles = fields.Nested(AssociatedRoleSchema(many=True))
54
55 @validates_schema
56 def validate_dates(self, data):
57 validators.dates(data)
58
59 @validates_schema
60 def validate_subca(self, data):
61 if data['type'] == 'subca':
62 if not data.get('parent'):
63 raise ValidationError("If generating a subca, parent 'authority' must be specified.")
64
65 @pre_load
66 def ensure_dates(self, data):
67 return missing.convert_validity_years(data)
68
69
70 class AuthorityUpdateSchema(LemurInputSchema):
71 owner = fields.Email(required=True)
72 description = fields.String()
73 active = fields.Boolean()
74 roles = fields.Nested(AssociatedRoleSchema(many=True))
75
76
77 class RootAuthorityCertificateOutputSchema(LemurOutputSchema):
78 __envelope__ = False
79 id = fields.Integer()
80 active = fields.Boolean()
81 bits = fields.Integer()
82 body = fields.String()
83 chain = fields.String()
84 description = fields.String()
85 name = fields.String()
86 cn = fields.String()
87 not_after = fields.DateTime()
88 not_before = fields.DateTime()
89 owner = fields.Email()
90 status = fields.Boolean()
91 user = fields.Nested(UserNestedOutputSchema)
92
93
94 class AuthorityOutputSchema(LemurOutputSchema):
95 id = fields.Integer()
96 description = fields.String()
97 name = fields.String()
98 owner = fields.Email()
99 plugin = fields.Nested(PluginOutputSchema)
100 active = fields.Boolean()
101 options = fields.Dict()
102 roles = fields.List(fields.Nested(AssociatedRoleSchema))
103 authority_certificate = fields.Nested(RootAuthorityCertificateOutputSchema)
104
105
106 class AuthorityNestedOutputSchema(LemurOutputSchema):
107 __envelope__ = False
108 id = fields.Integer()
109 description = fields.String()
110 name = fields.String()
111 owner = fields.Email()
112 plugin = fields.Nested(PluginOutputSchema)
113 active = fields.Boolean()
114
115
116 authority_update_schema = AuthorityUpdateSchema()
117 authority_input_schema = AuthorityInputSchema()
118 authority_output_schema = AuthorityOutputSchema()
119 authorities_output_schema = AuthorityOutputSchema(many=True)
120
[end of lemur/authorities/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lemur/authorities/schemas.py b/lemur/authorities/schemas.py
--- a/lemur/authorities/schemas.py
+++ b/lemur/authorities/schemas.py
@@ -70,7 +70,7 @@
class AuthorityUpdateSchema(LemurInputSchema):
owner = fields.Email(required=True)
description = fields.String()
- active = fields.Boolean()
+ active = fields.Boolean(missing=True)
roles = fields.Nested(AssociatedRoleSchema(many=True))
diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -29,9 +29,7 @@
if roles:
authority.roles = roles
- if active:
- authority.active = active
-
+ authority.active = active
authority.description = description
authority.owner = owner
return database.update(authority)
| {"golden_diff": "diff --git a/lemur/authorities/schemas.py b/lemur/authorities/schemas.py\n--- a/lemur/authorities/schemas.py\n+++ b/lemur/authorities/schemas.py\n@@ -70,7 +70,7 @@\n class AuthorityUpdateSchema(LemurInputSchema):\n owner = fields.Email(required=True)\n description = fields.String()\n- active = fields.Boolean()\n+ active = fields.Boolean(missing=True)\n roles = fields.Nested(AssociatedRoleSchema(many=True))\n \n \ndiff --git a/lemur/authorities/service.py b/lemur/authorities/service.py\n--- a/lemur/authorities/service.py\n+++ b/lemur/authorities/service.py\n@@ -29,9 +29,7 @@\n if roles:\n authority.roles = roles\n \n- if active:\n- authority.active = active\n-\n+ authority.active = active\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n", "issue": "[Web-UI] Active button for authority is not working\nI have list of authorities created on the lemur. I want to disable creation of new certificates using one of the authority. Clicking on **active** button against that authority should make that authority inactive which is not the case.\r\n\r\nEven the PUT call to the `http://example.com/api/1/authorities/5` with active key's value set to false returns true in response. Please find the request and response for the API in the file given below.\r\n[active.txt](https://github.com/Netflix/lemur/files/878813/active.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.authorities.service\n :platform: Unix\n :synopsis: This module contains all of the services level functions used to\n administer authorities in Lemur\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nfrom lemur import database\nfrom lemur.extensions import metrics\nfrom lemur.authorities.models import Authority\nfrom lemur.roles import service as role_service\n\nfrom lemur.certificates.service import upload\n\n\ndef update(authority_id, description=None, owner=None, active=None, roles=None):\n \"\"\"\n Update an authority with new values.\n\n :param authority_id:\n :param roles: roles that are allowed to use this authority\n :return:\n \"\"\"\n authority = get(authority_id)\n\n if roles:\n authority.roles = roles\n\n if active:\n authority.active = active\n\n authority.description = description\n authority.owner = owner\n return database.update(authority)\n\n\ndef mint(**kwargs):\n \"\"\"\n Creates the authority based on the plugin provided.\n \"\"\"\n issuer = kwargs['plugin']['plugin_object']\n values = issuer.create_authority(kwargs)\n\n # support older plugins\n if len(values) == 3:\n body, chain, roles = values\n private_key = None\n elif len(values) == 4:\n body, private_key, chain, roles = values\n\n roles = create_authority_roles(roles, kwargs['owner'], kwargs['plugin']['plugin_object'].title, kwargs['creator'])\n return body, private_key, chain, roles\n\n\ndef create_authority_roles(roles, owner, plugin_title, creator):\n \"\"\"\n Creates all of the necessary authority roles.\n :param creator:\n :param roles:\n :return:\n \"\"\"\n role_objs = []\n for r in roles:\n role = role_service.get_by_name(r['name'])\n if not role:\n role = role_service.create(\n r['name'],\n password=r['password'],\n description=\"Auto generated role for {0}\".format(plugin_title),\n username=r['username'])\n\n # the user creating the authority should be able to administer it\n if role.username == 'admin':\n creator.roles.append(role)\n\n 
role_objs.append(role)\n\n # create an role for the owner and assign it\n owner_role = role_service.get_by_name(owner)\n if not owner_role:\n owner_role = role_service.create(\n owner,\n description=\"Auto generated role based on owner: {0}\".format(owner)\n )\n\n role_objs.append(owner_role)\n return role_objs\n\n\ndef create(**kwargs):\n \"\"\"\n Creates a new authority.\n \"\"\"\n body, private_key, chain, roles = mint(**kwargs)\n\n kwargs['creator'].roles = list(set(list(kwargs['creator'].roles) + roles))\n\n kwargs['body'] = body\n kwargs['private_key'] = private_key\n kwargs['chain'] = chain\n\n if kwargs.get('roles'):\n kwargs['roles'] += roles\n else:\n kwargs['roles'] = roles\n\n cert = upload(**kwargs)\n kwargs['authority_certificate'] = cert\n\n authority = Authority(**kwargs)\n authority = database.create(authority)\n kwargs['creator'].authorities.append(authority)\n\n metrics.send('authority_created', 'counter', 1, metric_tags=dict(owner=authority.owner))\n return authority\n\n\ndef get_all():\n \"\"\"\n Get all authorities that are currently in Lemur.\n\n :rtype : List\n :return:\n \"\"\"\n query = database.session_query(Authority)\n return database.find_all(query, Authority, {}).all()\n\n\ndef get(authority_id):\n \"\"\"\n Retrieves an authority given it's ID\n\n :param authority_id:\n :return:\n \"\"\"\n return database.get(Authority, authority_id)\n\n\ndef get_by_name(authority_name):\n \"\"\"\n Retrieves an authority given it's name.\n\n :param authority_name:\n :return:\n \"\"\"\n return database.get(Authority, authority_name, field='name')\n\n\ndef get_authority_role(ca_name, creator=None):\n \"\"\"\n Attempts to get the authority role for a given ca uses current_user\n as a basis for accomplishing that.\n\n :param ca_name:\n \"\"\"\n if creator:\n if creator.is_admin:\n return role_service.get_by_name(\"{0}_admin\".format(ca_name))\n return role_service.get_by_name(\"{0}_operator\".format(ca_name))\n\n\ndef render(args):\n \"\"\"\n Helper that helps us render the REST Api responses.\n :param args:\n :return:\n \"\"\"\n query = database.session_query(Authority)\n filt = args.pop('filter')\n\n if filt:\n terms = filt.split(';')\n if 'active' in filt: # this is really weird but strcmp seems to not work here??\n query = query.filter(Authority.active == terms[1])\n else:\n query = database.filter(query, Authority, terms)\n\n # we make sure that a user can only use an authority they either own are a member of - admins can see all\n if not args['user'].is_admin:\n authority_ids = []\n for authority in args['user'].authorities:\n authority_ids.append(authority.id)\n\n for role in args['user'].roles:\n for authority in role.authorities:\n authority_ids.append(authority.id)\n query = query.filter(Authority.id.in_(authority_ids))\n\n return database.sort_and_page(query, Authority, args)\n", "path": "lemur/authorities/service.py"}, {"content": "\"\"\"\n.. module: lemur.authorities.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import current_app\n\nfrom marshmallow import fields, validates_schema, pre_load\nfrom marshmallow import validate\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.schemas import PluginInputSchema, PluginOutputSchema, ExtensionSchema, AssociatedAuthoritySchema, AssociatedRoleSchema\nfrom lemur.users.schemas import UserNestedOutputSchema\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.common import validators, missing\n\nfrom lemur.common.fields import ArrowDateTime\n\n\nclass AuthorityInputSchema(LemurInputSchema):\n name = fields.String(required=True)\n owner = fields.Email(required=True)\n description = fields.String()\n common_name = fields.String(required=True, validate=validators.sensitive_domain)\n\n validity_start = ArrowDateTime()\n validity_end = ArrowDateTime()\n validity_years = fields.Integer()\n\n # certificate body fields\n organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))\n organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))\n location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))\n country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))\n state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))\n\n plugin = fields.Nested(PluginInputSchema)\n\n # signing related options\n type = fields.String(validate=validate.OneOf(['root', 'subca']), missing='root')\n parent = fields.Nested(AssociatedAuthoritySchema)\n signing_algorithm = fields.String(validate=validate.OneOf(['sha256WithRSA', 'sha1WithRSA']), missing='sha256WithRSA')\n key_type = fields.String(validate=validate.OneOf(['RSA2048', 'RSA4096']), missing='RSA2048')\n key_name = fields.String()\n sensitivity = fields.String(validate=validate.OneOf(['medium', 'high']), missing='medium')\n serial_number = fields.Integer()\n first_serial = fields.Integer(missing=1)\n\n extensions = fields.Nested(ExtensionSchema)\n\n roles = fields.Nested(AssociatedRoleSchema(many=True))\n\n @validates_schema\n def validate_dates(self, data):\n validators.dates(data)\n\n @validates_schema\n def validate_subca(self, data):\n if data['type'] == 'subca':\n if not data.get('parent'):\n raise ValidationError(\"If generating a subca, parent 'authority' must be specified.\")\n\n @pre_load\n def ensure_dates(self, data):\n return missing.convert_validity_years(data)\n\n\nclass AuthorityUpdateSchema(LemurInputSchema):\n owner = fields.Email(required=True)\n description = fields.String()\n active = fields.Boolean()\n roles = fields.Nested(AssociatedRoleSchema(many=True))\n\n\nclass RootAuthorityCertificateOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n active = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n description = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n status = fields.Boolean()\n user = fields.Nested(UserNestedOutputSchema)\n\n\nclass AuthorityOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n description = fields.String()\n name = fields.String()\n owner = fields.Email()\n plugin = fields.Nested(PluginOutputSchema)\n active = fields.Boolean()\n options = fields.Dict()\n roles = 
fields.List(fields.Nested(AssociatedRoleSchema))\n authority_certificate = fields.Nested(RootAuthorityCertificateOutputSchema)\n\n\nclass AuthorityNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n description = fields.String()\n name = fields.String()\n owner = fields.Email()\n plugin = fields.Nested(PluginOutputSchema)\n active = fields.Boolean()\n\n\nauthority_update_schema = AuthorityUpdateSchema()\nauthority_input_schema = AuthorityInputSchema()\nauthority_output_schema = AuthorityOutputSchema()\nauthorities_output_schema = AuthorityOutputSchema(many=True)\n", "path": "lemur/authorities/schemas.py"}]} | 3,630 | 223 |
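To make the lemur fix above concrete: the original `if active:` guard treats `False` like `None`, so a PUT with `"active": false` never reached the assignment, while `missing=True` only supplies a default when the key is omitted entirely. The sketch below uses plain marshmallow (2.x-style `missing=` keyword) instead of lemur's `LemurInputSchema`, so treat it as an approximation of the patched code rather than the real module.

```python
from marshmallow import Schema, fields


class AuthorityUpdateSchema(Schema):
    owner = fields.Email(required=True)
    description = fields.String()
    # Default to True only when the client omits the key; an explicit
    # `"active": false` still deserializes to False.
    active = fields.Boolean(missing=True)


def update_authority(authority, description=None, owner=None, active=None, roles=None):
    if roles:
        authority.roles = roles
    # Assign unconditionally: `if active:` would silently drop `active=False`.
    authority.active = active
    authority.description = description
    authority.owner = owner
    return authority
```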
gh_patches_debug_18335 | rasdani/github-patches | git_diff | searx__searx-1301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Gigablast crash
Greetings,
I have been experimenting with SearX lately and have been seeing this message:
"
Engines cannot retrieve results:
gigablast (unexpected crash: No JSON object could be decoded)
"
Seems like something is wrong with the Gigablast driver but I am not sure how to fix it.
I'm using: searx - 0.14.0
Thanks
</issue>
<code>
[start of searx/engines/gigablast.py]
1 """
2 Gigablast (Web)
3
4 @website https://gigablast.com
5 @provide-api yes (https://gigablast.com/api.html)
6
7 @using-api yes
8 @results XML
9 @stable yes
10 @parse url, title, content
11 """
12
13 import random
14 from json import loads
15 from time import time
16 from lxml.html import fromstring
17 from searx.url_utils import urlencode
18
19 # engine dependent config
20 categories = ['general']
21 paging = True
22 number_of_results = 10
23 language_support = True
24 safesearch = True
25
26 # search-url
27 base_url = 'https://gigablast.com/'
28 search_string = 'search?{query}'\
29 '&n={number_of_results}'\
30 '&c=main'\
31 '&s={offset}'\
32 '&format=json'\
33 '&qh=0'\
34 '&qlang={lang}'\
35 '&ff={safesearch}'\
36 '&rxiec={rxieu}'\
37 '&rand={rxikd}' # current unix timestamp
38
39 # specific xpath variables
40 results_xpath = '//response//result'
41 url_xpath = './/url'
42 title_xpath = './/title'
43 content_xpath = './/sum'
44
45 supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
46
47
48 # do search-request
49 def request(query, params):
50 offset = (params['pageno'] - 1) * number_of_results
51
52 language = params['language'].replace('-', '_').lower()
53 if language.split('-')[0] != 'zh':
54 language = language.split('-')[0]
55
56 if params['safesearch'] >= 1:
57 safesearch = 1
58 else:
59 safesearch = 0
60
61 # rxieu is some kind of hash from the search query, but accepts random atm
62 search_path = search_string.format(query=urlencode({'q': query}),
63 offset=offset,
64 number_of_results=number_of_results,
65 rxikd=int(time() * 1000),
66 rxieu=random.randint(1000000000, 9999999999),
67 lang=language,
68 safesearch=safesearch)
69
70 params['url'] = base_url + search_path
71
72 return params
73
74
75 # get response from search-request
76 def response(resp):
77 results = []
78
79 # parse results
80 response_json = loads(resp.text)
81
82 for result in response_json['results']:
83 # append result
84 results.append({'url': result['url'],
85 'title': result['title'],
86 'content': result['sum']})
87
88 # return results
89 return results
90
91
92 # get supported languages from their site
93 def _fetch_supported_languages(resp):
94 supported_languages = []
95 dom = fromstring(resp.text)
96 links = dom.xpath('//span[@id="menu2"]/a')
97 for link in links:
98 href = link.xpath('./@href')[0].split('lang%3A')
99 if len(href) == 2:
100 code = href[1].split('_')
101 if len(code) == 2:
102 code = code[0] + '-' + code[1].upper()
103 else:
104 code = code[0]
105 supported_languages.append(code)
106
107 return supported_languages
108
[end of searx/engines/gigablast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -34,6 +34,7 @@
'&qlang={lang}'\
'&ff={safesearch}'\
'&rxiec={rxieu}'\
+ '&ulse={ulse}'\
'&rand={rxikd}' # current unix timestamp
# specific xpath variables
@@ -64,6 +65,7 @@
number_of_results=number_of_results,
rxikd=int(time() * 1000),
rxieu=random.randint(1000000000, 9999999999),
+ ulse=random.randint(100000000, 999999999),
lang=language,
safesearch=safesearch)
| {"golden_diff": "diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py\n--- a/searx/engines/gigablast.py\n+++ b/searx/engines/gigablast.py\n@@ -34,6 +34,7 @@\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n+ '&ulse={ulse}'\\\n '&rand={rxikd}' # current unix timestamp\n \n # specific xpath variables\n@@ -64,6 +65,7 @@\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n+ ulse=random.randint(100000000, 999999999),\n lang=language,\n safesearch=safesearch)\n", "issue": "Gigablast crash\nGreetings,\r\n\r\nI have been experimenting with SearX lately and have been seeing this message:\r\n\r\n\"\r\nEngines cannot retrieve results:\r\n\r\ngigablast (unexpected crash: No JSON object could be decoded)\r\n\"\r\n\r\nSeems like something is wrong with the Gigablast driver but I am not sure how to fix it.\r\n\r\nI'm using: searx - 0.14.0 \r\n\r\nThanks\n", "before_files": [{"content": "\"\"\"\n Gigablast (Web)\n\n @website https://gigablast.com\n @provide-api yes (https://gigablast.com/api.html)\n\n @using-api yes\n @results XML\n @stable yes\n @parse url, title, content\n\"\"\"\n\nimport random\nfrom json import loads\nfrom time import time\nfrom lxml.html import fromstring\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['general']\npaging = True\nnumber_of_results = 10\nlanguage_support = True\nsafesearch = True\n\n# search-url\nbase_url = 'https://gigablast.com/'\nsearch_string = 'search?{query}'\\\n '&n={number_of_results}'\\\n '&c=main'\\\n '&s={offset}'\\\n '&format=json'\\\n '&qh=0'\\\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n '&rand={rxikd}' # current unix timestamp\n\n# specific xpath variables\nresults_xpath = '//response//result'\nurl_xpath = './/url'\ntitle_xpath = './/title'\ncontent_xpath = './/sum'\n\nsupported_languages_url = 'https://gigablast.com/search?&rxikd=1'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n language = params['language'].replace('-', '_').lower()\n if language.split('-')[0] != 'zh':\n language = language.split('-')[0]\n\n if params['safesearch'] >= 1:\n safesearch = 1\n else:\n safesearch = 0\n\n # rxieu is some kind of hash from the search query, but accepts random atm\n search_path = search_string.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n lang=language,\n safesearch=safesearch)\n\n params['url'] = base_url + search_path\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # parse results\n response_json = loads(resp.text)\n\n for result in response_json['results']:\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': result['sum']})\n\n # return results\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = []\n dom = fromstring(resp.text)\n links = dom.xpath('//span[@id=\"menu2\"]/a')\n for link in links:\n href = link.xpath('./@href')[0].split('lang%3A')\n if len(href) == 2:\n code = href[1].split('_')\n if len(code) == 2:\n code = code[0] + '-' + code[1].upper()\n else:\n code = code[0]\n supported_languages.append(code)\n\n return supported_languages\n", "path": "searx/engines/gigablast.py"}]} | 1,585 | 228 |
gh_patches_debug_17435 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2756 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failure to invalidate session when user resets their own password
## Description
When a user resets their own password, their session is not invalidated.
## Steps to Reproduce
1. User logs in
2. User resets password
## Expected Behavior
User is logged out and is requested to use their new password to login
## Actual Behavior
User can continue to browse without having to enter their new password again
## Comments
Related tickets: #2300, #880
</issue>
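The patch at the end of this record handles this by dropping the session keys after a successful self-service reset and redirecting to the login page. A condensed sketch of that behaviour follows; the key names `uid` and `expires` come from that patch, while the standalone function name is invented for illustration.

```python
# Sketch modelled on the committed patch: after set_diceware_password()
# succeeds for the logged-in user, clear the session and force a re-login.
from flask import redirect, session, url_for


def log_out_after_password_reset():
    session.pop('uid', None)        # user id stored at login
    session.pop('expires', None)    # session expiry stored at login
    return redirect(url_for('main.login'))
```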
<code>
[start of securedrop/journalist_app/account.py]
1 # -*- coding: utf-8 -*-
2
3 from flask import (Blueprint, render_template, request, g, redirect, url_for,
4 flash)
5 from flask_babel import gettext
6
7 from db import db_session
8 from journalist_app.utils import (make_password, set_diceware_password,
9 validate_user)
10
11
12 def make_blueprint(config):
13 view = Blueprint('account', __name__)
14
15 @view.route('/account', methods=('GET',))
16 def edit():
17 password = make_password(config)
18 return render_template('edit_account.html',
19 password=password)
20
21 @view.route('/new-password', methods=('POST',))
22 def new_password():
23 user = g.user
24 current_password = request.form.get('current_password')
25 token = request.form.get('token')
26 error_message = gettext('Incorrect password or two-factor code.')
27 # If the user is validated, change their password
28 if validate_user(user.username, current_password, token,
29 error_message):
30 password = request.form.get('password')
31 set_diceware_password(user, password)
32 return redirect(url_for('account.edit'))
33
34 @view.route('/2fa', methods=('GET', 'POST'))
35 def new_two_factor():
36 if request.method == 'POST':
37 token = request.form['token']
38 if g.user.verify_token(token):
39 flash(gettext("Token in two-factor authentication verified."),
40 "notification")
41 return redirect(url_for('account.edit'))
42 else:
43 flash(gettext(
44 "Could not verify token in two-factor authentication."),
45 "error")
46
47 return render_template('account_new_two_factor.html', user=g.user)
48
49 @view.route('/reset-2fa-totp', methods=['POST'])
50 def reset_two_factor_totp():
51 g.user.is_totp = True
52 g.user.regenerate_totp_shared_secret()
53 db_session.commit()
54 return redirect(url_for('account.new_two_factor'))
55
56 @view.route('/reset-2fa-hotp', methods=['POST'])
57 def reset_two_factor_hotp():
58 otp_secret = request.form.get('otp_secret', None)
59 if otp_secret:
60 g.user.set_hotp_secret(otp_secret)
61 db_session.commit()
62 return redirect(url_for('account.new_two_factor'))
63 else:
64 return render_template('account_edit_hotp_secret.html')
65
66 return view
67
[end of securedrop/journalist_app/account.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py
--- a/securedrop/journalist_app/account.py
+++ b/securedrop/journalist_app/account.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from flask import (Blueprint, render_template, request, g, redirect, url_for,
- flash)
+ flash, session)
from flask_babel import gettext
from db import db_session
@@ -29,6 +29,9 @@
error_message):
password = request.form.get('password')
set_diceware_password(user, password)
+ session.pop('uid', None)
+ session.pop('expires', None)
+ return redirect(url_for('main.login'))
return redirect(url_for('account.edit'))
@view.route('/2fa', methods=('GET', 'POST'))
| {"golden_diff": "diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py\n--- a/securedrop/journalist_app/account.py\n+++ b/securedrop/journalist_app/account.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from flask import (Blueprint, render_template, request, g, redirect, url_for,\n- flash)\n+ flash, session)\n from flask_babel import gettext\n \n from db import db_session\n@@ -29,6 +29,9 @@\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n+ session.pop('uid', None)\n+ session.pop('expires', None)\n+ return redirect(url_for('main.login'))\n return redirect(url_for('account.edit'))\n \n @view.route('/2fa', methods=('GET', 'POST'))\n", "issue": "Failure to invalidate session when user resets their own password\n## Description\r\n\r\nWhen a user resets their own password, their session is not invalidated. \r\n\r\n## Steps to Reproduce\r\n\r\n1. User logs in\r\n2. User resets password\r\n\r\n## Expected Behavior\r\n\r\nUser is logged out and is requested to use their new password to login\r\n\r\n## Actual Behavior\r\n\r\nUser can continue to browse without having to enter their new password again\r\n\r\n## Comments\r\n\r\nRelated tickets: #2300, #880\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, render_template, request, g, redirect, url_for,\n flash)\nfrom flask_babel import gettext\n\nfrom db import db_session\nfrom journalist_app.utils import (make_password, set_diceware_password,\n validate_user)\n\n\ndef make_blueprint(config):\n view = Blueprint('account', __name__)\n\n @view.route('/account', methods=('GET',))\n def edit():\n password = make_password(config)\n return render_template('edit_account.html',\n password=password)\n\n @view.route('/new-password', methods=('POST',))\n def new_password():\n user = g.user\n current_password = request.form.get('current_password')\n token = request.form.get('token')\n error_message = gettext('Incorrect password or two-factor code.')\n # If the user is validated, change their password\n if validate_user(user.username, current_password, token,\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('account.edit'))\n\n @view.route('/2fa', methods=('GET', 'POST'))\n def new_two_factor():\n if request.method == 'POST':\n token = request.form['token']\n if g.user.verify_token(token):\n flash(gettext(\"Token in two-factor authentication verified.\"),\n \"notification\")\n return redirect(url_for('account.edit'))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template('account_new_two_factor.html', user=g.user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n def reset_two_factor_totp():\n g.user.is_totp = True\n g.user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n def reset_two_factor_hotp():\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n g.user.set_hotp_secret(otp_secret)\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n else:\n return render_template('account_edit_hotp_secret.html')\n\n return view\n", "path": "securedrop/journalist_app/account.py"}]} | 1,256 | 195 |
gh_patches_debug_22132 | rasdani/github-patches | git_diff | open-mmlab__mmcv-823 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
imshow_bboxes does not show bboxes if img is incontiguous
As [DKDKDDK](https://stackoverflow.com/questions/57586449/why-cv2-rectangle-sometimes-return-np-ndarray-while-sometimes-cv2-umat) asked, cv2.rectangle seems unable to draw in place on non-contiguous arrays. When calling `mmcv.imshow_bboxes` or `mmcv.imshow_det_bboxes`, the contiguousness of the `img` argument is whatever the user passed in. Would it be convenient to add `img = np.ascontiguousarray(img)` inside `mmcv.imshow_bboxes` and `mmcv.imshow_det_bboxes`?
</issue>
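A short reproduction of the point being made, assuming OpenCV and NumPy are installed; the fix ultimately adopted (visible in the patch at the end of this record) is exactly the `np.ascontiguousarray(img)` call suggested in the report.

```python
# A non-contiguous view is easy to create by slicing; per the report,
# cv2.rectangle cannot draw in place on it, so the boxes never show up.
import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)
view = img[:, ::-1, :]                    # reversed view, not C-contiguous
print(view.flags['C_CONTIGUOUS'])         # False

fixed = np.ascontiguousarray(view)        # contiguous copy
cv2.rectangle(fixed, (10, 10), (60, 60), (0, 255, 0), thickness=1)
print(fixed.flags['C_CONTIGUOUS'])        # True, and the rectangle is drawn
```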
<code>
[start of mmcv/visualization/image.py]
1 # Copyright (c) Open-MMLab. All rights reserved.
2 import cv2
3 import numpy as np
4
5 from mmcv.image import imread, imwrite
6 from .color import color_val
7
8
9 def imshow(img, win_name='', wait_time=0):
10 """Show an image.
11
12 Args:
13 img (str or ndarray): The image to be displayed.
14 win_name (str): The window name.
15 wait_time (int): Value of waitKey param.
16 """
17 cv2.imshow(win_name, imread(img))
18 if wait_time == 0: # prevent from hangning if windows was closed
19 while True:
20 ret = cv2.waitKey(1)
21
22 closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
23 # if user closed window or if some key pressed
24 if closed or ret != -1:
25 break
26 else:
27 ret = cv2.waitKey(wait_time)
28
29
30 def imshow_bboxes(img,
31 bboxes,
32 colors='green',
33 top_k=-1,
34 thickness=1,
35 show=True,
36 win_name='',
37 wait_time=0,
38 out_file=None):
39 """Draw bboxes on an image.
40
41 Args:
42 img (str or ndarray): The image to be displayed.
43 bboxes (list or ndarray): A list of ndarray of shape (k, 4).
44 colors (list[str or tuple or Color]): A list of colors.
45 top_k (int): Plot the first k bboxes only if set positive.
46 thickness (int): Thickness of lines.
47 show (bool): Whether to show the image.
48 win_name (str): The window name.
49 wait_time (int): Value of waitKey param.
50 out_file (str, optional): The filename to write the image.
51
52 Returns:
53 ndarray: The image with bboxes drawn on it.
54 """
55 img = imread(img)
56
57 if isinstance(bboxes, np.ndarray):
58 bboxes = [bboxes]
59 if not isinstance(colors, list):
60 colors = [colors for _ in range(len(bboxes))]
61 colors = [color_val(c) for c in colors]
62 assert len(bboxes) == len(colors)
63
64 for i, _bboxes in enumerate(bboxes):
65 _bboxes = _bboxes.astype(np.int32)
66 if top_k <= 0:
67 _top_k = _bboxes.shape[0]
68 else:
69 _top_k = min(top_k, _bboxes.shape[0])
70 for j in range(_top_k):
71 left_top = (_bboxes[j, 0], _bboxes[j, 1])
72 right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
73 cv2.rectangle(
74 img, left_top, right_bottom, colors[i], thickness=thickness)
75
76 if show:
77 imshow(img, win_name, wait_time)
78 if out_file is not None:
79 imwrite(img, out_file)
80 return img
81
82
83 def imshow_det_bboxes(img,
84 bboxes,
85 labels,
86 class_names=None,
87 score_thr=0,
88 bbox_color='green',
89 text_color='green',
90 thickness=1,
91 font_scale=0.5,
92 show=True,
93 win_name='',
94 wait_time=0,
95 out_file=None):
96 """Draw bboxes and class labels (with scores) on an image.
97
98 Args:
99 img (str or ndarray): The image to be displayed.
100 bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
101 (n, 5).
102 labels (ndarray): Labels of bboxes.
103 class_names (list[str]): Names of each classes.
104 score_thr (float): Minimum score of bboxes to be shown.
105 bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
106 text_color (str or tuple or :obj:`Color`): Color of texts.
107 thickness (int): Thickness of lines.
108 font_scale (float): Font scales of texts.
109 show (bool): Whether to show the image.
110 win_name (str): The window name.
111 wait_time (int): Value of waitKey param.
112 out_file (str or None): The filename to write the image.
113
114 Returns:
115 ndarray: The image with bboxes drawn on it.
116 """
117 assert bboxes.ndim == 2
118 assert labels.ndim == 1
119 assert bboxes.shape[0] == labels.shape[0]
120 assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
121 img = imread(img)
122
123 if score_thr > 0:
124 assert bboxes.shape[1] == 5
125 scores = bboxes[:, -1]
126 inds = scores > score_thr
127 bboxes = bboxes[inds, :]
128 labels = labels[inds]
129
130 bbox_color = color_val(bbox_color)
131 text_color = color_val(text_color)
132 img = np.ascontiguousarray(img)
133 for bbox, label in zip(bboxes, labels):
134 bbox_int = bbox.astype(np.int32)
135 left_top = (bbox_int[0], bbox_int[1])
136 right_bottom = (bbox_int[2], bbox_int[3])
137 cv2.rectangle(
138 img, left_top, right_bottom, bbox_color, thickness=thickness)
139 label_text = class_names[
140 label] if class_names is not None else f'cls {label}'
141 if len(bbox) > 4:
142 label_text += f'|{bbox[-1]:.02f}'
143 cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
144 cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
145
146 if show:
147 imshow(img, win_name, wait_time)
148 if out_file is not None:
149 imwrite(img, out_file)
150 return img
151
[end of mmcv/visualization/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmcv/visualization/image.py b/mmcv/visualization/image.py
--- a/mmcv/visualization/image.py
+++ b/mmcv/visualization/image.py
@@ -53,6 +53,7 @@
ndarray: The image with bboxes drawn on it.
"""
img = imread(img)
+ img = np.ascontiguousarray(img)
if isinstance(bboxes, np.ndarray):
bboxes = [bboxes]
@@ -119,6 +120,7 @@
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
+ img = np.ascontiguousarray(img)
if score_thr > 0:
assert bboxes.shape[1] == 5
@@ -129,7 +131,7 @@
bbox_color = color_val(bbox_color)
text_color = color_val(text_color)
- img = np.ascontiguousarray(img)
+
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
| {"golden_diff": "diff --git a/mmcv/visualization/image.py b/mmcv/visualization/image.py\n--- a/mmcv/visualization/image.py\n+++ b/mmcv/visualization/image.py\n@@ -53,6 +53,7 @@\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n img = imread(img)\n+ img = np.ascontiguousarray(img)\n \n if isinstance(bboxes, np.ndarray):\n bboxes = [bboxes]\n@@ -119,6 +120,7 @@\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n img = imread(img)\n+ img = np.ascontiguousarray(img)\n \n if score_thr > 0:\n assert bboxes.shape[1] == 5\n@@ -129,7 +131,7 @@\n \n bbox_color = color_val(bbox_color)\n text_color = color_val(text_color)\n- img = np.ascontiguousarray(img)\n+\n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n", "issue": "imshow_bboxes does not show bboxes if img is incontiguous \nAs [DKDKDDK](https://stackoverflow.com/questions/57586449/why-cv2-rectangle-sometimes-return-np-ndarray-while-sometimes-cv2-umat) asked, cv2.rectangle seems unable to draw inplacely on incontiguous arrays. When calling `mmcv.imshow_bboxes` or `mmcv.imshow_det_bboxes`, the contiguousness of argument `img` is consistent with what the user passed in. Would it be convenient to add `img = np.ascontiguousarray(img)` inside `mmcv.imshow_bboxes` and `mmcv.imshow_det_bboxes`?\n", "before_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport cv2\nimport numpy as np\n\nfrom mmcv.image import imread, imwrite\nfrom .color import color_val\n\n\ndef imshow(img, win_name='', wait_time=0):\n \"\"\"Show an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n \"\"\"\n cv2.imshow(win_name, imread(img))\n if wait_time == 0: # prevent from hangning if windows was closed\n while True:\n ret = cv2.waitKey(1)\n\n closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1\n # if user closed window or if some key pressed\n if closed or ret != -1:\n break\n else:\n ret = cv2.waitKey(wait_time)\n\n\ndef imshow_bboxes(img,\n bboxes,\n colors='green',\n top_k=-1,\n thickness=1,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (list or ndarray): A list of ndarray of shape (k, 4).\n colors (list[str or tuple or Color]): A list of colors.\n top_k (int): Plot the first k bboxes only if set positive.\n thickness (int): Thickness of lines.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str, optional): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n img = imread(img)\n\n if isinstance(bboxes, np.ndarray):\n bboxes = [bboxes]\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(bboxes))]\n colors = [color_val(c) for c in colors]\n assert len(bboxes) == len(colors)\n\n for i, _bboxes in enumerate(bboxes):\n _bboxes = _bboxes.astype(np.int32)\n if top_k <= 0:\n _top_k = _bboxes.shape[0]\n else:\n _top_k = min(top_k, _bboxes.shape[0])\n for j in range(_top_k):\n left_top = (_bboxes[j, 0], _bboxes[j, 1])\n right_bottom = (_bboxes[j, 2], _bboxes[j, 3])\n cv2.rectangle(\n img, left_top, right_bottom, colors[i], thickness=thickness)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n\n\ndef 
imshow_det_bboxes(img,\n bboxes,\n labels,\n class_names=None,\n score_thr=0,\n bbox_color='green',\n text_color='green',\n thickness=1,\n font_scale=0.5,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes and class labels (with scores) on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or\n (n, 5).\n labels (ndarray): Labels of bboxes.\n class_names (list[str]): Names of each classes.\n score_thr (float): Minimum score of bboxes to be shown.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n assert bboxes.ndim == 2\n assert labels.ndim == 1\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n img = imread(img)\n\n if score_thr > 0:\n assert bboxes.shape[1] == 5\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n\n bbox_color = color_val(bbox_color)\n text_color = color_val(text_color)\n img = np.ascontiguousarray(img)\n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n cv2.rectangle(\n img, left_top, right_bottom, bbox_color, thickness=thickness)\n label_text = class_names[\n label] if class_names is not None else f'cls {label}'\n if len(bbox) > 4:\n label_text += f'|{bbox[-1]:.02f}'\n cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n", "path": "mmcv/visualization/image.py"}]} | 2,304 | 272 |
gh_patches_debug_39977 | rasdani/github-patches | git_diff | intel__dffml-568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
operations: io: Fixup example
https://github.com/intel/dffml/blob/c0946b2d212239cfe4e470e63ab3da22c9cd97c4/dffml/operation/io.py#L116
Copy the code here into a Python file, format it with black, then copy it back.
We also want to change
```python
... definition=dataflow.definitions["DataToPrint"],
... parents=None,)]
```
to
```python
... definition=print_output.inputs["data"],
... )
... ]
```
</issue>
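Put together and run through black, the corrected example would look roughly like the block below. Note that the committed patch at the end of this record spells the definition as `print_output.op.inputs["data"]` rather than `print_output.inputs["data"]`; the import lines are the usual paths for this version of dffml and are included only to make the sketch self-contained.

```python
# Black-formatted version of the doctest body for print_output.
import asyncio

from dffml.df.types import DataFlow, Input
from dffml.df.memory import MemoryOrchestrator
from dffml.operation.io import print_output

dataflow = DataFlow.auto(print_output)
inputs = [
    Input(
        value="print_output example", definition=print_output.op.inputs["data"]
    )
]


async def main():
    async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
        pass


asyncio.run(main())
```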
<code>
[start of dffml/operation/preprocess.py]
1 import ast
2 from dffml.df.types import Definition
3 from dffml.df.base import op
4
5
6 # Definitions
7 InputStr = Definition(name="InputStr", primitive="str")
8 EvaluatedStr = Definition(name="EvaluatedStr", primitive="generic")
9
10
11 @op(
12 inputs={"str_to_eval": InputStr},
13 outputs={"str_after_eval": EvaluatedStr},
14 conditions=[],
15 )
16 async def literal_eval(str_to_eval: str):
17 """
18 Evaluate the input using ast.literal_eval()
19
20 Parameters
21 ++++++++++
22 inputs : str
23 A string to be evaluated.
24
25 Returns
26 +++++++
27 A python literal.
28
29 Examples
30 ++++++++
31
32 The following example shows how to use literal_eval.
33
34 >>> dataflow = DataFlow.auto(literal_eval, GetSingle)
35 >>> dataflow.seed.append(
36 ... Input(
37 ... value=[literal_eval.op.outputs["str_after_eval"].name,],
38 ... definition=GetSingle.op.inputs["spec"],
39 ... )
40 ... )
41 >>> inputs = [
42 ... Input(
43 ... value="[1,2,3]",
44 ... definition=literal_eval.op.inputs["str_to_eval"],
45 ... parents=None,
46 ... )
47 ... ]
48 >>>
49 >>> async def main():
50 ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
51 ... print(results)
52 >>>
53 >>> asyncio.run(main())
54 {'EvaluatedStr': [1, 2, 3]}
55 """
56 value = ast.literal_eval(str_to_eval)
57 return {"str_after_eval": value}
58
[end of dffml/operation/preprocess.py]
[start of dffml/operation/io.py]
1 import asyncio
2 import concurrent.futures
3 from typing import Dict, Any
4
5 from dffml.df.types import Operation, Definition
6 from dffml.df.base import (
7 op,
8 OperationImplementationContext,
9 OperationImplementation,
10 )
11
12
13 # Definitions
14 UserInput = Definition(name="UserInput", primitive="str")
15 DataToPrint = Definition(name="DataToPrint", primitive="str")
16
17 AcceptUserInput = Operation(
18 name="AcceptUserInput",
19 inputs={},
20 outputs={"InputData": UserInput},
21 conditions=[],
22 )
23
24
25 class AcceptUserInputContext(OperationImplementationContext):
26 @staticmethod
27 def receive_input():
28 print("Enter the value: ", end="")
29 return input()
30
31 async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
32 user_input = await self.parent.loop.run_in_executor(
33 self.parent.pool, self.receive_input
34 )
35 return {"InputData": user_input}
36
37
38 class AcceptUserInput(OperationImplementation):
39 """
40 Accept input from stdin using python input()
41
42 Parameters
43 ++++++++++
44 inputs : dict
45 A dictionary with a key and empty list as value.
46
47 Returns
48 +++++++
49 dict
50 A dictionary containing user input.
51
52 Examples
53 ++++++++
54
55 The following example shows how to use AcceptUserInput.
56 (Assumes that the input from stdio is "Data flow is awesome"!)
57
58 >>> dataflow = DataFlow.auto(AcceptUserInput, GetSingle)
59 >>> dataflow.seed.append(
60 ... Input(
61 ... value=[AcceptUserInput.op.outputs["InputData"].name],
62 ... definition=GetSingle.op.inputs["spec"]
63 ... )
64 ... )
65 >>>
66 >>> async def main():
67 ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input":[]}):
68 ... print(results)
69 >>>
70 >>> asyncio.run(main())
71 Enter the value: {'UserInput': 'Data flow is awesome'}
72 """
73
74 op = AcceptUserInput
75 CONTEXT = AcceptUserInputContext
76
77 def __init__(self, *args, **kwargs):
78 super().__init__(*args, **kwargs)
79 self.loop = None
80 self.pool = None
81 self.__pool = None
82
83 async def __aenter__(self) -> "OperationImplementationContext":
84 self.loop = asyncio.get_event_loop()
85 self.pool = concurrent.futures.ThreadPoolExecutor()
86 self.__pool = self.pool.__enter__()
87 return self
88
89 async def __aexit__(self, exc_type, exc_value, traceback):
90 self.__pool.__exit__(exc_type, exc_value, traceback)
91 self.__pool = None
92 self.pool = None
93 self.loop = None
94
95
96 @op(inputs={"data": DataToPrint}, outputs={}, conditions=[])
97 async def print_output(data: str):
98 """
99 Print the output on stdout using python print()
100
101 Parameters
102 ++++++++++
103 inputs : list
104 A list of Inputs whose value is to be printed.
105
106 Examples
107 ++++++++
108
109 The following example shows how to use print_output.
110
111 >>> dataflow = DataFlow.auto(print_output, GetSingle)
112 >>> inputs = [
113 ... Input(
114 ... value="print_output example",
115 ... definition=dataflow.definitions["DataToPrint"],
116 ... parents=None,)]
117 >>>
118 >>> async def main():
119 ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
120 ... print("String to be printed is 'print_output example'")
121 >>>
122 >>> asyncio.run(main())
123 print_output example
124 String to be printed is 'print_output example'
125 """
126 print(data)
127
[end of dffml/operation/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dffml/operation/io.py b/dffml/operation/io.py
--- a/dffml/operation/io.py
+++ b/dffml/operation/io.py
@@ -12,7 +12,7 @@
# Definitions
UserInput = Definition(name="UserInput", primitive="str")
-DataToPrint = Definition(name="DataToPrint", primitive="str")
+DataToPrint = Definition(name="DataToPrint", primitive="generic")
AcceptUserInput = Operation(
name="AcceptUserInput",
@@ -39,11 +39,6 @@
"""
Accept input from stdin using python input()
- Parameters
- ++++++++++
- inputs : dict
- A dictionary with a key and empty list as value.
-
Returns
+++++++
dict
@@ -59,12 +54,12 @@
>>> dataflow.seed.append(
... Input(
... value=[AcceptUserInput.op.outputs["InputData"].name],
- ... definition=GetSingle.op.inputs["spec"]
+ ... definition=GetSingle.op.inputs["spec"],
... )
... )
>>>
>>> async def main():
- ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input":[]}):
+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input": []}):
... print(results)
>>>
>>> asyncio.run(main())
@@ -94,33 +89,32 @@
@op(inputs={"data": DataToPrint}, outputs={}, conditions=[])
-async def print_output(data: str):
+async def print_output(data: Any):
"""
Print the output on stdout using python print()
Parameters
++++++++++
- inputs : list
- A list of Inputs whose value is to be printed.
+ data : Any
+ A python literal to be printed.
Examples
++++++++
The following example shows how to use print_output.
- >>> dataflow = DataFlow.auto(print_output, GetSingle)
+ >>> dataflow = DataFlow.auto(print_output)
>>> inputs = [
... Input(
- ... value="print_output example",
- ... definition=dataflow.definitions["DataToPrint"],
- ... parents=None,)]
+ ... value="print_output example", definition=print_output.op.inputs["data"]
+ ... )
+ ... ]
>>>
>>> async def main():
... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
- ... print("String to be printed is 'print_output example'")
+ ... pass
>>>
>>> asyncio.run(main())
print_output example
- String to be printed is 'print_output example'
"""
print(data)
diff --git a/dffml/operation/preprocess.py b/dffml/operation/preprocess.py
--- a/dffml/operation/preprocess.py
+++ b/dffml/operation/preprocess.py
@@ -19,12 +19,13 @@
Parameters
++++++++++
- inputs : str
+ str_to_eval : str
A string to be evaluated.
Returns
+++++++
- A python literal.
+ dict
+ A dict containing python literal.
Examples
++++++++
| {"golden_diff": "diff --git a/dffml/operation/io.py b/dffml/operation/io.py\n--- a/dffml/operation/io.py\n+++ b/dffml/operation/io.py\n@@ -12,7 +12,7 @@\n \n # Definitions\n UserInput = Definition(name=\"UserInput\", primitive=\"str\")\n-DataToPrint = Definition(name=\"DataToPrint\", primitive=\"str\")\n+DataToPrint = Definition(name=\"DataToPrint\", primitive=\"generic\")\n \n AcceptUserInput = Operation(\n name=\"AcceptUserInput\",\n@@ -39,11 +39,6 @@\n \"\"\"\n Accept input from stdin using python input()\n \n- Parameters\n- ++++++++++\n- inputs : dict\n- A dictionary with a key and empty list as value.\n-\n Returns\n +++++++\n dict\n@@ -59,12 +54,12 @@\n >>> dataflow.seed.append(\n ... Input(\n ... value=[AcceptUserInput.op.outputs[\"InputData\"].name],\n- ... definition=GetSingle.op.inputs[\"spec\"]\n+ ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>>\n >>> async def main():\n- ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\":[]}):\n+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\": []}):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n@@ -94,33 +89,32 @@\n \n \n @op(inputs={\"data\": DataToPrint}, outputs={}, conditions=[])\n-async def print_output(data: str):\n+async def print_output(data: Any):\n \"\"\"\n Print the output on stdout using python print()\n \n Parameters\n ++++++++++\n- inputs : list\n- A list of Inputs whose value is to be printed.\n+ data : Any\n+ A python literal to be printed.\n \n Examples\n ++++++++\n \n The following example shows how to use print_output.\n \n- >>> dataflow = DataFlow.auto(print_output, GetSingle)\n+ >>> dataflow = DataFlow.auto(print_output)\n >>> inputs = [\n ... Input(\n- ... value=\"print_output example\",\n- ... definition=dataflow.definitions[\"DataToPrint\"],\n- ... parents=None,)]\n+ ... value=\"print_output example\", definition=print_output.op.inputs[\"data\"]\n+ ... )\n+ ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n- ... print(\"String to be printed is 'print_output example'\")\n+ ... pass\n >>>\n >>> asyncio.run(main())\n print_output example\n- String to be printed is 'print_output example'\n \"\"\"\n print(data)\ndiff --git a/dffml/operation/preprocess.py b/dffml/operation/preprocess.py\n--- a/dffml/operation/preprocess.py\n+++ b/dffml/operation/preprocess.py\n@@ -19,12 +19,13 @@\n \n Parameters\n ++++++++++\n- inputs : str\n+ str_to_eval : str\n A string to be evaluated.\n \n Returns\n +++++++\n- A python literal.\n+ dict\n+ A dict containing python literal.\n \n Examples\n ++++++++\n", "issue": "operations: io: Fixup example\nhttps://github.com/intel/dffml/blob/c0946b2d212239cfe4e470e63ab3da22c9cd97c4/dffml/operation/io.py#L116\r\n\r\ncopy the code here into a Python file and format it with black, then copy it back.\r\n\r\nWe also want to change\r\n\r\n```python\r\n ... definition=dataflow.definitions[\"DataToPrint\"],\r\n ... parents=None,)]\r\n```\r\n\r\nto\r\n\r\n```python\r\n ... definition=print_output.inputs[\"data\"],\r\n ... )\r\n ... 
]\r\n```\n", "before_files": [{"content": "import ast\nfrom dffml.df.types import Definition\nfrom dffml.df.base import op\n\n\n# Definitions\nInputStr = Definition(name=\"InputStr\", primitive=\"str\")\nEvaluatedStr = Definition(name=\"EvaluatedStr\", primitive=\"generic\")\n\n\n@op(\n inputs={\"str_to_eval\": InputStr},\n outputs={\"str_after_eval\": EvaluatedStr},\n conditions=[],\n)\nasync def literal_eval(str_to_eval: str):\n \"\"\"\n Evaluate the input using ast.literal_eval()\n\n Parameters\n ++++++++++\n inputs : str\n A string to be evaluated.\n\n Returns\n +++++++\n A python literal.\n\n Examples\n ++++++++\n\n The following example shows how to use literal_eval.\n\n >>> dataflow = DataFlow.auto(literal_eval, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[literal_eval.op.outputs[\"str_after_eval\"].name,],\n ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>> inputs = [\n ... Input(\n ... value=\"[1,2,3]\",\n ... definition=literal_eval.op.inputs[\"str_to_eval\"],\n ... parents=None,\n ... )\n ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n {'EvaluatedStr': [1, 2, 3]}\n \"\"\"\n value = ast.literal_eval(str_to_eval)\n return {\"str_after_eval\": value}\n", "path": "dffml/operation/preprocess.py"}, {"content": "import asyncio\nimport concurrent.futures\nfrom typing import Dict, Any\n\nfrom dffml.df.types import Operation, Definition\nfrom dffml.df.base import (\n op,\n OperationImplementationContext,\n OperationImplementation,\n)\n\n\n# Definitions\nUserInput = Definition(name=\"UserInput\", primitive=\"str\")\nDataToPrint = Definition(name=\"DataToPrint\", primitive=\"str\")\n\nAcceptUserInput = Operation(\n name=\"AcceptUserInput\",\n inputs={},\n outputs={\"InputData\": UserInput},\n conditions=[],\n)\n\n\nclass AcceptUserInputContext(OperationImplementationContext):\n @staticmethod\n def receive_input():\n print(\"Enter the value: \", end=\"\")\n return input()\n\n async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n user_input = await self.parent.loop.run_in_executor(\n self.parent.pool, self.receive_input\n )\n return {\"InputData\": user_input}\n\n\nclass AcceptUserInput(OperationImplementation):\n \"\"\"\n Accept input from stdin using python input()\n\n Parameters\n ++++++++++\n inputs : dict\n A dictionary with a key and empty list as value.\n\n Returns\n +++++++\n dict\n A dictionary containing user input.\n\n Examples\n ++++++++\n\n The following example shows how to use AcceptUserInput.\n (Assumes that the input from stdio is \"Data flow is awesome\"!)\n\n >>> dataflow = DataFlow.auto(AcceptUserInput, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[AcceptUserInput.op.outputs[\"InputData\"].name],\n ... definition=GetSingle.op.inputs[\"spec\"]\n ... )\n ... )\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\":[]}):\n ... 
print(results)\n >>>\n >>> asyncio.run(main())\n Enter the value: {'UserInput': 'Data flow is awesome'}\n \"\"\"\n\n op = AcceptUserInput\n CONTEXT = AcceptUserInputContext\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.loop = None\n self.pool = None\n self.__pool = None\n\n async def __aenter__(self) -> \"OperationImplementationContext\":\n self.loop = asyncio.get_event_loop()\n self.pool = concurrent.futures.ThreadPoolExecutor()\n self.__pool = self.pool.__enter__()\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n self.__pool.__exit__(exc_type, exc_value, traceback)\n self.__pool = None\n self.pool = None\n self.loop = None\n\n\n@op(inputs={\"data\": DataToPrint}, outputs={}, conditions=[])\nasync def print_output(data: str):\n \"\"\"\n Print the output on stdout using python print()\n\n Parameters\n ++++++++++\n inputs : list\n A list of Inputs whose value is to be printed.\n\n Examples\n ++++++++\n\n The following example shows how to use print_output.\n\n >>> dataflow = DataFlow.auto(print_output, GetSingle)\n >>> inputs = [\n ... Input(\n ... value=\"print_output example\",\n ... definition=dataflow.definitions[\"DataToPrint\"],\n ... parents=None,)]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... print(\"String to be printed is 'print_output example'\")\n >>>\n >>> asyncio.run(main())\n print_output example\n String to be printed is 'print_output example'\n \"\"\"\n print(data)\n", "path": "dffml/operation/io.py"}]} | 2,240 | 753 |
gh_patches_debug_7947 | rasdani/github-patches | git_diff | coreruleset__coreruleset-3232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Review links to OWASP wiki
### Describe the bug
We have references to other OWASP projects in our files:
```
rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf
28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh
366:# https://www.owasp.org/index.php/PHP_Object_Injection
rules/REQUEST-921-PROTOCOL-ATTACK.conf
194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)
rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf
97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000
CHANGES.md
977: https://www.owasp.org/index.php/AppSensor_DetectionPoints
rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf
690:# https://www.owasp.org/index.php/Unrestricted_File_Upload
rules/scanners-user-agents.data
58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project
```
We need to double-check that they are still valid and update them if not.
</issue>
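A throwaway helper along these lines can speed up the review; it is not part of the repository, and the URL list below is just the links quoted above. Each result still needs a human decision about the replacement link, since many of the old wiki pages now redirect to pages under owasp.org.

```python
# Not part of the repository: quick check of where the old wiki links go now.
# Requires the third-party 'requests' package.
import requests

OLD_LINKS = [
    "https://www.owasp.org/index.php/PHP_Object_Injection",
    "https://www.owasp.org/index.php/Unrestricted_File_Upload",
    "https://www.owasp.org/index.php/AppSensor_DetectionPoints",
    "https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project",
]

for url in OLD_LINKS:
    try:
        resp = requests.head(url, allow_redirects=True, timeout=10)
        print(f"{resp.status_code}  {url}  ->  {resp.url}")
    except requests.RequestException as exc:
        print(f"ERROR  {url}  ->  {exc}")
```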
<code>
[start of util/regexp-tricks/negative-lookahead.py]
1 import argparse
2
3 # WARNING: This script is EXPERIMENTAL. Use with caution.
4 #
5 # Known issues:
6 # * At the moment, it will probably not work with more than two strings.
7 #
8 # Known limitations:
9 # * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,
10 # make sure that subtrings of the negative lookahead are not harmful in any way.
11
12 parser = argparse.ArgumentParser(description="This script takes a list of strings and converts them into \
13 a regex that acts like a negative lookahead")
14 parser.add_argument("strings", type=str, nargs='+',
15 help="the strings to convert into a negative lookahead")
16 parser.add_argument("--prefix", type=str, default="",
17 help="sets a prefix for the resulting regex")
18 parser.add_argument("--suffix", type=str, default="",
19 help="sets a suffix for the resulting regex")
20
21 args = parser.parse_args()
22
23 # Return the longest prefix of all list elements. Shamelessly copied from:
24 # https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
25 def commonprefix(m):
26 "Given a list of pathnames, returns the longest common leading component"
27 if not m: return ''
28 s1 = min(m)
29 s2 = max(m)
30 for i, c in enumerate(s1):
31 if c != s2[i]:
32 return s1[:i]
33 return s1
34
35 # flatten returns a string with concatenated dictionary keys
36 def flatten(dict):
37 s = ""
38
39 for key in dict.keys():
40 s += key
41
42 return s
43
44 # set returns a character set containing the unique characters across all strings for the given index
45 def set(strings, index, flags):
46 dict = {}
47
48 for s in strings:
49 # Continue so we don't panic
50 if index > len(s) -1:
51 continue
52
53 dict[s[index]] = ''
54
55 return "[" + flags + flatten(dict) + "]"
56
57 # prepare converts a string for negative lookaheads emulation
58 def prepare(s, offset):
59 r = ""
60
61 if len(s) == 0:
62 return r
63
64 for i in range(offset, len(s)):
65 for j in range(0, i + 1):
66 if j == i:
67 r += "[^" + s[j] + "]"
68 else:
69 r += s[j]
70
71 if i != len(s) - 1:
72 r += "|"
73
74 return r
75
76 # run runs the
77 def run():
78 strings = args.strings
79
80 r = ""
81 r += set(strings, 0, "^")
82
83 c = ""
84 d = {}
85
86 # Only find common string if we have more than one
87 if len(strings) > 1:
88 c = commonprefix(strings)
89
90 # Collect all characters after the common substring from every string
91 for s in strings:
92 if len(s) > len(c) and s.startswith(c):
93 d[s[len(c)]] = ''
94
95 # Add the common string to the regex to prevent accidental matching
96 if len(c) > 0:
97 if len(c) > 1:
98 r += "|" + "(?:" + prepare(c, 1) + ")"
99
100 r += "|" + "(?:" + c + "[^" + flatten(d) + "]" + ")"
101
102 for s in strings:
103 g = ""
104
105 # When the common string is > 0, offset with len(c) + 1 because we handled this earlier
106 if len(c) > 0:
107 g = prepare(s, len(c) + 1)
108 else:
109 g = prepare(s, 1)
110
111 # Add OR boolean if necessary
112 if len(g) > 0:
113 r += "|"
114
115 r += g
116
117 print(args.prefix + "(?:" + r + ")" + args.suffix)
118
119 # Only run if script is called directly
120 if __name__ == "__main__":
121 run()
122
[end of util/regexp-tricks/negative-lookahead.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py
--- a/util/regexp-tricks/negative-lookahead.py
+++ b/util/regexp-tricks/negative-lookahead.py
@@ -21,7 +21,7 @@
args = parser.parse_args()
# Return the longest prefix of all list elements. Shamelessly copied from:
-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
| {"golden_diff": "diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py\n--- a/util/regexp-tricks/negative-lookahead.py\n+++ b/util/regexp-tricks/negative-lookahead.py\n@@ -21,7 +21,7 @@\n args = parser.parse_args()\n \n # Return the longest prefix of all list elements. Shamelessly copied from:\n-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\n+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings\n def commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n", "issue": "Review links to OWASP wiki\n### Describe the bug\r\n\r\nWe have references to other OWASP projects in our files:\r\n\r\n```\r\nrules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\r\n28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh\r\n366:# https://www.owasp.org/index.php/PHP_Object_Injection\r\n\r\nrules/REQUEST-921-PROTOCOL-ATTACK.conf\r\n194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)\r\n\r\nrules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf\r\n97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000\r\n\r\nCHANGES.md\r\n977: https://www.owasp.org/index.php/AppSensor_DetectionPoints\r\n\r\nrules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\r\n690:# https://www.owasp.org/index.php/Unrestricted_File_Upload\r\n\r\nrules/scanners-user-agents.data\r\n58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project\r\n```\r\nWe need to double check they are still valid and update if not.\n", "before_files": [{"content": "import argparse\n\n# WARNING: This script is EXPERIMENTAL. Use with caution.\n#\n# Known issues:\n# * At the moment, it will probably not work with more than two strings.\n#\n# Known limitations:\n# * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,\n# make sure that subtrings of the negative lookahead are not harmful in any way.\n\nparser = argparse.ArgumentParser(description=\"This script takes a list of strings and converts them into \\\n a regex that acts like a negative lookahead\")\nparser.add_argument(\"strings\", type=str, nargs='+',\n help=\"the strings to convert into a negative lookahead\")\nparser.add_argument(\"--prefix\", type=str, default=\"\",\n help=\"sets a prefix for the resulting regex\")\nparser.add_argument(\"--suffix\", type=str, default=\"\",\n help=\"sets a suffix for the resulting regex\")\n\nargs = parser.parse_args()\n\n# Return the longest prefix of all list elements. 
Shamelessly copied from:\n# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\ndef commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1\n\n# flatten returns a string with concatenated dictionary keys\ndef flatten(dict):\n s = \"\"\n\n for key in dict.keys():\n s += key\n\n return s\n\n# set returns a character set containing the unique characters across all strings for the given index\ndef set(strings, index, flags):\n dict = {}\n\n for s in strings:\n # Continue so we don't panic\n if index > len(s) -1:\n continue\n \n dict[s[index]] = ''\n \n return \"[\" + flags + flatten(dict) + \"]\"\n\n# prepare converts a string for negative lookaheads emulation\ndef prepare(s, offset):\n r = \"\"\n\n if len(s) == 0:\n return r\n\n for i in range(offset, len(s)):\n for j in range(0, i + 1):\n if j == i:\n r += \"[^\" + s[j] + \"]\"\n else:\n r += s[j]\n\n if i != len(s) - 1:\n r += \"|\"\n\n return r\n\n# run runs the \ndef run():\n strings = args.strings\n\n r = \"\"\n r += set(strings, 0, \"^\")\n\n c = \"\"\n d = {}\n\n # Only find common string if we have more than one\n if len(strings) > 1:\n c = commonprefix(strings)\n \n # Collect all characters after the common substring from every string\n for s in strings:\n if len(s) > len(c) and s.startswith(c):\n d[s[len(c)]] = ''\n\n # Add the common string to the regex to prevent accidental matching\n if len(c) > 0:\n if len(c) > 1:\n r += \"|\" + \"(?:\" + prepare(c, 1) + \")\"\n\n r += \"|\" + \"(?:\" + c + \"[^\" + flatten(d) + \"]\" + \")\"\n\n for s in strings:\n g = \"\"\n\n # When the common string is > 0, offset with len(c) + 1 because we handled this earlier\n if len(c) > 0:\n g = prepare(s, len(c) + 1)\n else:\n g = prepare(s, 1)\n \n # Add OR boolean if necessary\n if len(g) > 0:\n r += \"|\"\n\n r += g\n\n print(args.prefix + \"(?:\" + r + \")\" + args.suffix)\n\n# Only run if script is called directly\nif __name__ == \"__main__\":\n run()\n", "path": "util/regexp-tricks/negative-lookahead.py"}]} | 1,962 | 170 |
gh_patches_debug_15909 | rasdani/github-patches | git_diff | mindsdb__mindsdb-3116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Error if upload file with not valid symbol
Here is an example of such a file: https://www.kaggle.com/datasets/fedesoriano/covid19-effect-on-liver-cancer-prediction-dataset?select=covid-liver.csv
The `Type_of_incidental_finding` column contains invalid symbols.
</issue>
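The report does not include a traceback, but one plausible failure point is the decode step before the CSV parse, where bytes that are not valid in the guessed encoding raise an error. The sketch below shows one way to make that step tolerant, using `charset_normalizer`, which `file_handler.py` already imports; it is an illustration only, not the change that was actually merged, and the helper name is invented.

```python
# Illustration only: decode uploaded bytes defensively before csv parsing.
# The errors='replace' policy is an assumption for this sketch.
from io import StringIO
from charset_normalizer import from_bytes


def bytes_to_text(raw: bytes) -> StringIO:
    best = from_bytes(raw).best()
    encoding = best.encoding if best is not None else 'utf-8'
    return StringIO(raw.decode(encoding, errors='replace'))
```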
<code>
[start of mindsdb/integrations/handlers/file_handler/file_handler.py]
1 from io import BytesIO, StringIO
2 import os
3 import csv
4 import json
5 import codecs
6 import traceback
7 import tempfile
8 from urllib.parse import urlparse
9
10 import requests
11 import pandas as pd
12 from charset_normalizer import from_bytes
13
14 from mindsdb_sql import parse_sql
15 from mindsdb_sql.parser.ast.base import ASTNode
16 from mindsdb_sql.parser.ast import DropTables, Select
17
18 from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df
19 from mindsdb.integrations.libs.base_handler import DatabaseHandler
20 from mindsdb.integrations.libs.response import (
21 HandlerStatusResponse as StatusResponse,
22 HandlerResponse as Response,
23 RESPONSE_TYPE
24 )
25
26
27 def clean_row(row):
28 n_row = []
29 for cell in row:
30 if str(cell) in ['', ' ', ' ', 'NaN', 'nan', 'NA']:
31 n_row.append(None)
32 else:
33 n_row.append(cell)
34
35 return n_row
36
37
38 class FileHandler(DatabaseHandler):
39 """
40 Handler for files
41 """
42 name = 'files'
43
44 def __init__(self, name=None, file_storage=None, connection_data={}, file_controller=None):
45 super().__init__(name)
46 self.parser = parse_sql
47 self.fs_store = file_storage
48 self.custom_parser = connection_data.get('custom_parser', None)
49 self.clean_rows = connection_data.get('clean_rows', True)
50 self.file_controller = file_controller
51
52 def connect(self, **kwargs):
53 return
54
55 def disconnect(self, **kwargs):
56 return
57
58 def check_connection(self) -> StatusResponse:
59 return StatusResponse(True)
60
61 def query(self, query: ASTNode) -> Response:
62 if type(query) == DropTables:
63 for table_identifier in query.tables:
64 if len(table_identifier.parts) == 2 and table_identifier.parts[0] != self.name:
65 return Response(
66 RESPONSE_TYPE.ERROR,
67 error_message=f"Can't delete table from database '{table_identifier.parts[0]}'"
68 )
69 table_name = table_identifier.parts[-1]
70 try:
71 self.file_controller.delete_file(table_name)
72 except Exception as e:
73 return Response(
74 RESPONSE_TYPE.ERROR,
75 error_message=f"Can't delete table '{table_name}': {e}"
76 )
77 return Response(RESPONSE_TYPE.OK)
78 elif type(query) == Select:
79 table_name = query.from_table.parts[-1]
80 file_path = self.file_controller.get_file_path(table_name)
81 df, _columns = self._handle_source(file_path, self.clean_rows, self.custom_parser)
82 result_df = query_df(df, query)
83 return Response(
84 RESPONSE_TYPE.TABLE,
85 data_frame=result_df
86 )
87 else:
88 return Response(
89 RESPONSE_TYPE.ERROR,
90 error_message="Only 'select' and 'drop' queries allowed for files"
91 )
92
93 def native_query(self, query: str) -> Response:
94 ast = self.parser(query, dialect='mindsdb')
95 return self.query(ast)
96
97 @staticmethod
98 def _handle_source(file_path, clean_rows=True, custom_parser=None):
99 # get file data io, format and dialect
100 data, fmt, dialect = FileHandler._get_data_io(file_path)
101 data.seek(0) # make sure we are at 0 in file pointer
102
103 if custom_parser:
104 header, file_data = custom_parser(data, fmt)
105
106 elif fmt == 'csv':
107 csv_reader = list(csv.reader(data, dialect))
108 header = csv_reader[0]
109 file_data = csv_reader[1:]
110
111 elif fmt in ['xlsx', 'xls']:
112 data.seek(0)
113 df = pd.read_excel(data)
114 header = df.columns.values.tolist()
115 file_data = df.values.tolist()
116
117 elif fmt == 'json':
118 data.seek(0)
119 json_doc = json.loads(data.read())
120 df = pd.json_normalize(json_doc, max_level=0)
121 header = df.columns.values.tolist()
122 file_data = df.values.tolist()
123
124 else:
125 raise ValueError('Could not load file into any format, supported formats are csv, json, xls, xlsx')
126
127 if clean_rows:
128 file_list_data = [clean_row(row) for row in file_data]
129 else:
130 file_list_data = file_data
131
132 header = [x.strip() for x in header]
133 col_map = dict((col, col) for col in header)
134 return pd.DataFrame(file_list_data, columns=header), col_map
135
136 @staticmethod
137 def _get_data_io(file_path):
138 """
139 This gets a file either url or local file and defines what the format is as well as dialect
140 :param file: file path or url
141 :return: data_io, format, dialect
142 """
143
144 ############
145 # get file as io object
146 ############
147
148 # file_path = self._get_file_path()
149
150 data = BytesIO()
151
152 try:
153 with open(file_path, 'rb') as fp:
154 data = BytesIO(fp.read())
155 except Exception as e:
156 error = 'Could not load file, possible exception : {exception}'.format(exception=e)
157 print(error)
158 raise ValueError(error)
159
160 dialect = None
161
162 ############
163 # check for file type
164 ############
165
166 # try to guess if its an excel file
167 xlsx_sig = b'\x50\x4B\x05\06'
168 # xlsx_sig2 = b'\x50\x4B\x03\x04'
169 xls_sig = b'\x09\x08\x10\x00\x00\x06\x05\x00'
170
171 # different whence, offset, size for different types
172 excel_meta = [('xls', 0, 512, 8), ('xlsx', 2, -22, 4)]
173
174 for filename, whence, offset, size in excel_meta:
175
176 try:
177 data.seek(offset, whence) # Seek to the offset.
178 bytes = data.read(size) # Capture the specified number of bytes.
179 data.seek(0)
180 codecs.getencoder('hex')(bytes)
181
182 if bytes == xls_sig:
183 return data, 'xls', dialect
184 elif bytes == xlsx_sig:
185 return data, 'xlsx', dialect
186
187 except Exception:
188 data.seek(0)
189
190 # if not excel it can be a json file or a CSV, convert from binary to stringio
191
192 byte_str = data.read()
193 # Move it to StringIO
194 try:
195 # Handle Microsoft's BOM "special" UTF-8 encoding
196 if byte_str.startswith(codecs.BOM_UTF8):
197 data = StringIO(byte_str.decode('utf-8-sig'))
198 else:
199 file_encoding_meta = from_bytes(
200 byte_str[:32 * 1024],
201 steps=32, # Number of steps/block to extract from my_byte_str
202 chunk_size=1024, # Set block size of each extraction)
203 explain=False
204 )
205 best_meta = file_encoding_meta.best()
206 if best_meta is not None:
207 encoding = file_encoding_meta.best().encoding
208 else:
209 encoding = 'utf-8'
210 data = StringIO(byte_str.decode(encoding))
211 except Exception:
212 print(traceback.format_exc())
213 print('Could not load into string')
214
215 # see if its JSON
216 buffer = data.read(100)
217 data.seek(0)
218 text = buffer.strip()
219 # analyze first n characters
220 if len(text) > 0:
221 text = text.strip()
222 # it it looks like a json, then try to parse it
223 if text.startswith('{') or text.startswith('['):
224 try:
225 json.loads(data.read())
226 data.seek(0)
227 return data, 'json', dialect
228 except Exception:
229 data.seek(0)
230 return data, None, dialect
231
232 # lets try to figure out if its a csv
233 try:
234 dialect = FileHandler._get_csv_dialect(data)
235 if dialect:
236 return data, 'csv', dialect
237 return data, None, dialect
238 except Exception:
239 data.seek(0)
240 print('Could not detect format for this file')
241 print(traceback.format_exc())
242 # No file type identified
243 return data, None, dialect
244
245 @staticmethod
246 def _get_file_path(path) -> str:
247 try:
248 is_url = urlparse(path).scheme in ('http', 'https')
249 except Exception:
250 is_url = False
251 if is_url:
252 path = FileHandler._fetch_url(path)
253 return path
254
255 @staticmethod
256 def _get_csv_dialect(buffer) -> csv.Dialect:
257 sample = buffer.read(128 * 1024)
258 buffer.seek(0)
259 try:
260 accepted_csv_delimiters = [',', '\t', ';']
261 dialect = csv.Sniffer().sniff(sample, delimiters=accepted_csv_delimiters)
262 except csv.Error:
263 dialect = None
264 return dialect
265
266 @staticmethod
267 def _fetch_url(url: str) -> str:
268 temp_dir = tempfile.mkdtemp(prefix='mindsdb_file_url_')
269 try:
270 r = requests.get(url, stream=True)
271 if r.status_code == 200:
272 with open(os.path.join(temp_dir, 'file'), 'wb') as f:
273 for chunk in r:
274 f.write(chunk)
275 else:
276 raise Exception(f'Responce status code is {r.status_code}')
277 except Exception as e:
278 print(f'Error during getting {url}')
279 print(e)
280 raise
281 return os.path.join(temp_dir, 'file')
282
283 def get_tables(self) -> Response:
284 """
285 List all files
286 """
287 files_meta = self.file_controller.get_files()
288 data = [{
289 'TABLE_NAME': x['name'],
290 'TABLE_ROWS': x['row_count']
291 } for x in files_meta]
292 return Response(
293 RESPONSE_TYPE.TABLE,
294 data_frame=pd.DataFrame(data)
295 )
296
297 def get_columns(self, table_name) -> Response:
298 file_meta = self.file_controller.get_file_meta(table_name)
299 result = Response(
300 RESPONSE_TYPE.TABLE,
301 data_frame=pd.DataFrame([
302 {
303 'Field': x.strip(),
304 'Type': 'str'
305 } for x in file_meta['columns']
306 ])
307 )
308 return result
309
[end of mindsdb/integrations/handlers/file_handler/file_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/integrations/handlers/file_handler/file_handler.py b/mindsdb/integrations/handlers/file_handler/file_handler.py
--- a/mindsdb/integrations/handlers/file_handler/file_handler.py
+++ b/mindsdb/integrations/handlers/file_handler/file_handler.py
@@ -203,11 +203,13 @@
explain=False
)
best_meta = file_encoding_meta.best()
+ errors = 'strict'
if best_meta is not None:
encoding = file_encoding_meta.best().encoding
else:
encoding = 'utf-8'
- data = StringIO(byte_str.decode(encoding))
+ errors = 'replace'
+ data = StringIO(byte_str.decode(encoding, errors))
except Exception:
print(traceback.format_exc())
print('Could not load into string')
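A rough illustration of why the patch switches to `errors='replace'` when decoding: a strict decode raises on bytes that are invalid for the chosen encoding, while `errors='replace'` substitutes them so the CSV can still be parsed. The byte string below is hypothetical, not taken from the Kaggle file.

```python
from io import StringIO

import pandas as pd

# Hypothetical content: mostly UTF-8, with one byte (0x92) that is invalid UTF-8.
raw = b"Type_of_incidental_finding\nRoutine\nIncidental\x92finding\n"

try:
    raw.decode("utf-8")  # strict mode raises on the bad byte
except UnicodeDecodeError as exc:
    print("strict decode failed:", exc)

# errors='replace' keeps going, inserting U+FFFD for the invalid byte, which is
# what the patched handler falls back to when charset detection is uncertain.
text = raw.decode("utf-8", errors="replace")
print(pd.read_csv(StringIO(text)))
```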
| {"golden_diff": "diff --git a/mindsdb/integrations/handlers/file_handler/file_handler.py b/mindsdb/integrations/handlers/file_handler/file_handler.py\n--- a/mindsdb/integrations/handlers/file_handler/file_handler.py\n+++ b/mindsdb/integrations/handlers/file_handler/file_handler.py\n@@ -203,11 +203,13 @@\n explain=False\n )\n best_meta = file_encoding_meta.best()\n+ errors = 'strict'\n if best_meta is not None:\n encoding = file_encoding_meta.best().encoding\n else:\n encoding = 'utf-8'\n- data = StringIO(byte_str.decode(encoding))\n+ errors = 'replace'\n+ data = StringIO(byte_str.decode(encoding, errors))\n except Exception:\n print(traceback.format_exc())\n print('Could not load into string')\n", "issue": "[BUG] Error if upload file with not valid symbol\nHere is example of such file: https://www.kaggle.com/datasets/fedesoriano/covid19-effect-on-liver-cancer-prediction-dataset?select=covid-liver.csv\r\nColumn `Type_of_incidental_finding` contains not valid symbols. \n", "before_files": [{"content": "from io import BytesIO, StringIO\nimport os\nimport csv\nimport json\nimport codecs\nimport traceback\nimport tempfile\nfrom urllib.parse import urlparse\n\nimport requests\nimport pandas as pd\nfrom charset_normalizer import from_bytes\n\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast.base import ASTNode\nfrom mindsdb_sql.parser.ast import DropTables, Select\n\nfrom mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\n\n\ndef clean_row(row):\n n_row = []\n for cell in row:\n if str(cell) in ['', ' ', ' ', 'NaN', 'nan', 'NA']:\n n_row.append(None)\n else:\n n_row.append(cell)\n\n return n_row\n\n\nclass FileHandler(DatabaseHandler):\n \"\"\"\n Handler for files\n \"\"\"\n name = 'files'\n\n def __init__(self, name=None, file_storage=None, connection_data={}, file_controller=None):\n super().__init__(name)\n self.parser = parse_sql\n self.fs_store = file_storage\n self.custom_parser = connection_data.get('custom_parser', None)\n self.clean_rows = connection_data.get('clean_rows', True)\n self.file_controller = file_controller\n\n def connect(self, **kwargs):\n return\n\n def disconnect(self, **kwargs):\n return\n\n def check_connection(self) -> StatusResponse:\n return StatusResponse(True)\n\n def query(self, query: ASTNode) -> Response:\n if type(query) == DropTables:\n for table_identifier in query.tables:\n if len(table_identifier.parts) == 2 and table_identifier.parts[0] != self.name:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table from database '{table_identifier.parts[0]}'\"\n )\n table_name = table_identifier.parts[-1]\n try:\n self.file_controller.delete_file(table_name)\n except Exception as e:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Can't delete table '{table_name}': {e}\"\n )\n return Response(RESPONSE_TYPE.OK)\n elif type(query) == Select:\n table_name = query.from_table.parts[-1]\n file_path = self.file_controller.get_file_path(table_name)\n df, _columns = self._handle_source(file_path, self.clean_rows, self.custom_parser)\n result_df = query_df(df, query)\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=result_df\n )\n else:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=\"Only 'select' and 'drop' queries allowed for files\"\n )\n\n def native_query(self, query: str) -> 
Response:\n ast = self.parser(query, dialect='mindsdb')\n return self.query(ast)\n\n @staticmethod\n def _handle_source(file_path, clean_rows=True, custom_parser=None):\n # get file data io, format and dialect\n data, fmt, dialect = FileHandler._get_data_io(file_path)\n data.seek(0) # make sure we are at 0 in file pointer\n\n if custom_parser:\n header, file_data = custom_parser(data, fmt)\n\n elif fmt == 'csv':\n csv_reader = list(csv.reader(data, dialect))\n header = csv_reader[0]\n file_data = csv_reader[1:]\n\n elif fmt in ['xlsx', 'xls']:\n data.seek(0)\n df = pd.read_excel(data)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n elif fmt == 'json':\n data.seek(0)\n json_doc = json.loads(data.read())\n df = pd.json_normalize(json_doc, max_level=0)\n header = df.columns.values.tolist()\n file_data = df.values.tolist()\n\n else:\n raise ValueError('Could not load file into any format, supported formats are csv, json, xls, xlsx')\n\n if clean_rows:\n file_list_data = [clean_row(row) for row in file_data]\n else:\n file_list_data = file_data\n\n header = [x.strip() for x in header]\n col_map = dict((col, col) for col in header)\n return pd.DataFrame(file_list_data, columns=header), col_map\n\n @staticmethod\n def _get_data_io(file_path):\n \"\"\"\n This gets a file either url or local file and defines what the format is as well as dialect\n :param file: file path or url\n :return: data_io, format, dialect\n \"\"\"\n\n ############\n # get file as io object\n ############\n\n # file_path = self._get_file_path()\n\n data = BytesIO()\n\n try:\n with open(file_path, 'rb') as fp:\n data = BytesIO(fp.read())\n except Exception as e:\n error = 'Could not load file, possible exception : {exception}'.format(exception=e)\n print(error)\n raise ValueError(error)\n\n dialect = None\n\n ############\n # check for file type\n ############\n\n # try to guess if its an excel file\n xlsx_sig = b'\\x50\\x4B\\x05\\06'\n # xlsx_sig2 = b'\\x50\\x4B\\x03\\x04'\n xls_sig = b'\\x09\\x08\\x10\\x00\\x00\\x06\\x05\\x00'\n\n # different whence, offset, size for different types\n excel_meta = [('xls', 0, 512, 8), ('xlsx', 2, -22, 4)]\n\n for filename, whence, offset, size in excel_meta:\n\n try:\n data.seek(offset, whence) # Seek to the offset.\n bytes = data.read(size) # Capture the specified number of bytes.\n data.seek(0)\n codecs.getencoder('hex')(bytes)\n\n if bytes == xls_sig:\n return data, 'xls', dialect\n elif bytes == xlsx_sig:\n return data, 'xlsx', dialect\n\n except Exception:\n data.seek(0)\n\n # if not excel it can be a json file or a CSV, convert from binary to stringio\n\n byte_str = data.read()\n # Move it to StringIO\n try:\n # Handle Microsoft's BOM \"special\" UTF-8 encoding\n if byte_str.startswith(codecs.BOM_UTF8):\n data = StringIO(byte_str.decode('utf-8-sig'))\n else:\n file_encoding_meta = from_bytes(\n byte_str[:32 * 1024],\n steps=32, # Number of steps/block to extract from my_byte_str\n chunk_size=1024, # Set block size of each extraction)\n explain=False\n )\n best_meta = file_encoding_meta.best()\n if best_meta is not None:\n encoding = file_encoding_meta.best().encoding\n else:\n encoding = 'utf-8'\n data = StringIO(byte_str.decode(encoding))\n except Exception:\n print(traceback.format_exc())\n print('Could not load into string')\n\n # see if its JSON\n buffer = data.read(100)\n data.seek(0)\n text = buffer.strip()\n # analyze first n characters\n if len(text) > 0:\n text = text.strip()\n # it it looks like a json, then try to parse it\n if text.startswith('{') 
or text.startswith('['):\n try:\n json.loads(data.read())\n data.seek(0)\n return data, 'json', dialect\n except Exception:\n data.seek(0)\n return data, None, dialect\n\n # lets try to figure out if its a csv\n try:\n dialect = FileHandler._get_csv_dialect(data)\n if dialect:\n return data, 'csv', dialect\n return data, None, dialect\n except Exception:\n data.seek(0)\n print('Could not detect format for this file')\n print(traceback.format_exc())\n # No file type identified\n return data, None, dialect\n\n @staticmethod\n def _get_file_path(path) -> str:\n try:\n is_url = urlparse(path).scheme in ('http', 'https')\n except Exception:\n is_url = False\n if is_url:\n path = FileHandler._fetch_url(path)\n return path\n\n @staticmethod\n def _get_csv_dialect(buffer) -> csv.Dialect:\n sample = buffer.read(128 * 1024)\n buffer.seek(0)\n try:\n accepted_csv_delimiters = [',', '\\t', ';']\n dialect = csv.Sniffer().sniff(sample, delimiters=accepted_csv_delimiters)\n except csv.Error:\n dialect = None\n return dialect\n\n @staticmethod\n def _fetch_url(url: str) -> str:\n temp_dir = tempfile.mkdtemp(prefix='mindsdb_file_url_')\n try:\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(os.path.join(temp_dir, 'file'), 'wb') as f:\n for chunk in r:\n f.write(chunk)\n else:\n raise Exception(f'Responce status code is {r.status_code}')\n except Exception as e:\n print(f'Error during getting {url}')\n print(e)\n raise\n return os.path.join(temp_dir, 'file')\n\n def get_tables(self) -> Response:\n \"\"\"\n List all files\n \"\"\"\n files_meta = self.file_controller.get_files()\n data = [{\n 'TABLE_NAME': x['name'],\n 'TABLE_ROWS': x['row_count']\n } for x in files_meta]\n return Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame(data)\n )\n\n def get_columns(self, table_name) -> Response:\n file_meta = self.file_controller.get_file_meta(table_name)\n result = Response(\n RESPONSE_TYPE.TABLE,\n data_frame=pd.DataFrame([\n {\n 'Field': x.strip(),\n 'Type': 'str'\n } for x in file_meta['columns']\n ])\n )\n return result\n", "path": "mindsdb/integrations/handlers/file_handler/file_handler.py"}]} | 3,677 | 184 |
gh_patches_debug_26830 | rasdani/github-patches | git_diff | nilearn__nilearn-1219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres
`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf` from `sklearn.covariance`.
I also prefer using partial correlations rather than the precision matrix, because there is no need to negate the connections.
</issue>
<code>
[start of examples/03_connectivity/plot_adhd_spheres.py]
1 """
2 Extracting brain signal from spheres
3 ====================================
4
5 This example extract brain signals from spheres described by the coordinates
6 of their center in MNI space and a given radius in millimeters. In particular,
7 this example extracts signals from Default Mode Network regions and compute a
8 connectome from them.
9
10 """
11
12 ##########################################################################
13 # Retrieve the dataset
14 from nilearn import datasets
15 adhd_dataset = datasets.fetch_adhd(n_subjects=1)
16
17 # print basic information on the dataset
18 print('First subject functional nifti image (4D) is at: %s' %
19 adhd_dataset.func[0]) # 4D data
20
21
22 ##########################################################################
23 # Coordinates of Default Mode Network
24 dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
25 labels = [
26 'Posterior Cingulate Cortex',
27 'Left Temporoparietal junction',
28 'Right Temporoparietal junction',
29 'Medial prefrontal cortex'
30 ]
31
32
33 ##########################################################################
34 # Extracts signal from sphere around DMN seeds
35 from nilearn import input_data
36
37 masker = input_data.NiftiSpheresMasker(
38 dmn_coords, radius=8,
39 detrend=True, standardize=True,
40 low_pass=0.1, high_pass=0.01, t_r=2.5,
41 memory='nilearn_cache', memory_level=1, verbose=2)
42
43 func_filename = adhd_dataset.func[0]
44 confound_filename = adhd_dataset.confounds[0]
45
46 time_series = masker.fit_transform(func_filename,
47 confounds=[confound_filename])
48
49 ##########################################################################
50 # Display time series
51 import matplotlib.pyplot as plt
52 for time_serie, label in zip(time_series.T, labels):
53 plt.plot(time_serie, label=label)
54
55 plt.title('Default Mode Network Time Series')
56 plt.xlabel('Scan number')
57 plt.ylabel('Normalized signal')
58 plt.legend()
59 plt.tight_layout()
60
61
62 ##########################################################################
63 # Compute precision matrices
64 from sklearn.covariance import LedoitWolf
65 cve = LedoitWolf()
66 cve.fit(time_series)
67
68
69 ##########################################################################
70 # Display connectome
71 from nilearn import plotting
72
73 plotting.plot_connectome(cve.precision_, dmn_coords,
74 title="Default Mode Network Connectivity")
75
76 # Display connectome with hemispheric projections.
77 # Notice (0, -52, 18) is included in both hemispheres since x == 0.
78 title = "Connectivity projected on hemispheres"
79 plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
80 display_mode='lyrz')
81
82 plotting.show()
83
[end of examples/03_connectivity/plot_adhd_spheres.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py
--- a/examples/03_connectivity/plot_adhd_spheres.py
+++ b/examples/03_connectivity/plot_adhd_spheres.py
@@ -60,23 +60,25 @@
##########################################################################
-# Compute precision matrices
-from sklearn.covariance import LedoitWolf
-cve = LedoitWolf()
-cve.fit(time_series)
-
+# Compute partial correlation matrix using object
+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance
+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.
+from nilearn.connectome import ConnectivityMeasure
+connectivity_measure = ConnectivityMeasure(kind='partial correlation')
+partial_correlation_matrix = connectivity_measure.fit_transform(
+ [time_series])[0]
##########################################################################
# Display connectome
from nilearn import plotting
-plotting.plot_connectome(cve.precision_, dmn_coords,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
title="Default Mode Network Connectivity")
# Display connectome with hemispheric projections.
# Notice (0, -52, 18) is included in both hemispheres since x == 0.
title = "Connectivity projected on hemispheres"
-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,
display_mode='lyrz')
plotting.show()
| {"golden_diff": "diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py\n--- a/examples/03_connectivity/plot_adhd_spheres.py\n+++ b/examples/03_connectivity/plot_adhd_spheres.py\n@@ -60,23 +60,25 @@\n \n \n ##########################################################################\n-# Compute precision matrices\n-from sklearn.covariance import LedoitWolf\n-cve = LedoitWolf()\n-cve.fit(time_series)\n-\n+# Compute partial correlation matrix using object\n+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance\n+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.\n+from nilearn.connectome import ConnectivityMeasure\n+connectivity_measure = ConnectivityMeasure(kind='partial correlation')\n+partial_correlation_matrix = connectivity_measure.fit_transform(\n+ [time_series])[0]\n \n ##########################################################################\n # Display connectome\n from nilearn import plotting\n \n-plotting.plot_connectome(cve.precision_, dmn_coords,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n \n # Display connectome with hemispheric projections.\n # Notice (0, -52, 18) is included in both hemispheres since x == 0.\n title = \"Connectivity projected on hemispheres\"\n-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,\n display_mode='lyrz')\n \n plotting.show()\n", "issue": "sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres\n`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf`from `sklearn.covariance`.\nI also prefer using partial correlations rather than precision, because no need for negating the connections.\n\n", "before_files": [{"content": "\"\"\"\nExtracting brain signal from spheres\n====================================\n\nThis example extract brain signals from spheres described by the coordinates\nof their center in MNI space and a given radius in millimeters. 
In particular,\nthis example extracts signals from Default Mode Network regions and compute a\nconnectome from them.\n\n\"\"\"\n\n##########################################################################\n# Retrieve the dataset\nfrom nilearn import datasets\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n adhd_dataset.func[0]) # 4D data\n\n\n##########################################################################\n# Coordinates of Default Mode Network\ndmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]\nlabels = [\n 'Posterior Cingulate Cortex',\n 'Left Temporoparietal junction',\n 'Right Temporoparietal junction',\n 'Medial prefrontal cortex'\n]\n\n\n##########################################################################\n# Extracts signal from sphere around DMN seeds\nfrom nilearn import input_data\n\nmasker = input_data.NiftiSpheresMasker(\n dmn_coords, radius=8,\n detrend=True, standardize=True,\n low_pass=0.1, high_pass=0.01, t_r=2.5,\n memory='nilearn_cache', memory_level=1, verbose=2)\n\nfunc_filename = adhd_dataset.func[0]\nconfound_filename = adhd_dataset.confounds[0]\n\ntime_series = masker.fit_transform(func_filename,\n confounds=[confound_filename])\n\n##########################################################################\n# Display time series\nimport matplotlib.pyplot as plt\nfor time_serie, label in zip(time_series.T, labels):\n plt.plot(time_serie, label=label)\n\nplt.title('Default Mode Network Time Series')\nplt.xlabel('Scan number')\nplt.ylabel('Normalized signal')\nplt.legend()\nplt.tight_layout()\n\n\n##########################################################################\n# Compute precision matrices\nfrom sklearn.covariance import LedoitWolf\ncve = LedoitWolf()\ncve.fit(time_series)\n\n\n##########################################################################\n# Display connectome\nfrom nilearn import plotting\n\nplotting.plot_connectome(cve.precision_, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n\n# Display connectome with hemispheric projections.\n# Notice (0, -52, 18) is included in both hemispheres since x == 0.\ntitle = \"Connectivity projected on hemispheres\"\nplotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n display_mode='lyrz')\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_adhd_spheres.py"}]} | 1,358 | 341 |
gh_patches_debug_15405 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some modules are missing from the top-level import
Issue referenced during PR here: https://github.com/pyjanitor-devs/pyjanitor/pull/977#discussion_r781732964
For example, now running:
```python
import janitor as jn
jn.io.read_csvs("") # throws AttributeError: module 'janitor' has no attribute 'io'
```
Similarly for other modules like biology or timeseries.
</issue>
<code>
[start of janitor/__init__.py]
1 """Top-level janitor API lives here."""
2 try:
3 import janitor.xarray # noqa: F401
4 except ImportError:
5 pass
6
7 from .functions import * # noqa: F403, F401
8 from .math import * # noqa: F403, F401
9 from .ml import get_features_targets as _get_features_targets
10 from .utils import refactored_function
11 from .accessors import * # noqa: F403, F401
12
13
14 @refactored_function(
15 "get_features_targets() has moved. Please use ml.get_features_targets()."
16 )
17 def get_features_targets(*args, **kwargs):
18 """Wrapper for get_features_targets."""
19 return _get_features_targets(*args, **kwargs)
20
21
22 __version__ = "0.22.0"
23
[end of janitor/__init__.py]
[start of janitor/io.py]
1 import os
2 import subprocess
3 from glob import glob
4 from io import StringIO
5 from typing import Iterable, Union
6
7 import pandas as pd
8
9 from .errors import JanitorError
10 from .utils import deprecated_alias, check
11
12
13 @deprecated_alias(seperate_df="separate_df", filespath="files_path")
14 def read_csvs(
15 files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs
16 ) -> Union[pd.DataFrame, dict]:
17 """
18 Read multiple CSV files and return a dictionary of DataFrames, or
19 one concatenated DataFrame.
20
21 :param files_path: The filepath pattern matching the CSV files.
22 Accepts regular expressions, with or without `.csv` extension.
23 Also accepts iterable of file paths.
24 :param separate_df: If `False` (default), returns a single Dataframe
25 with the concatenation of the csv files.
26 If `True`, returns a dictionary of separate DataFrames
27 for each CSV file.
28 :param kwargs: Keyword arguments to pass into the
29 original pandas `read_csv`.
30 :returns: DataFrame of concatenated DataFrames or dictionary of DataFrames.
31 :raises JanitorError: if `None` provided for `files_path`.
32 :raises JanitorError: if length of `files_path` is `0`.
33 :raises ValueError: if no CSV files exist in `files_path`.
34 :raises ValueError: if columns in input CSV files do not match.
35 """
36 # Sanitize input
37 if files_path is None:
38 raise JanitorError("`None` provided for `files_path`")
39 if len(files_path) == 0:
40 raise JanitorError("0 length `files_path` provided")
41
42 # Read the csv files
43 # String to file/folder or file pattern provided
44 if isinstance(files_path, str):
45 dfs_dict = {
46 os.path.basename(f): pd.read_csv(f, **kwargs)
47 for f in glob(files_path)
48 }
49 # Iterable of file paths provided
50 else:
51 dfs_dict = {
52 os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path
53 }
54 # Check if dataframes have been read
55 if len(dfs_dict) == 0:
56 raise ValueError("No CSV files to read with the given `files_path`")
57 # Concatenate the dataframes if requested (default)
58 col_names = list(dfs_dict.values())[0].columns # noqa: PD011
59 if not separate_df:
60 # If columns do not match raise an error
61 for df in dfs_dict.values(): # noqa: PD011
62 if not all(df.columns == col_names):
63 raise ValueError(
64 "Columns in input CSV files do not match."
65 "Files cannot be concatenated"
66 )
67 return pd.concat(
68 list(dfs_dict.values()),
69 ignore_index=True,
70 sort=False, # noqa: PD011
71 )
72 else:
73 return dfs_dict
74
75
76 def read_commandline(cmd: str, **kwargs) -> pd.DataFrame:
77 """
78 Read a CSV file based on a command-line command.
79
80 For example, you may wish to run the following command on `sep-quarter.csv`
81 before reading it into a pandas DataFrame:
82
83 ```bash
84 cat sep-quarter.csv | grep .SEA1AA
85 ```
86
87 In this case, you can use the following Python code to load the dataframe:
88
89 ```python
90 import janitor as jn
91 df = jn.io.read_commandline("cat data.csv | grep .SEA1AA")
92
93 This function assumes that your command line command will return
94 an output that is parsable using pandas.read_csv and StringIO.
95 We default to using pd.read_csv underneath the hood.
96 Keyword arguments are passed through to read_csv.
97 ```
98
99 :param cmd: Shell command to preprocess a file on disk.
100 :param kwargs: Keyword arguments that are passed through to pd.read_csv().
101 :raises JanitorError: If commandline command is malformed or invalid.
102 :returns: A pandas DataFrame parsed from the stdout of the underlying
103 shell.
104 """
105
106 check("cmd", cmd, [str])
107 outcome = subprocess.run(cmd, shell=True, capture_output=True, text=True)
108 if outcome.returncode != 0:
109 raise JanitorError(outcome.stderr)
110 else:
111 outcome = outcome.stdout
112 return pd.read_csv(StringIO(outcome), **kwargs)
113
[end of janitor/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/__init__.py b/janitor/__init__.py
--- a/janitor/__init__.py
+++ b/janitor/__init__.py
@@ -5,6 +5,7 @@
pass
from .functions import * # noqa: F403, F401
+from .io import * # noqa: F403, F401
from .math import * # noqa: F403, F401
from .ml import get_features_targets as _get_features_targets
from .utils import refactored_function
diff --git a/janitor/io.py b/janitor/io.py
--- a/janitor/io.py
+++ b/janitor/io.py
@@ -88,7 +88,7 @@
```python
import janitor as jn
- df = jn.io.read_commandline("cat data.csv | grep .SEA1AA")
+ df = jn.read_commandline("cat data.csv | grep .SEA1AA")
This function assumes that your command line command will return
an output that is parsable using pandas.read_csv and StringIO.
| {"golden_diff": "diff --git a/janitor/__init__.py b/janitor/__init__.py\n--- a/janitor/__init__.py\n+++ b/janitor/__init__.py\n@@ -5,6 +5,7 @@\n pass\n \n from .functions import * # noqa: F403, F401\n+from .io import * # noqa: F403, F401\n from .math import * # noqa: F403, F401\n from .ml import get_features_targets as _get_features_targets\n from .utils import refactored_function\ndiff --git a/janitor/io.py b/janitor/io.py\n--- a/janitor/io.py\n+++ b/janitor/io.py\n@@ -88,7 +88,7 @@\n \n ```python\n import janitor as jn\n- df = jn.io.read_commandline(\"cat data.csv | grep .SEA1AA\")\n+ df = jn.read_commandline(\"cat data.csv | grep .SEA1AA\")\n \n This function assumes that your command line command will return\n an output that is parsable using pandas.read_csv and StringIO.\n", "issue": "Some modules are missing from the top-level import\nIssue referenced during PR here: https://github.com/pyjanitor-devs/pyjanitor/pull/977#discussion_r781732964\r\n\r\nFor example, now running:\r\n\r\n```python\r\nimport janitor as jn\r\njn.io.read_csvs(\"\") # throws AttributeError: module 'janitor' has no attribute 'io'\r\n```\r\n\r\nSimilarly for other modules like biology or timeseries.\n", "before_files": [{"content": "\"\"\"Top-level janitor API lives here.\"\"\"\ntry:\n import janitor.xarray # noqa: F401\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import * # noqa: F403, F401\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\nfrom .accessors import * # noqa: F403, F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n \"\"\"Wrapper for get_features_targets.\"\"\"\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.22.0\"\n", "path": "janitor/__init__.py"}, {"content": "import os\nimport subprocess\nfrom glob import glob\nfrom io import StringIO\nfrom typing import Iterable, Union\n\nimport pandas as pd\n\nfrom .errors import JanitorError\nfrom .utils import deprecated_alias, check\n\n\n@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\ndef read_csvs(\n files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n) -> Union[pd.DataFrame, dict]:\n \"\"\"\n Read multiple CSV files and return a dictionary of DataFrames, or\n one concatenated DataFrame.\n\n :param files_path: The filepath pattern matching the CSV files.\n Accepts regular expressions, with or without `.csv` extension.\n Also accepts iterable of file paths.\n :param separate_df: If `False` (default), returns a single Dataframe\n with the concatenation of the csv files.\n If `True`, returns a dictionary of separate DataFrames\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n :returns: DataFrame of concatenated DataFrames or dictionary of DataFrames.\n :raises JanitorError: if `None` provided for `files_path`.\n :raises JanitorError: if length of `files_path` is `0`.\n :raises ValueError: if no CSV files exist in `files_path`.\n :raises ValueError: if columns in input CSV files do not match.\n \"\"\"\n # Sanitize input\n if files_path is None:\n raise JanitorError(\"`None` provided for `files_path`\")\n if len(files_path) == 0:\n raise JanitorError(\"0 length `files_path` provided\")\n\n # Read the csv files\n # String to file/folder or file pattern provided\n if isinstance(files_path, str):\n dfs_dict = {\n os.path.basename(f): 
pd.read_csv(f, **kwargs)\n for f in glob(files_path)\n }\n # Iterable of file paths provided\n else:\n dfs_dict = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n }\n # Check if dataframes have been read\n if len(dfs_dict) == 0:\n raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs_dict.values())[0].columns # noqa: PD011\n if not separate_df:\n # If columns do not match raise an error\n for df in dfs_dict.values(): # noqa: PD011\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(\n list(dfs_dict.values()),\n ignore_index=True,\n sort=False, # noqa: PD011\n )\n else:\n return dfs_dict\n\n\ndef read_commandline(cmd: str, **kwargs) -> pd.DataFrame:\n \"\"\"\n Read a CSV file based on a command-line command.\n\n For example, you may wish to run the following command on `sep-quarter.csv`\n before reading it into a pandas DataFrame:\n\n ```bash\n cat sep-quarter.csv | grep .SEA1AA\n ```\n\n In this case, you can use the following Python code to load the dataframe:\n\n ```python\n import janitor as jn\n df = jn.io.read_commandline(\"cat data.csv | grep .SEA1AA\")\n\n This function assumes that your command line command will return\n an output that is parsable using pandas.read_csv and StringIO.\n We default to using pd.read_csv underneath the hood.\n Keyword arguments are passed through to read_csv.\n ```\n\n :param cmd: Shell command to preprocess a file on disk.\n :param kwargs: Keyword arguments that are passed through to pd.read_csv().\n :raises JanitorError: If commandline command is malformed or invalid.\n :returns: A pandas DataFrame parsed from the stdout of the underlying\n shell.\n \"\"\"\n\n check(\"cmd\", cmd, [str])\n outcome = subprocess.run(cmd, shell=True, capture_output=True, text=True)\n if outcome.returncode != 0:\n raise JanitorError(outcome.stderr)\n else:\n outcome = outcome.stdout\n return pd.read_csv(StringIO(outcome), **kwargs)\n", "path": "janitor/io.py"}]} | 2,050 | 258 |
gh_patches_debug_6697 | rasdani/github-patches | git_diff | SeldonIO__MLServer-911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with own logging configuration
Currently I have the problem that my logging configuration is not applied everywhere. As soon as the REST server starts (Uvicorn worker), my logging configuration is ignored. I have created a repo that reproduces my scenario and also shows which configuration is used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers with their associated handlers and formatters and can see that everything should fit. Do you have any ideas?
Here is my small example repo: https://github.com/JustinDroege/mlserver-logging
</issue>
<code>
[start of mlserver/metrics/server.py]
1 import uvicorn
2
3 from fastapi import FastAPI
4 from starlette_exporter import handle_metrics
5
6 from ..settings import Settings
7 from .logging import logger
8 from typing import Optional
9
10
11 class _NoSignalServer(uvicorn.Server):
12 def install_signal_handlers(self):
13 pass
14
15
16 class MetricsServer:
17 def __init__(self, settings: Settings):
18 self._settings = settings
19 self._app = self._get_app()
20
21 def _get_app(self):
22 app = FastAPI(debug=self._settings.debug)
23 app.add_route(self._settings.metrics_endpoint, handle_metrics)
24 return app
25
26 async def start(self):
27 cfg = self._get_config()
28 self._server = _NoSignalServer(cfg)
29
30 metrics_server = f"http://{self._settings.host}:{self._settings.metrics_port}"
31 logger.info(f"Metrics server running on {metrics_server}")
32 logger.info(
33 "Prometheus scraping endpoint can be accessed on "
34 f"{metrics_server}{self._settings.metrics_endpoint}"
35 )
36 await self._server.serve()
37
38 def _get_config(self):
39 kwargs = {}
40
41 if self._settings._custom_metrics_server_settings:
42 logger.warning(
43 "REST custom configuration is out of support. Use as your own risk"
44 )
45 kwargs.update(self._settings._custom_metrics_server_settings)
46
47 kwargs.update(
48 {
49 "host": self._settings.host,
50 "port": self._settings.metrics_port,
51 "access_log": self._settings.debug,
52 }
53 )
54
55 # TODO: we want to disable logger unless debug is enabled (otherwise,
56 # prom reqs can be spammy)
57 return uvicorn.Config(self._app, **kwargs)
58
59 async def stop(self, sig: Optional[int] = None):
60 self._server.handle_exit(sig=sig, frame=None)
61
[end of mlserver/metrics/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py
--- a/mlserver/metrics/server.py
+++ b/mlserver/metrics/server.py
@@ -52,8 +52,11 @@
}
)
- # TODO: we want to disable logger unless debug is enabled (otherwise,
- # prom reqs can be spammy)
+ if self._settings.logging_settings:
+ # If not None, use ours. Otherwise, let Uvicorn fall back on its
+ # own config.
+ kwargs.update({"log_config": self._settings.logging_settings})
+
return uvicorn.Config(self._app, **kwargs)
async def stop(self, sig: Optional[int] = None):
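A rough sketch of the effect of the patch: when `logging_settings` is present it is forwarded to Uvicorn as `log_config`, so the metrics server reuses the user's logging setup instead of Uvicorn's default. The settings values and the bare FastAPI app are stand-ins, not MLServer's real objects.

```python
import uvicorn
from fastapi import FastAPI

app = FastAPI()

# Assumed stand-ins for values normally read from MLServer's Settings.
logging_settings = "logging.conf"   # user-supplied log config (dict or file path)
host, metrics_port, debug = "0.0.0.0", 8082, False

kwargs = {"host": host, "port": metrics_port, "access_log": debug}
if logging_settings:
    # Without this, Uvicorn installs its default log config and the user's
    # handlers/formatters are ignored once the metrics server starts.
    kwargs.update({"log_config": logging_settings})

config = uvicorn.Config(app, **kwargs)
server = uvicorn.Server(config)  # serve() would run it inside MLServer's event loop
```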
| {"golden_diff": "diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py\n--- a/mlserver/metrics/server.py\n+++ b/mlserver/metrics/server.py\n@@ -52,8 +52,11 @@\n }\n )\n \n- # TODO: we want to disable logger unless debug is enabled (otherwise,\n- # prom reqs can be spammy)\n+ if self._settings.logging_settings:\n+ # If not None, use ours. Otherwise, let Uvicorn fall back on its\n+ # own config.\n+ kwargs.update({\"log_config\": self._settings.logging_settings})\n+\n return uvicorn.Config(self._app, **kwargs)\n \n async def stop(self, sig: Optional[int] = None):\n", "issue": "Problems with own logging configuration\nCurrently I have the problem that my logging configuration is not accepted everywhere. As soon as the REST server starts (Uvicorn Worker), my logging configuration is ignored. I have created a repo that represents my scenario and also which is configuration used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers and the associated handlers and formatter and can see here that it should actually fit. Do you have any ideas?\r\n\r\nHere is my small example repo: https://github.com/JustinDroege/mlserver-logging\n", "before_files": [{"content": "import uvicorn\n\nfrom fastapi import FastAPI\nfrom starlette_exporter import handle_metrics\n\nfrom ..settings import Settings\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass MetricsServer:\n def __init__(self, settings: Settings):\n self._settings = settings\n self._app = self._get_app()\n\n def _get_app(self):\n app = FastAPI(debug=self._settings.debug)\n app.add_route(self._settings.metrics_endpoint, handle_metrics)\n return app\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n\n metrics_server = f\"http://{self._settings.host}:{self._settings.metrics_port}\"\n logger.info(f\"Metrics server running on {metrics_server}\")\n logger.info(\n \"Prometheus scraping endpoint can be accessed on \"\n f\"{metrics_server}{self._settings.metrics_endpoint}\"\n )\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_metrics_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_metrics_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.metrics_port,\n \"access_log\": self._settings.debug,\n }\n )\n\n # TODO: we want to disable logger unless debug is enabled (otherwise,\n # prom reqs can be spammy)\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/metrics/server.py"}]} | 1,160 | 160 |
gh_patches_debug_38993 | rasdani/github-patches | git_diff | zulip__zulip-29641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update first message ID when first message is deleted
When a message is deleted, we should update the stored ID of the first message in the stream. Because we currently do not, deleting the first message may result in an extraneous "more topics" link in the left sidebar, with no additional topics shown when you click it.
Note: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott's comment below.
</issue>
<code>
[start of zerver/actions/message_delete.py]
1 from typing import Iterable, List, TypedDict
2
3 from zerver.lib import retention
4 from zerver.lib.retention import move_messages_to_archive
5 from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
6 from zerver.models import Message, Realm, UserMessage, UserProfile
7 from zerver.tornado.django_api import send_event_on_commit
8
9
10 class DeleteMessagesEvent(TypedDict, total=False):
11 type: str
12 message_ids: List[int]
13 message_type: str
14 topic: str
15 stream_id: int
16
17
18 def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
19 # messages in delete_message event belong to the same topic
20 # or is a single direct message, as any other behaviour is not possible with
21 # the current callers to this method.
22 messages = list(messages)
23 message_ids = [message.id for message in messages]
24 if not message_ids:
25 return
26
27 event: DeleteMessagesEvent = {
28 "type": "delete_message",
29 "message_ids": message_ids,
30 }
31
32 sample_message = messages[0]
33 message_type = "stream"
34 users_to_notify = []
35 if not sample_message.is_stream_message():
36 assert len(messages) == 1
37 message_type = "private"
38 ums = UserMessage.objects.filter(message_id__in=message_ids)
39 users_to_notify = [um.user_profile_id for um in ums]
40 archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
41
42 if message_type == "stream":
43 stream_id = sample_message.recipient.type_id
44 event["stream_id"] = stream_id
45 event["topic"] = sample_message.topic_name()
46 subscriptions = get_active_subscriptions_for_stream_id(
47 stream_id, include_deactivated_users=False
48 )
49 # We exclude long-term idle users, since they by definition have no active clients.
50 subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
51 users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
52 archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
53
54 move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
55
56 event["message_type"] = message_type
57 send_event_on_commit(realm, event, users_to_notify)
58
59
60 def do_delete_messages_by_sender(user: UserProfile) -> None:
61 message_ids = list(
62 # Uses index: zerver_message_realm_sender_recipient (prefix)
63 Message.objects.filter(realm_id=user.realm_id, sender=user)
64 .values_list("id", flat=True)
65 .order_by("id")
66 )
67 if message_ids:
68 move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
69
[end of zerver/actions/message_delete.py]
[start of version.py]
1 import os
2
3 ZULIP_VERSION = "9.0-dev+git"
4
5 # Add information on number of commits and commit hash to version, if available
6 zulip_git_version_file = os.path.join(
7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
8 )
9 lines = [ZULIP_VERSION, ""]
10 if os.path.exists(zulip_git_version_file):
11 with open(zulip_git_version_file) as f:
12 lines = [*f, "", ""]
13 ZULIP_VERSION = lines.pop(0).strip()
14 ZULIP_MERGE_BASE = lines.pop(0).strip()
15
16 LATEST_MAJOR_VERSION = "8.0"
17 LATEST_RELEASE_VERSION = "8.3"
18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2023/12/15/zulip-8-0-released/"
19
20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
21 # prevented from connecting to the Zulip server. Versions above
22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
23 # a banner at the top of the page asking the user to upgrade.
24 DESKTOP_MINIMUM_VERSION = "5.4.3"
25 DESKTOP_WARNING_VERSION = "5.9.3"
26
27 # Bump the API_FEATURE_LEVEL whenever an API change is made
28 # that clients might want to condition on. If we forget at
29 # the time we make the change, then bump it later as soon
30 # as we notice; clients using API_FEATURE_LEVEL will just not
31 # use the new feature/API until the bump.
32 #
33 # Changes should be accompanied by documentation explaining what the
34 # new level means in api_docs/changelog.md, as well as "**Changes**"
35 # entries in the endpoint's documentation in `zulip.yaml`.
36 API_FEATURE_LEVEL = 255
37
38 # Bump the minor PROVISION_VERSION to indicate that folks should provision
39 # only when going from an old version of the code to a newer version. Bump
40 # the major version to indicate that folks should provision in both
41 # directions.
42
43 # Typically,
44 # * adding a dependency only requires a minor version bump;
45 # * removing a dependency requires a major version bump;
46 # * upgrading a dependency requires a major version bump, unless the
47 # upgraded dependency is backwards compatible with all of our
48 # historical commits sharing the same major version, in which case a
49 # minor version bump suffices.
50
51 PROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore
52
[end of version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in api_docs/changelog.md, as well as "**Changes**"
# entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 255
+API_FEATURE_LEVEL = 256
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py
--- a/zerver/actions/message_delete.py
+++ b/zerver/actions/message_delete.py
@@ -3,7 +3,7 @@
from zerver.lib import retention
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
-from zerver.models import Message, Realm, UserMessage, UserProfile
+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile
from zerver.tornado.django_api import send_event_on_commit
@@ -15,6 +15,34 @@
stream_id: int
+def check_update_first_message_id(
+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]
+) -> None:
+ # This will not update the `first_message_id` of streams where the
+ # first message was deleted prior to the implementation of this function.
+ assert stream.recipient_id is not None
+ if stream.first_message_id not in message_ids:
+ return
+ current_first_message_id = (
+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)
+ .values_list("id", flat=True)
+ .order_by("id")
+ .first()
+ )
+
+ stream.first_message_id = current_first_message_id
+ stream.save(update_fields=["first_message_id"])
+
+ stream_event = dict(
+ type="stream",
+ op="update",
+ property="first_message_id",
+ value=stream.first_message_id,
+ stream_id=stream.id,
+ )
+ send_event_on_commit(realm, stream_event, users_to_notify)
+
+
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# messages in delete_message event belong to the same topic
# or is a single direct message, as any other behaviour is not possible with
@@ -52,6 +80,9 @@
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
+ if message_type == "stream":
+ stream = Stream.objects.get(id=sample_message.recipient.type_id)
+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)
event["message_type"] = message_type
send_event_on_commit(realm, event, users_to_notify)
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -33,7 +33,7 @@\n # Changes should be accompanied by documentation explaining what the\n # new level means in api_docs/changelog.md, as well as \"**Changes**\"\n # entries in the endpoint's documentation in `zulip.yaml`.\n-API_FEATURE_LEVEL = 255\n+API_FEATURE_LEVEL = 256\n \n # Bump the minor PROVISION_VERSION to indicate that folks should provision\n # only when going from an old version of the code to a newer version. Bump\ndiff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py\n--- a/zerver/actions/message_delete.py\n+++ b/zerver/actions/message_delete.py\n@@ -3,7 +3,7 @@\n from zerver.lib import retention\n from zerver.lib.retention import move_messages_to_archive\n from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\n-from zerver.models import Message, Realm, UserMessage, UserProfile\n+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile\n from zerver.tornado.django_api import send_event_on_commit\n \n \n@@ -15,6 +15,34 @@\n stream_id: int\n \n \n+def check_update_first_message_id(\n+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]\n+) -> None:\n+ # This will not update the `first_message_id` of streams where the\n+ # first message was deleted prior to the implementation of this function.\n+ assert stream.recipient_id is not None\n+ if stream.first_message_id not in message_ids:\n+ return\n+ current_first_message_id = (\n+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)\n+ .values_list(\"id\", flat=True)\n+ .order_by(\"id\")\n+ .first()\n+ )\n+\n+ stream.first_message_id = current_first_message_id\n+ stream.save(update_fields=[\"first_message_id\"])\n+\n+ stream_event = dict(\n+ type=\"stream\",\n+ op=\"update\",\n+ property=\"first_message_id\",\n+ value=stream.first_message_id,\n+ stream_id=stream.id,\n+ )\n+ send_event_on_commit(realm, stream_event, users_to_notify)\n+\n+\n def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n@@ -52,6 +80,9 @@\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n \n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n+ if message_type == \"stream\":\n+ stream = Stream.objects.get(id=sample_message.recipient.type_id)\n+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)\n \n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n", "issue": "Update first message ID when first message is deleted\nWhen a message is deleted, we should update the stored ID of the first message in the stream. 
Because we currently do not, deleting the first message may result in an extraneous \"more topics\" link in the left sidebar, with no additional topics shown when you click it.\r\n\r\nNote: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.\n", "before_files": [{"content": "from typing import Iterable, List, TypedDict\n\nfrom zerver.lib import retention\nfrom zerver.lib.retention import move_messages_to_archive\nfrom zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\nfrom zerver.models import Message, Realm, UserMessage, UserProfile\nfrom zerver.tornado.django_api import send_event_on_commit\n\n\nclass DeleteMessagesEvent(TypedDict, total=False):\n type: str\n message_ids: List[int]\n message_type: str\n topic: str\n stream_id: int\n\n\ndef do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n # the current callers to this method.\n messages = list(messages)\n message_ids = [message.id for message in messages]\n if not message_ids:\n return\n\n event: DeleteMessagesEvent = {\n \"type\": \"delete_message\",\n \"message_ids\": message_ids,\n }\n\n sample_message = messages[0]\n message_type = \"stream\"\n users_to_notify = []\n if not sample_message.is_stream_message():\n assert len(messages) == 1\n message_type = \"private\"\n ums = UserMessage.objects.filter(message_id__in=message_ids)\n users_to_notify = [um.user_profile_id for um in ums]\n archiving_chunk_size = retention.MESSAGE_BATCH_SIZE\n\n if message_type == \"stream\":\n stream_id = sample_message.recipient.type_id\n event[\"stream_id\"] = stream_id\n event[\"topic\"] = sample_message.topic_name()\n subscriptions = get_active_subscriptions_for_stream_id(\n stream_id, include_deactivated_users=False\n )\n # We exclude long-term idle users, since they by definition have no active clients.\n subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)\n users_to_notify = list(subscriptions.values_list(\"user_profile_id\", flat=True))\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n\n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n\n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n\n\ndef do_delete_messages_by_sender(user: UserProfile) -> None:\n message_ids = list(\n # Uses index: zerver_message_realm_sender_recipient (prefix)\n Message.objects.filter(realm_id=user.realm_id, sender=user)\n .values_list(\"id\", flat=True)\n .order_by(\"id\")\n )\n if message_ids:\n move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)\n", "path": "zerver/actions/message_delete.py"}, {"content": "import os\n\nZULIP_VERSION = \"9.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"8.0\"\nLATEST_RELEASE_VERSION = \"8.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/12/15/zulip-8-0-released/\"\n\n# Versions of the desktop app below 
DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 255\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore\n", "path": "version.py"}]} | 2,018 | 677 |
gh_patches_debug_26011 | rasdani/github-patches | git_diff | ray-project__ray-3711 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tune] partial function cannot be registered as trainable
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.6.1
- **Python version**: 3.7
- **Exact command to reproduce**:
The following code fails:
```
def dummy_fn(c, a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", partial(dummy_fn, c=None))
```
while the following code works:
```
def dummy_fn(a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", dummy_fn)
```
### Describe the problem
The first code sample does not work, even though the function produced by `partial` fulfills all requirements to be properly registered.
### Source code / logs
Traceback:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py", line 35, in register_trainable
if not issubclass(trainable, Trainable):
TypeError: issubclass() arg 1 must be a class
```
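As a possible workaround (not part of the original report; a minimal sketch assuming the Ray 0.6.x registry behaviour shown in the traceback above), wrapping the `partial` object in a plain `def` function makes it an instance of `types.FunctionType`, so `register_trainable` accepts it as usual:
```python
from functools import partial

from ray.tune import register_trainable


def dummy_fn(c, a, b):
    print("Called")


# The partial object itself is not a FunctionType, so the registry rejects it.
partial_fn = partial(dummy_fn, c=None)


def wrapped_fn(*args, **kwargs):
    # Plain function wrapper: isinstance(wrapped_fn, FunctionType) is True,
    # so register_trainable converts and registers it without raising.
    return partial_fn(*args, **kwargs)


register_trainable("test", wrapped_fn)
```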
</issue>
<code>
[start of python/ray/tune/registry.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 from types import FunctionType
6
7 import ray
8 import ray.cloudpickle as pickle
9 from ray.experimental.internal_kv import _internal_kv_initialized, \
10 _internal_kv_get, _internal_kv_put
11
12 TRAINABLE_CLASS = "trainable_class"
13 ENV_CREATOR = "env_creator"
14 RLLIB_MODEL = "rllib_model"
15 RLLIB_PREPROCESSOR = "rllib_preprocessor"
16 KNOWN_CATEGORIES = [
17 TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
18 ]
19
20
21 def register_trainable(name, trainable):
22 """Register a trainable function or class.
23
24 Args:
25 name (str): Name to register.
26 trainable (obj): Function or tune.Trainable class. Functions must
27 take (config, status_reporter) as arguments and will be
28 automatically converted into a class during registration.
29 """
30
31 from ray.tune.trainable import Trainable, wrap_function
32
33 if isinstance(trainable, FunctionType):
34 trainable = wrap_function(trainable)
35 if not issubclass(trainable, Trainable):
36 raise TypeError("Second argument must be convertable to Trainable",
37 trainable)
38 _global_registry.register(TRAINABLE_CLASS, name, trainable)
39
40
41 def register_env(name, env_creator):
42 """Register a custom environment for use with RLlib.
43
44 Args:
45 name (str): Name to register.
46 env_creator (obj): Function that creates an env.
47 """
48
49 if not isinstance(env_creator, FunctionType):
50 raise TypeError("Second argument must be a function.", env_creator)
51 _global_registry.register(ENV_CREATOR, name, env_creator)
52
53
54 def _make_key(category, key):
55 """Generate a binary key for the given category and key.
56
57 Args:
58 category (str): The category of the item
59 key (str): The unique identifier for the item
60
61 Returns:
62 The key to use for storing a the value.
63 """
64 return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
65 key.encode("ascii"))
66
67
68 class _Registry(object):
69 def __init__(self):
70 self._to_flush = {}
71
72 def register(self, category, key, value):
73 if category not in KNOWN_CATEGORIES:
74 from ray.tune import TuneError
75 raise TuneError("Unknown category {} not among {}".format(
76 category, KNOWN_CATEGORIES))
77 self._to_flush[(category, key)] = pickle.dumps(value)
78 if _internal_kv_initialized():
79 self.flush_values()
80
81 def contains(self, category, key):
82 if _internal_kv_initialized():
83 value = _internal_kv_get(_make_key(category, key))
84 return value is not None
85 else:
86 return (category, key) in self._to_flush
87
88 def get(self, category, key):
89 if _internal_kv_initialized():
90 value = _internal_kv_get(_make_key(category, key))
91 if value is None:
92 raise ValueError(
93 "Registry value for {}/{} doesn't exist.".format(
94 category, key))
95 return pickle.loads(value)
96 else:
97 return pickle.loads(self._to_flush[(category, key)])
98
99 def flush_values(self):
100 for (category, key), value in self._to_flush.items():
101 _internal_kv_put(_make_key(category, key), value, overwrite=True)
102 self._to_flush.clear()
103
104
105 _global_registry = _Registry()
106 ray.worker._post_init_hooks.append(_global_registry.flush_values)
107
[end of python/ray/tune/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py
--- a/python/ray/tune/registry.py
+++ b/python/ray/tune/registry.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function
+import logging
from types import FunctionType
import ray
@@ -17,6 +18,8 @@
TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
]
+logger = logging.getLogger(__name__)
+
def register_trainable(name, trainable):
"""Register a trainable function or class.
@@ -30,8 +33,16 @@
from ray.tune.trainable import Trainable, wrap_function
- if isinstance(trainable, FunctionType):
+ if isinstance(trainable, type):
+ logger.debug("Detected class for trainable.")
+ elif isinstance(trainable, FunctionType):
+ logger.debug("Detected function for trainable.")
+ trainable = wrap_function(trainable)
+ elif callable(trainable):
+ logger.warning(
+ "Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable)
+
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertable to Trainable",
trainable)
| {"golden_diff": "diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py\n--- a/python/ray/tune/registry.py\n+++ b/python/ray/tune/registry.py\n@@ -2,6 +2,7 @@\n from __future__ import division\n from __future__ import print_function\n \n+import logging\n from types import FunctionType\n \n import ray\n@@ -17,6 +18,8 @@\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n ]\n \n+logger = logging.getLogger(__name__)\n+\n \n def register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n@@ -30,8 +33,16 @@\n \n from ray.tune.trainable import Trainable, wrap_function\n \n- if isinstance(trainable, FunctionType):\n+ if isinstance(trainable, type):\n+ logger.debug(\"Detected class for trainable.\")\n+ elif isinstance(trainable, FunctionType):\n+ logger.debug(\"Detected function for trainable.\")\n+ trainable = wrap_function(trainable)\n+ elif callable(trainable):\n+ logger.warning(\n+ \"Detected unknown callable for trainable. Converting to class.\")\n trainable = wrap_function(trainable)\n+\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n", "issue": "[tune[ partial function cannot be registered as trainable\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.6.1\r\n- **Python version**: 3.7\r\n- **Exact command to reproduce**:\r\n\r\nThe following code fails:\r\n```\r\ndef dummy_fn(c, a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", partial(dummy_fn, c=None))\r\n\r\n```\r\n\r\nwhile the following code works:\r\n```\r\ndef dummy_fn(a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", dummy_fn)\r\n\r\n```\r\n### Describe the problem\r\nThe first code sample does not work, despite the function (after the `partial`) fullfills all requirements to be properly registered.\r\n\r\n### Source code / logs\r\nTraceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py\", line 35, in register_trainable\r\n if not issubclass(trainable, Trainable):\r\nTypeError: issubclass() arg 1 must be a class\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom types import FunctionType\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.experimental.internal_kv import _internal_kv_initialized, \\\n _internal_kv_get, _internal_kv_put\n\nTRAINABLE_CLASS = \"trainable_class\"\nENV_CREATOR = \"env_creator\"\nRLLIB_MODEL = \"rllib_model\"\nRLLIB_PREPROCESSOR = \"rllib_preprocessor\"\nKNOWN_CATEGORIES = [\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n]\n\n\ndef register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n\n Args:\n name (str): Name to register.\n trainable (obj): Function or tune.Trainable class. 
Functions must\n take (config, status_reporter) as arguments and will be\n automatically converted into a class during registration.\n \"\"\"\n\n from ray.tune.trainable import Trainable, wrap_function\n\n if isinstance(trainable, FunctionType):\n trainable = wrap_function(trainable)\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n _global_registry.register(TRAINABLE_CLASS, name, trainable)\n\n\ndef register_env(name, env_creator):\n \"\"\"Register a custom environment for use with RLlib.\n\n Args:\n name (str): Name to register.\n env_creator (obj): Function that creates an env.\n \"\"\"\n\n if not isinstance(env_creator, FunctionType):\n raise TypeError(\"Second argument must be a function.\", env_creator)\n _global_registry.register(ENV_CREATOR, name, env_creator)\n\n\ndef _make_key(category, key):\n \"\"\"Generate a binary key for the given category and key.\n\n Args:\n category (str): The category of the item\n key (str): The unique identifier for the item\n\n Returns:\n The key to use for storing a the value.\n \"\"\"\n return (b\"TuneRegistry:\" + category.encode(\"ascii\") + b\"/\" +\n key.encode(\"ascii\"))\n\n\nclass _Registry(object):\n def __init__(self):\n self._to_flush = {}\n\n def register(self, category, key, value):\n if category not in KNOWN_CATEGORIES:\n from ray.tune import TuneError\n raise TuneError(\"Unknown category {} not among {}\".format(\n category, KNOWN_CATEGORIES))\n self._to_flush[(category, key)] = pickle.dumps(value)\n if _internal_kv_initialized():\n self.flush_values()\n\n def contains(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n return value is not None\n else:\n return (category, key) in self._to_flush\n\n def get(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n if value is None:\n raise ValueError(\n \"Registry value for {}/{} doesn't exist.\".format(\n category, key))\n return pickle.loads(value)\n else:\n return pickle.loads(self._to_flush[(category, key)])\n\n def flush_values(self):\n for (category, key), value in self._to_flush.items():\n _internal_kv_put(_make_key(category, key), value, overwrite=True)\n self._to_flush.clear()\n\n\n_global_registry = _Registry()\nray.worker._post_init_hooks.append(_global_registry.flush_values)\n", "path": "python/ray/tune/registry.py"}]} | 1,833 | 296 |
gh_patches_debug_20190 | rasdani/github-patches | git_diff | kartoza__prj.app-134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Permalink to a feature is not working.
The permalink to a feature does not work. For instance `http://changelog.linfiniti.com/qgis/version/2.10/#275` does not scroll to the anchor.
Moreover, if we click on the anchor, the web browser doesn't keep the anchor in the URL.
</issue>
<code>
[start of django_project/core/urls.py]
1 # coding=utf-8
2 """Project level url handler."""
3 from django.conf.urls import patterns, include, url
4 from django.conf.urls.i18n import i18n_patterns
5 from django.contrib.auth import views as auth_views
6 from django.contrib import admin
7 from django.conf import settings
8 from django.conf.urls.static import static
9 admin.autodiscover()
10
11 handler404 = 'base.views.error_views.custom_404'
12
13 urlpatterns = [
14 # '',
15 # # Enable the admin (use non standard name for obscurity)
16 # url(r'^site-admin/', include(admin.site.urls)),
17 # url(r'^', include('base.urls')),
18 # url(r'^', include('changes.urls')),
19 # url(r'^', include('vota.urls')),
20 # url(r'^', include('github_issue.urls')),
21 #
22 # # This over-ride is required to fix 500 errors as per:
23 # # https://github.com/bread-and-pepper/django-userena/issues/380
24 # url(r'^password/reset/done/$',
25 # auth_views.password_reset_done,
26 # {'template_name': 'userena/password_reset_done.html'},
27 # name='password_reset_done'),
28 # url(r'^accounts/', include('userena.urls')),
29 ]
30
31 urlpatterns += i18n_patterns(
32 url(r'^site-admin/', include(admin.site.urls)),
33 url(r'^', include('base.urls')),
34 url(r'^', include('changes.urls')),
35 url(r'^', include('vota.urls')),
36 url(r'^', include('github_issue.urls')),
37 url(r'^password/reset/done/$',
38 auth_views.password_reset_done,
39 {'template_name': 'userena/password_reset_done.html'},
40 name='password_reset_done'),
41 url(r'^accounts/', include('userena.urls')),
42 )
43
44 if 'rosetta' in settings.INSTALLED_APPS:
45 urlpatterns += patterns(
46 '',
47 url(r'^rosetta/', include('rosetta.urls')),
48 )
49
50 if settings.DEBUG:
51 urlpatterns += static(
52 settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
53
[end of django_project/core/urls.py]
[start of django_project/core/settings/base.py]
1 # coding=utf-8
2 """
3 core.settings.base
4 """
5 # Django settings for projecta project.
6
7 from .utils import absolute_path
8
9 ADMINS = (
10 ('Tim Sutton', '[email protected]'),
11 ('Rischan Mafrur', '[email protected]')
12 )
13 SERVER_EMAIL = '[email protected]'
14 EMAIL_HOST = 'localhost'
15 DEFAULT_FROM_EMAIL = '[email protected]'
16
17 MANAGERS = ADMINS
18
19 # Local time zone for this installation. Choices can be found here:
20 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
21 # although not all choices may be available on all operating systems.
22 # In a Windows environment this must be set to your system time zone.
23 TIME_ZONE = 'Africa/Johannesburg'
24
25 # Language code for this installation. All choices can be found here:
26 # http://www.i18nguy.com/unicode/language-identifiers.html
27 LANGUAGE_CODE = 'en-us'
28
29 SITE_ID = 1
30
31 # If you set this to False, Django will make some optimizations so as not
32 # to load the internationalization machinery.
33 USE_I18N = True
34
35 # If you set this to False, Django will not format dates, numbers and
36 # calendars according to the current locale.
37 USE_L10N = True
38
39 # If you set this to False, Django will not use timezone-aware datetimes.
40 USE_TZ = True
41
42 # Absolute filesystem path to the directory that will hold user-uploaded files.
43 # Example: "/var/www/example.com/media/"
44 MEDIA_ROOT = '/home/web/media'
45
46 # URL that handles the media served from MEDIA_ROOT. Make sure to use a
47 # trailing slash.
48 # Examples: "http://example.com/media/", "http://media.example.com/"
49 # MEDIA_URL = '/media/'
50 # setting full MEDIA_URL to be able to use it for the feeds
51 MEDIA_URL = '/media/'
52
53 # Absolute path to the directory static files should be collected to.
54 # Don't put anything in this directory yourself; store your static files
55 # in apps' "static/" subdirectories and in STATICFILES_DIRS.
56 # Example: "/var/www/example.com/static/"
57 STATIC_ROOT = '/home/web/static'
58
59 # URL prefix for static files.
60 # Example: "http://example.com/static/", "http://static.example.com/"
61 STATIC_URL = '/static/'
62
63 # Additional locations of static files
64 STATICFILES_DIRS = (
65 # Put strings here, like "/home/html/static" or "C:/www/django/static".
66 # Always use forward slashes, even on Windows.
67 # Don't forget to use absolute paths, not relative paths.
68 absolute_path('core', 'base_static'),
69 )
70
71 # List of finder classes that know how to find static files in
72 # various locations.
73 STATICFILES_FINDERS = (
74 'django.contrib.staticfiles.finders.FileSystemFinder',
75 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
76 # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
77 )
78
79 # import SECRET_KEY into current namespace
80 # noinspection PyUnresolvedReferences
81 from .secret import SECRET_KEY # noqa
82
83 # List of callables that know how to import templates from various sources.
84 TEMPLATE_LOADERS = (
85 'django.template.loaders.filesystem.Loader',
86 'django.template.loaders.app_directories.Loader',
87 # 'django.template.loaders.eggs.Loader',
88 )
89
90 TEMPLATE_CONTEXT_PROCESSORS = (
91 'django.contrib.auth.context_processors.auth',
92 'django.core.context_processors.request',
93 'django.core.context_processors.media',
94 'core.context_processors.add_intercom_app_id',
95 'django.template.context_processors.i18n'
96 )
97
98 MIDDLEWARE_CLASSES = (
99 'django.middleware.common.CommonMiddleware',
100 'django.contrib.sessions.middleware.SessionMiddleware',
101 'django.middleware.csrf.CsrfViewMiddleware',
102 'django.contrib.auth.middleware.AuthenticationMiddleware',
103 'django.contrib.messages.middleware.MessageMiddleware',
104 # Uncomment the next line for simple clickjacking protection:
105 # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
106 'core.custom_middleware.NavContextMiddleware',
107 'django.middleware.locale.LocaleMiddleware'
108 )
109
110 ROOT_URLCONF = 'core.urls'
111
112 # Python dotted path to the WSGI application used by Django's runserver.
113 WSGI_APPLICATION = 'core.wsgi.application'
114
115 TEMPLATE_DIRS = (
116 # project level templates
117 absolute_path('core', 'base_templates'),
118 absolute_path('vota', 'templates'),
119 absolute_path('changes', 'templates'),
120
121 )
122
123 INSTALLED_APPS = (
124 'django.contrib.auth',
125 'django.contrib.contenttypes',
126 'django.contrib.sessions',
127 'django.contrib.sites',
128 'django.contrib.messages',
129 'django.contrib.staticfiles',
130 'django.contrib.admin',
131 'django.contrib.syndication',
132 )
133
134 # A sample logging configuration. The only tangible logging
135 # performed by this configuration is to send an email to
136 # the site admins on every HTTP 500 error when DEBUG=False.
137 # See http://docs.djangoproject.com/en/dev/topics/logging for
138 # more details on how to customize your logging configuration.
139 LOGGING = {
140 'version': 1,
141 'disable_existing_loggers': False,
142 'filters': {
143 'require_debug_false': {
144 '()': 'django.utils.log.RequireDebugFalse'
145 }
146 },
147 'handlers': {
148 'mail_admins': {
149 'level': 'ERROR',
150 'filters': ['require_debug_false'],
151 'class': 'django.utils.log.AdminEmailHandler'
152 }
153 },
154 'loggers': {
155 'django.request': {
156 'handlers': ['mail_admins'],
157 'level': 'ERROR',
158 'propagate': True,
159 },
160 }
161 }
162
[end of django_project/core/settings/base.py]
[start of django_project/core/settings/contrib.py]
1 # coding=utf-8
2 """
3 core.settings.contrib
4 """
5 from .base import * # noqa
6
7 # Extra installed apps
8 INSTALLED_APPS += (
9 # 'raven.contrib.django', # enable Raven plugin
10 'crispy_forms',
11 'widget_tweaks', # lets us add some bootstrap css to form elements
12 'accounts', # userena
13 'guardian', # for userena
14 'easy_thumbnails', # also needed for userena
15 'userena',
16 'raven.contrib.django',
17 'reversion',
18 'rosetta',
19 'embed_video',
20 # 'user_map',
21 )
22
23 STOP_WORDS = (
24 'a', 'an', 'and', 'if', 'is', 'the', 'in', 'i', 'you', 'other',
25 'this', 'that'
26 )
27
28 CRISPY_TEMPLATE_PACK = 'bootstrap3'
29
30 # Added for userena
31 AUTHENTICATION_BACKENDS = (
32 'userena.backends.UserenaAuthenticationBackend',
33 'guardian.backends.ObjectPermissionBackend',
34 'django.contrib.auth.backends.ModelBackend',
35 )
36 ANONYMOUS_USER_ID = -1
37 AUTH_PROFILE_MODULE = 'accounts.Profile'
38 LOGIN_REDIRECT_URL = '/accounts/%(username)s/'
39 LOGIN_URL = '/accounts/signin/'
40 LOGOUT_URL = '/accounts/signout/'
41
42 # Easy-thumbnails options
43 THUMBNAIL_SUBDIR = 'thumbnails'
44 THUMBNAIL_ALIASES = {
45 '': {
46 'entry': {'size': (50, 50), 'crop': True},
47 'medium-entry': {'size': (100, 100), 'crop': True},
48 'large-entry': {'size': (400, 300), 'crop': True},
49 'thumb300x200': {'size': (300, 200), 'crop': True},
50 },
51 }
52
53 # Pipeline related settings
54
55
56 INSTALLED_APPS += (
57 'pipeline',)
58
59 DEFAULT_FILE_STORAGE = (
60 'django_hashedfilenamestorage.storage.HashedFilenameFileSystemStorage')
61
62 # use underscore template function
63 PIPELINE_TEMPLATE_FUNC = '_.template'
64
65 # enable cached storage - requires uglify.js (node.js)
66 STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
67
68 PIPELINE_JS = {
69 'contrib': {
70 'source_filenames': (
71 'js/jquery-1.10.1.min.js',
72 'js/csrf-ajax.js',
73 'js/underscore-min.js',
74 'js/bootstrap.min.js',
75 'js/changelog.js',
76 'js/github-issue.js',
77 ),
78 'output_filename': 'js/contrib.js',
79 }
80 }
81
82 PIPELINE_CSS = {
83 'contrib': {
84 'source_filenames': (
85 'css/bootstrap.min.css',
86 'css/bootstrap-theme.min.css',
87 'css/changelog.css',
88 ),
89 'output_filename': 'css/contrib.css',
90 'extra_context': {
91 'media': 'screen, projection',
92 },
93 }
94 }
95
96 # These get enabled in prod.py
97 PIPELINE_ENABLED = False
98 PIPELINE_CSS_COMPRESSOR = None
99 PIPELINE_JS_COMPRESSOR = None
100
[end of django_project/core/settings/contrib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py
--- a/django_project/core/settings/base.py
+++ b/django_project/core/settings/base.py
@@ -108,6 +108,7 @@
)
ROOT_URLCONF = 'core.urls'
+GRAPPELLI_ADMIN_TITLE = ''
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'core.wsgi.application'
diff --git a/django_project/core/settings/contrib.py b/django_project/core/settings/contrib.py
--- a/django_project/core/settings/contrib.py
+++ b/django_project/core/settings/contrib.py
@@ -5,6 +5,10 @@
from .base import * # noqa
# Extra installed apps
+INSTALLED_APPS = (
+ 'grappelli',
+) + INSTALLED_APPS
+
INSTALLED_APPS += (
# 'raven.contrib.django', # enable Raven plugin
'crispy_forms',
diff --git a/django_project/core/urls.py b/django_project/core/urls.py
--- a/django_project/core/urls.py
+++ b/django_project/core/urls.py
@@ -29,6 +29,7 @@
]
urlpatterns += i18n_patterns(
+ url(r'^grappelli/', include('grappelli.urls')),
url(r'^site-admin/', include(admin.site.urls)),
url(r'^', include('base.urls')),
url(r'^', include('changes.urls')),
| {"golden_diff": "diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py\n--- a/django_project/core/settings/base.py\n+++ b/django_project/core/settings/base.py\n@@ -108,6 +108,7 @@\n )\n \n ROOT_URLCONF = 'core.urls'\n+GRAPPELLI_ADMIN_TITLE = ''\n \n # Python dotted path to the WSGI application used by Django's runserver.\n WSGI_APPLICATION = 'core.wsgi.application'\ndiff --git a/django_project/core/settings/contrib.py b/django_project/core/settings/contrib.py\n--- a/django_project/core/settings/contrib.py\n+++ b/django_project/core/settings/contrib.py\n@@ -5,6 +5,10 @@\n from .base import * # noqa\n \n # Extra installed apps\n+INSTALLED_APPS = (\n+ 'grappelli',\n+) + INSTALLED_APPS\n+\n INSTALLED_APPS += (\n # 'raven.contrib.django', # enable Raven plugin\n 'crispy_forms',\ndiff --git a/django_project/core/urls.py b/django_project/core/urls.py\n--- a/django_project/core/urls.py\n+++ b/django_project/core/urls.py\n@@ -29,6 +29,7 @@\n ]\n \n urlpatterns += i18n_patterns(\n+ url(r'^grappelli/', include('grappelli.urls')),\n url(r'^site-admin/', include(admin.site.urls)),\n url(r'^', include('base.urls')),\n url(r'^', include('changes.urls')),\n", "issue": "Permalink to a feature is not working.\nThe permalink to a feature does not work. For instance `http://changelog.linfiniti.com/qgis/version/2.10/#275` does not scroll to the anchor.\n\nMoreover,if we click on the anchor, the webbrowser doesn't keep the link.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Project level url handler.\"\"\"\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nadmin.autodiscover()\n\nhandler404 = 'base.views.error_views.custom_404'\n\nurlpatterns = [\n # '',\n # # Enable the admin (use non standard name for obscurity)\n # url(r'^site-admin/', include(admin.site.urls)),\n # url(r'^', include('base.urls')),\n # url(r'^', include('changes.urls')),\n # url(r'^', include('vota.urls')),\n # url(r'^', include('github_issue.urls')),\n #\n # # This over-ride is required to fix 500 errors as per:\n # # https://github.com/bread-and-pepper/django-userena/issues/380\n # url(r'^password/reset/done/$',\n # auth_views.password_reset_done,\n # {'template_name': 'userena/password_reset_done.html'},\n # name='password_reset_done'),\n # url(r'^accounts/', include('userena.urls')),\n]\n\nurlpatterns += i18n_patterns(\n url(r'^site-admin/', include(admin.site.urls)),\n url(r'^', include('base.urls')),\n url(r'^', include('changes.urls')),\n url(r'^', include('vota.urls')),\n url(r'^', include('github_issue.urls')),\n url(r'^password/reset/done/$',\n auth_views.password_reset_done,\n {'template_name': 'userena/password_reset_done.html'},\n name='password_reset_done'),\n url(r'^accounts/', include('userena.urls')),\n)\n\nif 'rosetta' in settings.INSTALLED_APPS:\n urlpatterns += patterns(\n '',\n url(r'^rosetta/', include('rosetta.urls')),\n )\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "django_project/core/urls.py"}, {"content": "# coding=utf-8\n\"\"\"\ncore.settings.base\n\"\"\"\n# Django settings for projecta project.\n\nfrom .utils import absolute_path\n\nADMINS = (\n ('Tim Sutton', '[email protected]'),\n ('Rischan Mafrur', '[email protected]')\n)\nSERVER_EMAIL = '[email protected]'\nEMAIL_HOST = 
'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Africa/Johannesburg'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = '/home/web/media'\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n# MEDIA_URL = '/media/'\n# setting full MEDIA_URL to be able to use it for the feeds\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = '/home/web/static'\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n absolute_path('core', 'base_static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# import SECRET_KEY into current namespace\n# noinspection PyUnresolvedReferences\nfrom .secret import SECRET_KEY # noqa\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'core.context_processors.add_intercom_app_id',\n 'django.template.context_processors.i18n'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.custom_middleware.NavContextMiddleware',\n 'django.middleware.locale.LocaleMiddleware'\n)\n\nROOT_URLCONF = 
'core.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'core.wsgi.application'\n\nTEMPLATE_DIRS = (\n # project level templates\n absolute_path('core', 'base_templates'),\n absolute_path('vota', 'templates'),\n absolute_path('changes', 'templates'),\n\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.syndication',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "django_project/core/settings/base.py"}, {"content": "# coding=utf-8\n\"\"\"\ncore.settings.contrib\n\"\"\"\nfrom .base import * # noqa\n\n# Extra installed apps\nINSTALLED_APPS += (\n # 'raven.contrib.django', # enable Raven plugin\n 'crispy_forms',\n 'widget_tweaks', # lets us add some bootstrap css to form elements\n 'accounts', # userena\n 'guardian', # for userena\n 'easy_thumbnails', # also needed for userena\n 'userena',\n 'raven.contrib.django',\n 'reversion',\n 'rosetta',\n 'embed_video',\n # 'user_map',\n)\n\nSTOP_WORDS = (\n 'a', 'an', 'and', 'if', 'is', 'the', 'in', 'i', 'you', 'other',\n 'this', 'that'\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# Added for userena\nAUTHENTICATION_BACKENDS = (\n 'userena.backends.UserenaAuthenticationBackend',\n 'guardian.backends.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nANONYMOUS_USER_ID = -1\nAUTH_PROFILE_MODULE = 'accounts.Profile'\nLOGIN_REDIRECT_URL = '/accounts/%(username)s/'\nLOGIN_URL = '/accounts/signin/'\nLOGOUT_URL = '/accounts/signout/'\n\n# Easy-thumbnails options\nTHUMBNAIL_SUBDIR = 'thumbnails'\nTHUMBNAIL_ALIASES = {\n '': {\n 'entry': {'size': (50, 50), 'crop': True},\n 'medium-entry': {'size': (100, 100), 'crop': True},\n 'large-entry': {'size': (400, 300), 'crop': True},\n 'thumb300x200': {'size': (300, 200), 'crop': True},\n },\n}\n\n# Pipeline related settings\n\n\nINSTALLED_APPS += (\n 'pipeline',)\n\nDEFAULT_FILE_STORAGE = (\n 'django_hashedfilenamestorage.storage.HashedFilenameFileSystemStorage')\n\n# use underscore template function\nPIPELINE_TEMPLATE_FUNC = '_.template'\n\n# enable cached storage - requires uglify.js (node.js)\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n\nPIPELINE_JS = {\n 'contrib': {\n 'source_filenames': (\n 'js/jquery-1.10.1.min.js',\n 'js/csrf-ajax.js',\n 'js/underscore-min.js',\n 'js/bootstrap.min.js',\n 'js/changelog.js',\n 'js/github-issue.js',\n ),\n 'output_filename': 'js/contrib.js',\n }\n}\n\nPIPELINE_CSS = {\n 'contrib': {\n 'source_filenames': (\n 'css/bootstrap.min.css',\n 'css/bootstrap-theme.min.css',\n 'css/changelog.css',\n ),\n 'output_filename': 'css/contrib.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n 
}\n}\n\n# These get enabled in prod.py\nPIPELINE_ENABLED = False\nPIPELINE_CSS_COMPRESSOR = None\nPIPELINE_JS_COMPRESSOR = None\n", "path": "django_project/core/settings/contrib.py"}]} | 3,647 | 328 |
gh_patches_debug_20771 | rasdani/github-patches | git_diff | cupy__cupy-7068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cupy.apply_along_axis failed with cupy.nonzero
### Description
`cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))` fails with the following error:
*** AttributeError: 'tuple' object has no attribute 'shape'
`np.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]]))` works as expected.
UPDATE: the problem is in `_shape_base.py`.
line 53:
buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
`res` is a tuple (the result of calling `cp.nonzero` on a 1-D array) rather than a single CuPy array, so the check on line 44 (`if cupy.isscalar(res):`) does not convert it from a tuple into a CuPy array.
As a temporary workaround, it is possible to use a "buffer-like" wrapper function:
def cupy_nonzero (a):
return cp.nonzero(a)[0]
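With this wrapper, the call from the description works; a quick check (a sketch assuming a CUDA-capable environment, and noting that `apply_along_axis` requires every row to yield the same number of indices):
```py
import cupy as cp


def cupy_nonzero(a):
    # Return only the index array so func1d yields an ndarray, not a tuple.
    return cp.nonzero(a)[0]


result = cp.apply_along_axis(cupy_nonzero, 1, cp.array([[1, 2], [2, 3]]))
print(result)  # [[0 1]
               #  [0 1]]  -- every element of both rows is nonzero
```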
### To Reproduce
```py
cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))
```
### Installation
_No response_
### Environment
```
# Paste the output here
```
### Additional Information
_No response_
</issue>
<code>
[start of cupy/lib/_shape_base.py]
1 from numpy.lib import index_tricks
2
3 import cupy
4 from cupy._core import internal
5
6
7 def apply_along_axis(func1d, axis, arr, *args, **kwargs):
8 """Apply a function to 1-D slices along the given axis.
9
10 Args:
11 func1d (function (M,) -> (Nj...)): This function should accept 1-D
12 arrays. It is applied to 1-D slices of ``arr`` along the specified
13 axis. It must return a 1-D ``cupy.ndarray``.
14 axis (integer): Axis along which ``arr`` is sliced.
15 arr (cupy.ndarray (Ni..., M, Nk...)): Input array.
16 args: Additional arguments for ``func1d``.
17 kwargs: Additional keyword arguments for ``func1d``.
18
19 Returns:
20 cupy.ndarray: The output array. The shape of ``out`` is identical to
21 the shape of ``arr``, except along the ``axis`` dimension. This
22 axis is removed, and replaced with new dimensions equal to the
23 shape of the return value of ``func1d``. So if ``func1d`` returns a
24 scalar ``out`` will have one fewer dimensions than ``arr``.
25
26 .. seealso:: :func:`numpy.apply_along_axis`
27 """
28 ndim = arr.ndim
29 axis = internal._normalize_axis_index(axis, ndim)
30 inarr_view = cupy.moveaxis(arr, axis, -1)
31
32 # compute indices for the iteration axes, and append a trailing ellipsis to
33 # prevent 0d arrays decaying to scalars
34 inds = index_tricks.ndindex(inarr_view.shape[:-1])
35 inds = (ind + (Ellipsis,) for ind in inds)
36
37 # invoke the function on the first item
38 try:
39 ind0 = next(inds)
40 except StopIteration:
41 raise ValueError(
42 'Cannot apply_along_axis when any iteration dimensions are 0'
43 )
44 res = func1d(inarr_view[ind0], *args, **kwargs)
45 if cupy.isscalar(res):
46 # scalar outputs need to be transfered to a device ndarray
47 res = cupy.asarray(res)
48
49 # build a buffer for storing evaluations of func1d.
50 # remove the requested axis, and add the new ones on the end.
51 # laid out so that each write is contiguous.
52 # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
53 buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
54
55 # save the first result, then compute and save all remaining results
56 buff[ind0] = res
57 for ind in inds:
58 buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
59
60 # restore the inserted axes back to where they belong
61 for i in range(res.ndim):
62 buff = cupy.moveaxis(buff, -1, axis)
63
64 return buff
65
[end of cupy/lib/_shape_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py
--- a/cupy/lib/_shape_base.py
+++ b/cupy/lib/_shape_base.py
@@ -42,9 +42,7 @@
'Cannot apply_along_axis when any iteration dimensions are 0'
)
res = func1d(inarr_view[ind0], *args, **kwargs)
- if cupy.isscalar(res):
- # scalar outputs need to be transfered to a device ndarray
- res = cupy.asarray(res)
+ res = cupy.asarray(res)
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
@@ -55,7 +53,8 @@
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
+ out = func1d(inarr_view[ind], *args, **kwargs)
+ buff[ind] = cupy.asarray(out)
# restore the inserted axes back to where they belong
for i in range(res.ndim):
| {"golden_diff": "diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py\n--- a/cupy/lib/_shape_base.py\n+++ b/cupy/lib/_shape_base.py\n@@ -42,9 +42,7 @@\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n- if cupy.isscalar(res):\n- # scalar outputs need to be transfered to a device ndarray\n- res = cupy.asarray(res)\n+ res = cupy.asarray(res)\n \n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n@@ -55,7 +53,8 @@\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n+ out = func1d(inarr_view[ind], *args, **kwargs)\n+ buff[ind] = cupy.asarray(out)\n \n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n", "issue": "cupy.apply_along_axis failed with cupy.nonzero\n### Description\r\n\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]])) - failed with error\r\n\r\n*** AttributeError: 'tuple' object has no attribute 'shape'\r\n\r\nnp.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]])) - is OK\r\n\r\nUPDATE. Problem in _shape_base.py. \r\nline 53:\r\nbuff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\r\n\r\nres - is tuple (as a result of cp.nonzero(1d array) of single cupy-array, so line 44 ( if cupy.isscalar(res):) doesnt convert it from tuple to cupy-array\r\n\r\nas a temporal solution is possible to use \"buffer-like\" function\r\ndef cupy_nonzero (a):\r\n return cp.nonzero(a)[0]\r\n\r\n### To Reproduce\r\n\r\n```py\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))\r\n```\r\n\r\n\r\n### Installation\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```\r\n# Paste the output here\r\n```\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\n", "before_files": [{"content": "from numpy.lib import index_tricks\n\nimport cupy\nfrom cupy._core import internal\n\n\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"Apply a function to 1-D slices along the given axis.\n\n Args:\n func1d (function (M,) -> (Nj...)): This function should accept 1-D\n arrays. It is applied to 1-D slices of ``arr`` along the specified\n axis. It must return a 1-D ``cupy.ndarray``.\n axis (integer): Axis along which ``arr`` is sliced.\n arr (cupy.ndarray (Ni..., M, Nk...)): Input array.\n args: Additional arguments for ``func1d``.\n kwargs: Additional keyword arguments for ``func1d``.\n\n Returns:\n cupy.ndarray: The output array. The shape of ``out`` is identical to\n the shape of ``arr``, except along the ``axis`` dimension. This\n axis is removed, and replaced with new dimensions equal to the\n shape of the return value of ``func1d``. So if ``func1d`` returns a\n scalar ``out`` will have one fewer dimensions than ``arr``.\n\n .. 
seealso:: :func:`numpy.apply_along_axis`\n \"\"\"\n ndim = arr.ndim\n axis = internal._normalize_axis_index(axis, ndim)\n inarr_view = cupy.moveaxis(arr, axis, -1)\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars\n inds = index_tricks.ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError(\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n if cupy.isscalar(res):\n # scalar outputs need to be transfered to a device ndarray\n res = cupy.asarray(res)\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n\n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n buff = cupy.moveaxis(buff, -1, axis)\n\n return buff\n", "path": "cupy/lib/_shape_base.py"}]} | 1,570 | 271 |
gh_patches_debug_27156 | rasdani/github-patches | git_diff | falconry__falcon-364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not all modules are cythonized
The hard-coded list of modules to cythonize is missing some modules, such as `hooks` and those in the `util` package.
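One way to avoid maintaining a hard-coded tuple of module names is to enumerate the `.py` files on disk (a sketch along the lines of the fix shown further below; it assumes it runs from the directory containing `setup.py`):
```python
import glob
from os import path


def list_modules(dirname):
    # Collect module names for every .py file, skipping package __init__ files.
    module_names = []
    for name in glob.glob(path.join(dirname, '*.py')):
        module, _ = path.splitext(path.basename(name))
        if module != '__init__':
            module_names.append(module)
    return module_names


# Both the top-level package and its util subpackage can then be cythonized:
# list_modules('falcon') and list_modules(path.join('falcon', 'util'))
```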
</issue>
<code>
[start of setup.py]
1 import imp
2 import io
3 import sys
4 from os import path
5 from setuptools import setup, find_packages, Extension
6
7 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
8 VERSION = VERSION.__version__
9
10 # NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3
11 # TODO(kgriffs): Fork and optimize/modernize python-mimeparse
12 REQUIRES = ['six', 'python-mimeparse']
13
14 PYPY = True
15 CYTHON = False
16 try:
17 sys.pypy_version_info
18 except AttributeError:
19 PYPY = False
20
21 if not PYPY:
22 try:
23 from Cython.Distutils import build_ext
24 CYTHON = True
25 except ImportError:
26 print('\nWARNING: Cython not installed. '
27 'Falcon will still work fine, but may run '
28 'a bit slower.\n')
29 CYTHON = False
30
31 if CYTHON:
32 ext_names = (
33 'api',
34 'api_helpers',
35 'errors',
36 'http_error',
37 'request',
38 'request_helpers',
39 'responders',
40 'response',
41 'response_helpers',
42 )
43
44 cmdclass = {'build_ext': build_ext}
45 ext_modules = [
46 Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
47 for ext in ext_names]
48 else:
49 cmdclass = {}
50 ext_modules = []
51
52 setup(
53 name='falcon',
54 version=VERSION,
55 description='An unladen web framework for building APIs and app backends.',
56 long_description=io.open('README.rst', 'r', encoding='utf-8').read(),
57 classifiers=[
58 'Development Status :: 5 - Production/Stable',
59 'Environment :: Web Environment',
60 'Natural Language :: English',
61 'Intended Audience :: Developers',
62 'Intended Audience :: System Administrators',
63 'License :: OSI Approved :: Apache Software License',
64 'Operating System :: MacOS :: MacOS X',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Topic :: Internet :: WWW/HTTP :: WSGI',
68 'Topic :: Software Development :: Libraries :: Application Frameworks',
69 'Programming Language :: Python',
70 'Programming Language :: Python :: Implementation :: CPython',
71 'Programming Language :: Python :: Implementation :: PyPy',
72 'Programming Language :: Python :: 2.6',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3.3',
75 'Programming Language :: Python :: 3.4',
76 ],
77 keywords='wsgi web api framework rest http cloud',
78 author='Kurt Griffiths',
79 author_email='[email protected]',
80 url='http://falconframework.org',
81 license='Apache 2.0',
82 packages=find_packages(exclude=['tests']),
83 include_package_data=True,
84 zip_safe=False,
85 install_requires=REQUIRES,
86 setup_requires=[],
87 cmdclass=cmdclass,
88 ext_modules=ext_modules,
89 test_suite='nose.collector',
90 entry_points={
91 'console_scripts': [
92 'falcon-bench = falcon.cmd.bench:main'
93 ]
94 }
95 )
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,12 @@
+import glob
import imp
import io
-import sys
+import os
from os import path
from setuptools import setup, find_packages, Extension
+import sys
+
+MYDIR = path.abspath(os.path.dirname(__file__))
VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
VERSION = VERSION.__version__
@@ -29,22 +33,29 @@
CYTHON = False
if CYTHON:
- ext_names = (
- 'api',
- 'api_helpers',
- 'errors',
- 'http_error',
- 'request',
- 'request_helpers',
- 'responders',
- 'response',
- 'response_helpers',
- )
+ def list_modules(dirname):
+ filenames = glob.glob(path.join(dirname, '*.py'))
+
+ module_names = []
+ for name in filenames:
+ module, ext = path.splitext(path.basename(name))
+ if module != '__init__':
+ module_names.append(module)
+
+ return module_names
- cmdclass = {'build_ext': build_ext}
ext_modules = [
Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
- for ext in ext_names]
+ for ext in list_modules(path.join(MYDIR, 'falcon'))]
+
+ ext_modules += [
+ Extension('falcon.util.' + ext,
+ [path.join('falcon', 'util', ext + '.py')])
+
+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
+
+ cmdclass = {'build_ext': build_ext}
+
else:
cmdclass = {}
ext_modules = []
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,12 @@\n+import glob\n import imp\n import io\n-import sys\n+import os\n from os import path\n from setuptools import setup, find_packages, Extension\n+import sys\n+\n+MYDIR = path.abspath(os.path.dirname(__file__))\n \n VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\n VERSION = VERSION.__version__\n@@ -29,22 +33,29 @@\n CYTHON = False\n \n if CYTHON:\n- ext_names = (\n- 'api',\n- 'api_helpers',\n- 'errors',\n- 'http_error',\n- 'request',\n- 'request_helpers',\n- 'responders',\n- 'response',\n- 'response_helpers',\n- )\n+ def list_modules(dirname):\n+ filenames = glob.glob(path.join(dirname, '*.py'))\n+\n+ module_names = []\n+ for name in filenames:\n+ module, ext = path.splitext(path.basename(name))\n+ if module != '__init__':\n+ module_names.append(module)\n+\n+ return module_names\n \n- cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n- for ext in ext_names]\n+ for ext in list_modules(path.join(MYDIR, 'falcon'))]\n+\n+ ext_modules += [\n+ Extension('falcon.util.' + ext,\n+ [path.join('falcon', 'util', ext + '.py')])\n+\n+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n+\n+ cmdclass = {'build_ext': build_ext}\n+\n else:\n cmdclass = {}\n ext_modules = []\n", "issue": "Not all modules are cythonized\nMissing some modules, such as hooks and those in the util package.\n\n", "before_files": [{"content": "import imp\nimport io\nimport sys\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six', 'python-mimeparse']\n\nPYPY = True\nCYTHON = False\ntry:\n sys.pypy_version_info\nexcept AttributeError:\n PYPY = False\n\nif not PYPY:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n print('\\nWARNING: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n ext_names = (\n 'api',\n 'api_helpers',\n 'errors',\n 'http_error',\n 'request',\n 'request_helpers',\n 'responders',\n 'response',\n 'response_helpers',\n )\n\n cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' 
+ ext, [path.join('falcon', ext + '.py')])\n for ext in ext_names]\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 1,424 | 404 |
gh_patches_debug_26745 | rasdani/github-patches | git_diff | dask__distributed-900 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename local_client to worker_client?
The term "local", might be confusing. Perhaps `worker_client` is more clear? Or are there better names?
</issue>
<code>
[start of distributed/__init__.py]
1 from __future__ import print_function, division, absolute_import
2
3 from .config import config
4 from .core import connect, rpc
5 from .deploy import LocalCluster
6 from .diagnostics import progress
7 from .client import (Client, Executor, CompatibleExecutor, wait, as_completed,
8 default_client)
9 from .nanny import Nanny
10 from .scheduler import Scheduler
11 from .utils import sync
12 from .worker import Worker
13 from .worker_client import local_client
14
15 try:
16 from .collections import futures_to_collection
17 except:
18 pass
19
20 from ._version import get_versions
21 versions = get_versions()
22 __version__ = versions['version']
23 __git_revision__ = versions['full-revisionid']
24 del get_versions, versions
25
[end of distributed/__init__.py]
[start of distributed/worker_client.py]
1 from __future__ import print_function, division, absolute_import
2
3 from contextlib import contextmanager
4 from time import sleep
5 from tornado import gen
6 from toolz import keymap, valmap, merge, assoc
7 import uuid
8
9 from dask.base import tokenize
10 from tornado import gen
11
12 from .client import AllExit, Client, Future, pack_data, unpack_remotedata
13 from dask.compatibility import apply
14 from .sizeof import sizeof
15 from .threadpoolexecutor import secede
16 from .utils import All, log_errors, sync, tokey, ignoring
17 from .worker import thread_state
18
19
20 @contextmanager
21 def local_client(timeout=3):
22 """ Get client for this thread
23
24 Note: This interface is new and experimental. It may change without
25 notice.
26
27 This context manager is intended to be called within functions that we run
28 on workers. When run as a context manager it delivers a client
29 ``Client`` object that can submit other tasks directly from that worker.
30
31 Examples
32 --------
33
34 >>> def func(x):
35 ... with local_client() as e: # connect from worker back to scheduler
36 ... a = e.submit(inc, x) # this task can submit more tasks
37 ... b = e.submit(dec, x)
38 ... result = e.gather([a, b]) # and gather results
39 ... return result
40
41 >>> future = e.submit(func, 1) # submit func(1) on cluster
42 """
43 address = thread_state.execution_state['scheduler']
44 worker = thread_state.execution_state['worker']
45 secede() # have this thread secede from the thread pool
46 # so that it doesn't take up a fixed resource while waiting
47 worker.loop.add_callback(worker.transition, thread_state.key, 'long-running')
48
49 with WorkerClient(address, loop=worker.loop) as wc:
50 # Make sure connection errors are bubbled to the caller
51 sync(wc.loop, wc._start, timeout=timeout)
52 assert wc.status == 'running'
53 yield wc
54
55
56 def get_worker():
57 return thread_state.execution_state['worker']
58
59
60 class WorkerClient(Client):
61 """ An Client designed to operate from a Worker process
62
63 This client has had a few methods altered to make it more efficient for
64 working directly from the worker nodes. In particular scatter/gather first
65 look to the local data dictionary rather than sending data over the network
66 """
67 def __init__(self, *args, **kwargs):
68 loop = kwargs.get('loop')
69 self.worker = get_worker()
70 sync(loop, apply, Client.__init__, (self,) + args, assoc(kwargs, 'start', False))
71
72 @gen.coroutine
73 def _scatter(self, data, workers=None, broadcast=False):
74 """ Scatter data to local data dictionary
75
76 Rather than send data out to the cluster we keep data local. However
77 we do report to the scheduler that the local worker has the scattered
78 data. This allows other workers to come by and steal this data if
79 desired.
80
81 Keywords like ``broadcast=`` do not work, however operations like
82 ``.replicate`` work fine after calling scatter, which can fill in for
83 this functionality.
84 """
85 with log_errors():
86 if not (workers is None and broadcast is False):
87 raise NotImplementedError("Scatter from worker doesn't support workers or broadcast keywords")
88
89 if isinstance(data, dict) and not all(isinstance(k, (bytes, str))
90 for k in data):
91 d = yield self._scatter(keymap(tokey, data), workers, broadcast)
92 raise gen.Return({k: d[tokey(k)] for k in data})
93
94 if isinstance(data, (list, tuple, set, frozenset)):
95 keys = []
96 for x in data:
97 try:
98 keys.append(tokenize(x))
99 except:
100 keys.append(str(uuid.uuid1()))
101 data2 = dict(zip(keys, data))
102 elif isinstance(data, dict):
103 keys = set(data)
104 data2 = data
105 else:
106 raise TypeError("Don't know how to scatter %s" % type(data))
107
108 self.worker.update_data(data=data2, report=False)
109
110 yield self.scheduler.update_data(
111 who_has={key: [self.worker.address] for key in data2},
112 nbytes=valmap(sizeof, data2),
113 client=self.id)
114
115 if isinstance(data, dict):
116 out = {k: Future(k, self) for k in data}
117 elif isinstance(data, (tuple, list, set, frozenset)):
118 out = type(data)([Future(k, self) for k in keys])
119 else:
120 raise TypeError(
121 "Input to scatter must be a list or dict")
122
123 for key in keys:
124 self.futures[key].finish(type=None)
125
126 raise gen.Return(out)
127
128 @gen.coroutine
129 def _gather(self, futures, errors='raise'):
130 """
131
132 Exactly like Client._gather, but get data directly from the local
133 worker data dictionary directly rather than through the scheduler.
134
135 TODO: avoid scheduler for other communications, and assume that we can
136 communicate directly with the other workers.
137 """
138 futures2, keys = unpack_remotedata(futures, byte_keys=True)
139 keys = [tokey(k) for k in keys]
140
141 @gen.coroutine
142 def wait(k):
143 """ Want to stop the All(...) early if we find an error """
144 yield self.futures[k].event.wait()
145 if self.futures[k].status != 'finished':
146 raise AllExit()
147
148 with ignoring(AllExit):
149 yield All([wait(key) for key in keys if key in self.futures])
150
151 local = {k: self.worker.data[k] for k in keys
152 if k in self.worker.data}
153
154 futures3 = {k: Future(k, self) for k in keys if k not in local}
155
156 futures4 = pack_data(futures2, merge(local, futures3))
157 if not futures3:
158 raise gen.Return(futures4)
159
160 result = yield Client._gather(self, futures4, errors=errors)
161 raise gen.Return(result)
162
[end of distributed/worker_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -10,7 +10,7 @@
from .scheduler import Scheduler
from .utils import sync
from .worker import Worker
-from .worker_client import local_client
+from .worker_client import local_client, worker_client
try:
from .collections import futures_to_collection
diff --git a/distributed/worker_client.py b/distributed/worker_client.py
--- a/distributed/worker_client.py
+++ b/distributed/worker_client.py
@@ -18,7 +18,7 @@
@contextmanager
-def local_client(timeout=3):
+def worker_client(timeout=3):
""" Get client for this thread
Note: This interface is new and experimental. It may change without
@@ -32,7 +32,7 @@
--------
>>> def func(x):
- ... with local_client() as e: # connect from worker back to scheduler
+ ... with worker_client() as e: # connect from worker back to scheduler
... a = e.submit(inc, x) # this task can submit more tasks
... b = e.submit(dec, x)
... result = e.gather([a, b]) # and gather results
@@ -53,6 +53,8 @@
yield wc
+local_client = worker_client
+
def get_worker():
return thread_state.execution_state['worker']
| {"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -10,7 +10,7 @@\n from .scheduler import Scheduler\n from .utils import sync\n from .worker import Worker\n-from .worker_client import local_client\n+from .worker_client import local_client, worker_client\n \n try:\n from .collections import futures_to_collection\ndiff --git a/distributed/worker_client.py b/distributed/worker_client.py\n--- a/distributed/worker_client.py\n+++ b/distributed/worker_client.py\n@@ -18,7 +18,7 @@\n \n \n @contextmanager\n-def local_client(timeout=3):\n+def worker_client(timeout=3):\n \"\"\" Get client for this thread\n \n Note: This interface is new and experimental. It may change without\n@@ -32,7 +32,7 @@\n --------\n \n >>> def func(x):\n- ... with local_client() as e: # connect from worker back to scheduler\n+ ... with worker_client() as e: # connect from worker back to scheduler\n ... a = e.submit(inc, x) # this task can submit more tasks\n ... b = e.submit(dec, x)\n ... result = e.gather([a, b]) # and gather results\n@@ -53,6 +53,8 @@\n yield wc\n \n \n+local_client = worker_client\n+\n def get_worker():\n return thread_state.execution_state['worker']\n", "issue": "Rename local_client to worker_client?\nThe term \"local\", might be confusing. Perhaps `worker_client` is more clear? Or are there better names?\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom .config import config\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster\nfrom .diagnostics import progress\nfrom .client import (Client, Executor, CompatibleExecutor, wait, as_completed,\n default_client)\nfrom .nanny import Nanny\nfrom .scheduler import Scheduler\nfrom .utils import sync\nfrom .worker import Worker\nfrom .worker_client import local_client\n\ntry:\n from .collections import futures_to_collection\nexcept:\n pass\n\nfrom ._version import get_versions\nversions = get_versions()\n__version__ = versions['version']\n__git_revision__ = versions['full-revisionid']\ndel get_versions, versions\n", "path": "distributed/__init__.py"}, {"content": "from __future__ import print_function, division, absolute_import\n\nfrom contextlib import contextmanager\nfrom time import sleep\nfrom tornado import gen\nfrom toolz import keymap, valmap, merge, assoc\nimport uuid\n\nfrom dask.base import tokenize\nfrom tornado import gen\n\nfrom .client import AllExit, Client, Future, pack_data, unpack_remotedata\nfrom dask.compatibility import apply\nfrom .sizeof import sizeof\nfrom .threadpoolexecutor import secede\nfrom .utils import All, log_errors, sync, tokey, ignoring\nfrom .worker import thread_state\n\n\n@contextmanager\ndef local_client(timeout=3):\n \"\"\" Get client for this thread\n\n Note: This interface is new and experimental. It may change without\n notice.\n\n This context manager is intended to be called within functions that we run\n on workers. When run as a context manager it delivers a client\n ``Client`` object that can submit other tasks directly from that worker.\n\n Examples\n --------\n\n >>> def func(x):\n ... with local_client() as e: # connect from worker back to scheduler\n ... a = e.submit(inc, x) # this task can submit more tasks\n ... b = e.submit(dec, x)\n ... result = e.gather([a, b]) # and gather results\n ... 
return result\n\n >>> future = e.submit(func, 1) # submit func(1) on cluster\n \"\"\"\n address = thread_state.execution_state['scheduler']\n worker = thread_state.execution_state['worker']\n secede() # have this thread secede from the thread pool\n # so that it doesn't take up a fixed resource while waiting\n worker.loop.add_callback(worker.transition, thread_state.key, 'long-running')\n\n with WorkerClient(address, loop=worker.loop) as wc:\n # Make sure connection errors are bubbled to the caller\n sync(wc.loop, wc._start, timeout=timeout)\n assert wc.status == 'running'\n yield wc\n\n\ndef get_worker():\n return thread_state.execution_state['worker']\n\n\nclass WorkerClient(Client):\n \"\"\" An Client designed to operate from a Worker process\n\n This client has had a few methods altered to make it more efficient for\n working directly from the worker nodes. In particular scatter/gather first\n look to the local data dictionary rather than sending data over the network\n \"\"\"\n def __init__(self, *args, **kwargs):\n loop = kwargs.get('loop')\n self.worker = get_worker()\n sync(loop, apply, Client.__init__, (self,) + args, assoc(kwargs, 'start', False))\n\n @gen.coroutine\n def _scatter(self, data, workers=None, broadcast=False):\n \"\"\" Scatter data to local data dictionary\n\n Rather than send data out to the cluster we keep data local. However\n we do report to the scheduler that the local worker has the scattered\n data. This allows other workers to come by and steal this data if\n desired.\n\n Keywords like ``broadcast=`` do not work, however operations like\n ``.replicate`` work fine after calling scatter, which can fill in for\n this functionality.\n \"\"\"\n with log_errors():\n if not (workers is None and broadcast is False):\n raise NotImplementedError(\"Scatter from worker doesn't support workers or broadcast keywords\")\n\n if isinstance(data, dict) and not all(isinstance(k, (bytes, str))\n for k in data):\n d = yield self._scatter(keymap(tokey, data), workers, broadcast)\n raise gen.Return({k: d[tokey(k)] for k in data})\n\n if isinstance(data, (list, tuple, set, frozenset)):\n keys = []\n for x in data:\n try:\n keys.append(tokenize(x))\n except:\n keys.append(str(uuid.uuid1()))\n data2 = dict(zip(keys, data))\n elif isinstance(data, dict):\n keys = set(data)\n data2 = data\n else:\n raise TypeError(\"Don't know how to scatter %s\" % type(data))\n\n self.worker.update_data(data=data2, report=False)\n\n yield self.scheduler.update_data(\n who_has={key: [self.worker.address] for key in data2},\n nbytes=valmap(sizeof, data2),\n client=self.id)\n\n if isinstance(data, dict):\n out = {k: Future(k, self) for k in data}\n elif isinstance(data, (tuple, list, set, frozenset)):\n out = type(data)([Future(k, self) for k in keys])\n else:\n raise TypeError(\n \"Input to scatter must be a list or dict\")\n\n for key in keys:\n self.futures[key].finish(type=None)\n\n raise gen.Return(out)\n\n @gen.coroutine\n def _gather(self, futures, errors='raise'):\n \"\"\"\n\n Exactly like Client._gather, but get data directly from the local\n worker data dictionary directly rather than through the scheduler.\n\n TODO: avoid scheduler for other communications, and assume that we can\n communicate directly with the other workers.\n \"\"\"\n futures2, keys = unpack_remotedata(futures, byte_keys=True)\n keys = [tokey(k) for k in keys]\n\n @gen.coroutine\n def wait(k):\n \"\"\" Want to stop the All(...) 
early if we find an error \"\"\"\n yield self.futures[k].event.wait()\n if self.futures[k].status != 'finished':\n raise AllExit()\n\n with ignoring(AllExit):\n yield All([wait(key) for key in keys if key in self.futures])\n\n local = {k: self.worker.data[k] for k in keys\n if k in self.worker.data}\n\n futures3 = {k: Future(k, self) for k in keys if k not in local}\n\n futures4 = pack_data(futures2, merge(local, futures3))\n if not futures3:\n raise gen.Return(futures4)\n\n result = yield Client._gather(self, futures4, errors=errors)\n raise gen.Return(result)\n", "path": "distributed/worker_client.py"}]} | 2,495 | 339 |
gh_patches_debug_34750 | rasdani/github-patches | git_diff | plotly__dash-2859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Patch += operation not working on the patch object.
If you have a patch object for a number prop and try to do `patched += 1`, the callback will fail with an error, `returned non serializable object`.
Example:
```
app = Dash()
app.layout = [dcc.Store(data=0, store="store"), html.Button("click", id="click")]
@app.callback(Output("store", "data"), Input("click", "n_clicks"))
def on_click(_):
patched = Patch()
patched += 1
return patched
```
Clicking on the button result in the error, it should instead update the store data prop to +1.
</issue>
<code>
[start of dash/_patch.py]
1 def _operation(name, location, **kwargs):
2 return {"operation": name, "location": location, "params": dict(**kwargs)}
3
4
5 _noop = object()
6
7
8 def validate_slice(obj):
9 if isinstance(obj, slice):
10 raise TypeError("a slice is not a valid index for patch")
11
12
13 class Patch:
14 """
15 Patch a callback output value
16
17 Act like a proxy of the output prop value on the frontend.
18
19 Supported prop types: Dictionaries and lists.
20 """
21
22 def __init__(self, location=None, parent=None):
23 if location is not None:
24 self._location = location
25 else:
26 # pylint: disable=consider-using-ternary
27 self._location = (parent and parent._location) or []
28 if parent is not None:
29 self._operations = parent._operations
30 else:
31 self._operations = []
32
33 def __getstate__(self):
34 return vars(self)
35
36 def __setstate__(self, state):
37 vars(self).update(state)
38
39 def __getitem__(self, item):
40 validate_slice(item)
41 return Patch(location=self._location + [item], parent=self)
42
43 def __getattr__(self, item):
44 if item == "tolist":
45 # to_json fix
46 raise AttributeError
47 if item == "_location":
48 return self._location
49 if item == "_operations":
50 return self._operations
51 return self.__getitem__(item)
52
53 def __setattr__(self, key, value):
54 if key in ("_location", "_operations"):
55 self.__dict__[key] = value
56 else:
57 self.__setitem__(key, value)
58
59 def __delattr__(self, item):
60 self.__delitem__(item)
61
62 def __setitem__(self, key, value):
63 validate_slice(key)
64 if value is _noop:
65 # The += set themselves.
66 return
67 self._operations.append(
68 _operation(
69 "Assign",
70 self._location + [key],
71 value=value,
72 )
73 )
74
75 def __delitem__(self, key):
76 validate_slice(key)
77 self._operations.append(_operation("Delete", self._location + [key]))
78
79 def __iadd__(self, other):
80 if isinstance(other, (list, tuple)):
81 self.extend(other)
82 else:
83 self._operations.append(_operation("Add", self._location, value=other))
84 return _noop
85
86 def __isub__(self, other):
87 self._operations.append(_operation("Sub", self._location, value=other))
88 return _noop
89
90 def __imul__(self, other):
91 self._operations.append(_operation("Mul", self._location, value=other))
92 return _noop
93
94 def __itruediv__(self, other):
95 self._operations.append(_operation("Div", self._location, value=other))
96 return _noop
97
98 def __ior__(self, other):
99 self.update(E=other)
100 return _noop
101
102 def __iter__(self):
103 raise TypeError("Patch objects are write-only, you cannot iterate them.")
104
105 def __repr__(self):
106 return f"<write-only dash.Patch object at {self._location}>"
107
108 def append(self, item):
109 """Add the item to the end of a list"""
110 self._operations.append(_operation("Append", self._location, value=item))
111
112 def prepend(self, item):
113 """Add the item to the start of a list"""
114 self._operations.append(_operation("Prepend", self._location, value=item))
115
116 def insert(self, index, item):
117 """Add the item at the index of a list"""
118 self._operations.append(
119 _operation("Insert", self._location, value=item, index=index)
120 )
121
122 def clear(self):
123 """Remove all items in a list"""
124 self._operations.append(_operation("Clear", self._location))
125
126 def reverse(self):
127 """Reversal of the order of items in a list"""
128 self._operations.append(_operation("Reverse", self._location))
129
130 def extend(self, item):
131 """Add all the items to the end of a list"""
132 if not isinstance(item, (list, tuple)):
133 raise TypeError(f"{item} should be a list or tuple")
134 self._operations.append(_operation("Extend", self._location, value=item))
135
136 def remove(self, item):
137 """filter the item out of a list on the frontend"""
138 self._operations.append(_operation("Remove", self._location, value=item))
139
140 def update(self, E=None, **F):
141 """Merge a dict or keyword arguments with another dictionary"""
142 value = E or {}
143 value.update(F)
144 self._operations.append(_operation("Merge", self._location, value=value))
145
146 # pylint: disable=no-self-use
147 def sort(self):
148 raise KeyError(
149 "sort is reserved for future use, use brackets to access this key on your object"
150 )
151
152 def to_plotly_json(self):
153 return {
154 "__dash_patch_update": "__dash_patch_update",
155 "operations": self._operations,
156 }
157
[end of dash/_patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dash/_patch.py b/dash/_patch.py
--- a/dash/_patch.py
+++ b/dash/_patch.py
@@ -36,18 +36,18 @@
def __setstate__(self, state):
vars(self).update(state)
- def __getitem__(self, item):
+ def __getitem__(self, item) -> "Patch":
validate_slice(item)
return Patch(location=self._location + [item], parent=self)
- def __getattr__(self, item):
+ def __getattr__(self, item) -> "Patch":
if item == "tolist":
# to_json fix
raise AttributeError
if item == "_location":
- return self._location
+ return self._location # type: ignore
if item == "_operations":
- return self._operations
+ return self._operations # type: ignore
return self.__getitem__(item)
def __setattr__(self, key, value):
@@ -81,22 +81,32 @@
self.extend(other)
else:
self._operations.append(_operation("Add", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __isub__(self, other):
self._operations.append(_operation("Sub", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __imul__(self, other):
self._operations.append(_operation("Mul", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __itruediv__(self, other):
self._operations.append(_operation("Div", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __ior__(self, other):
self.update(E=other)
+ if not self._location:
+ return self
return _noop
def __iter__(self):
| {"golden_diff": "diff --git a/dash/_patch.py b/dash/_patch.py\n--- a/dash/_patch.py\n+++ b/dash/_patch.py\n@@ -36,18 +36,18 @@\n def __setstate__(self, state):\n vars(self).update(state)\n \n- def __getitem__(self, item):\n+ def __getitem__(self, item) -> \"Patch\":\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n \n- def __getattr__(self, item):\n+ def __getattr__(self, item) -> \"Patch\":\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n- return self._location\n+ return self._location # type: ignore\n if item == \"_operations\":\n- return self._operations\n+ return self._operations # type: ignore\n return self.__getitem__(item)\n \n def __setattr__(self, key, value):\n@@ -81,22 +81,32 @@\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __ior__(self, other):\n self.update(E=other)\n+ if not self._location:\n+ return self\n return _noop\n \n def __iter__(self):\n", "issue": "[BUG] Patch += operation not working on the patch object.\nIf you have a patch object for a number prop and try to do `patched += 1`, the callback will fail with an error, `returned non serializable object`.\r\n\r\nExample:\r\n```\r\napp = Dash()\r\napp.layout = [dcc.Store(data=0, store=\"store\"), html.Button(\"click\", id=\"click\")]\r\[email protected](Output(\"store\", \"data\"), Input(\"click\", \"n_clicks\"))\r\ndef on_click(_):\r\n patched = Patch()\r\n patched += 1\r\n return patched\r\n```\r\n\r\nClicking on the button result in the error, it should instead update the store data prop to +1.\n", "before_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n 
self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def __iter__(self):\n raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n\n def __repr__(self):\n return f\"<write-only dash.Patch object at {self._location}>\"\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}]} | 2,149 | 460 |
gh_patches_debug_14625 | rasdani/github-patches | git_diff | google__fuzzbench-207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wpantund benchmark can initiate network connections
The wpantund benchmark can initiate networking connections to remote networks under some conditions. Attaching a testcase for that:
[1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov.txt](https://github.com/google/fuzzbench/files/4391761/1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov.txt)
```shell
$ strace -f ~/fuzz/wpantund/wpantund-fuzz ~/fuzz/wpantund/corpus/1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov
...
openat(AT_FDCWD, "/tmp", O_RDWR|O_EXCL|O_TMPFILE, 0600) = 6
fcntl(6, F_GETFL) = 0x418002 (flags O_RDWR|O_LARGEFILE|O_TMPFILE)
fstat(6, {st_mode=S_IFREG|0600, st_size=0, ...}) = 0
write(6, "\235mfProxy:EnabiG:NNN\305SocketPath \""..., 127) = 127
lseek(6, 0, SEEK_SET) = 0
read(6, "\235mfProxy:EnabiG:NNN\305SocketPath \""..., 4096) = 127
read(6, "", 4096) = 0
close(6) = 0
brk(0xb518000) = 0xb518000
socket(AF_INET6, SOCK_STREAM, IPPROTO_IP) = 6
connect(6, {sa_family=AF_INET6, sin6_port=htons(4951), inet_pton(AF_INET6, "::ffff:0.0.3.122", &sin6_addr), sin6_flowinfo=htonl(0), sin6_scope_id=0}, 28^C) = ? ERESTARTSYS (To be restarted if SA_RESTART is set)
..
```
I know nothing about wpantund internals, so maybe this can be fixed (if you want to fix it) in the fuzzer harness code (maybe some API option needs to be set).
If not on API level, a quick hack could be to add something like
```C
unshare(CLONE_NEWUSER|CLONE_NEWNET);
```
at the beginning of the file.
I'm not sure if we want to leave current behavior as-is. On one hand it might be a useful case to test fuzzing timeouts, on the other, benchmarks initiating connections over non-loopback sound somehow wrong (and they also add unnecessary timeouts).
</issue>
<code>
[start of .github/workflows/build.py]
1 #!/usr/bin/env python3
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Script for building fuzzer,benchmark pairs in CI."""
16 import sys
17 import subprocess
18
19 # Don't build php benchmark since it fills up disk in GH actions.
20 OSS_FUZZ_BENCHMARKS = [
21 'bloaty_fuzz_target',
22 'curl_curl_fuzzer_http',
23 'irssi_server-fuzz',
24 'jsoncpp_jsoncpp_fuzzer',
25 'libpcap_fuzz_both',
26 'mbedtls_fuzz_dtlsclient',
27 'openssl_x509',
28 'proxygen_ProxygenHTTP1xFuzzer',
29 'sqlite3_ossfuzz',
30 'systemd_fuzz-link-parser',
31 'zlib_zlib_uncompress_fuzzer',
32 ]
33
34 STANDARD_BENCHMARKS = [
35 'freetype2-2017',
36 'harfbuzz-1.3.2',
37 'lcms-2017-03-21',
38 'libjpeg-turbo-07-2017',
39 'libpng-1.2.56',
40 'libxml2-v2.9.2',
41 'openthread-2019-12-23',
42 'proj4-2017-08-14',
43 're2-2014-12-09',
44 'vorbis-2017-12-11',
45 'woff2-2016-05-06',
46 'wpantund-2018-02-27',
47 ]
48
49
50 def get_make_targets(benchmarks, fuzzer):
51 """Return pull and build targets for |fuzzer| and each benchmark
52 in |benchmarks| to pass to make."""
53 return [('pull-%s-%s' % (fuzzer, benchmark),
54 'build-%s-%s' % (fuzzer, benchmark)) for benchmark in benchmarks]
55
56
57 def delete_docker_images():
58 """Delete docker images."""
59 # TODO(metzman): Don't delete base-runner/base-builder so it
60 # doesn't need to be pulled for every target.
61 result = subprocess.run(['docker', 'images', '-q'],
62 stdout=subprocess.PIPE,
63 check=True)
64 image_names = result.stdout.splitlines()
65 subprocess.run(['docker', 'rmi', '-f'] + image_names, check=False)
66
67
68 def make_builds(benchmarks, fuzzer):
69 """Use make to build each target in |build_targets|."""
70 make_targets = get_make_targets(benchmarks, fuzzer)
71 for pull_target, build_target in make_targets:
72 # Pull target first.
73 subprocess.run(['make', '-j', pull_target], check=False)
74
75 # Then build.
76 print('Building', build_target)
77 build_command = ['make', '-j', build_target]
78 result = subprocess.run(build_command, check=False)
79 if not result.returncode == 0:
80 return False
81 # Delete docker images so disk doesn't fill up.
82 delete_docker_images()
83 return True
84
85
86 def do_build(build_type, fuzzer):
87 """Build fuzzer,benchmark pairs for CI."""
88 if build_type == 'oss-fuzz':
89 benchmarks = OSS_FUZZ_BENCHMARKS
90 elif build_type == 'standard':
91 benchmarks = STANDARD_BENCHMARKS
92 else:
93 raise Exception('Invalid build_type: %s' % build_type)
94
95 return make_builds(benchmarks, fuzzer)
96
97
98 def main():
99 """Build OSS-Fuzz or standard benchmarks with a fuzzer."""
100 if len(sys.argv) != 3:
101 print('Usage: %s <build_type> <fuzzer>' % sys.argv[0])
102 return 1
103 build_type = sys.argv[1]
104 fuzzer = sys.argv[2]
105 result = do_build(build_type, fuzzer)
106 return 0 if result else 1
107
108
109 if __name__ == '__main__':
110 sys.exit(main())
111
[end of .github/workflows/build.py]
[start of fuzzers/afl/fuzzer.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Integration code for AFL fuzzer."""
15
16 import shutil
17 import subprocess
18 import os
19
20 from fuzzers import utils
21
22 # OUT environment variable is the location of build directory (default is /out).
23
24
25 def prepare_build_environment():
26 """Set environment variables used to build targets for AFL-based
27 fuzzers."""
28 cflags = ['-fsanitize-coverage=trace-pc-guard']
29 utils.append_flags('CFLAGS', cflags)
30 utils.append_flags('CXXFLAGS', cflags)
31
32 os.environ['CC'] = 'clang'
33 os.environ['CXX'] = 'clang++'
34 os.environ['FUZZER_LIB'] = '/libAFL.a'
35
36
37 def build():
38 """Build benchmark."""
39 prepare_build_environment()
40
41 utils.build_benchmark()
42
43 print('[post_build] Copying afl-fuzz to $OUT directory')
44 # Copy out the afl-fuzz binary as a build artifact.
45 shutil.copy('/afl/afl-fuzz', os.environ['OUT'])
46
47
48 def prepare_fuzz_environment(input_corpus):
49 """Prepare to fuzz with AFL or another AFL-based fuzzer."""
50 # Tell AFL to not use its terminal UI so we get usable logs.
51 os.environ['AFL_NO_UI'] = '1'
52 # Skip AFL's CPU frequency check (fails on Docker).
53 os.environ['AFL_SKIP_CPUFREQ'] = '1'
54 # No need to bind affinity to one core, Docker enforces 1 core usage.
55 os.environ['AFL_NO_AFFINITY'] = '1'
56 # AFL will abort on startup if the core pattern sends notifications to
57 # external programs. We don't care about this.
58 os.environ['AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES'] = '1'
59
60 # AFL needs at least one non-empty seed to start.
61 if len(os.listdir(input_corpus)) == 0:
62 with open(os.path.join(input_corpus, 'default_seed'),
63 'w') as file_handle:
64 file_handle.write('hi')
65
66
67 def run_afl_fuzz(input_corpus,
68 output_corpus,
69 target_binary,
70 additional_flags=None,
71 hide_output=False):
72 """Run afl-fuzz."""
73 # Spawn the afl fuzzing process.
74 # FIXME: Currently AFL will exit if it encounters a crashing input in seed
75 # corpus (usually timeouts). Add a way to skip/delete such inputs and
76 # re-run AFL. This currently happens with a seed in wpantund benchmark.
77 print('[run_fuzzer] Running target with afl-fuzz')
78 command = [
79 './afl-fuzz',
80 '-i',
81 input_corpus,
82 '-o',
83 output_corpus,
84 # Use deterministic mode as it does best when we don't have
85 # seeds which is often the case.
86 '-d',
87 # Use no memory limit as ASAN doesn't play nicely with one.
88 '-m',
89 'none'
90 ]
91 if additional_flags:
92 command.extend(additional_flags)
93 dictionary_path = utils.get_dictionary_path(target_binary)
94 if dictionary_path:
95 command.extend(['-x', dictionary_path])
96 command += [
97 '--',
98 target_binary,
99 # Pass INT_MAX to afl the maximize the number of persistent loops it
100 # performs.
101 '2147483647'
102 ]
103 print('[run_fuzzer] Running command: ' + ' '.join(command))
104 output_stream = subprocess.DEVNULL if hide_output else None
105 subprocess.call(command, stdout=output_stream, stderr=output_stream)
106
107
108 def fuzz(input_corpus, output_corpus, target_binary):
109 """Run afl-fuzz on target."""
110 prepare_fuzz_environment(input_corpus)
111
112 run_afl_fuzz(input_corpus, output_corpus, target_binary)
113
[end of fuzzers/afl/fuzzer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/.github/workflows/build.py b/.github/workflows/build.py
--- a/.github/workflows/build.py
+++ b/.github/workflows/build.py
@@ -42,7 +42,6 @@
're2-2014-12-09',
'vorbis-2017-12-11',
'woff2-2016-05-06',
- 'wpantund-2018-02-27',
]
diff --git a/fuzzers/afl/fuzzer.py b/fuzzers/afl/fuzzer.py
--- a/fuzzers/afl/fuzzer.py
+++ b/fuzzers/afl/fuzzer.py
@@ -73,7 +73,7 @@
# Spawn the afl fuzzing process.
# FIXME: Currently AFL will exit if it encounters a crashing input in seed
# corpus (usually timeouts). Add a way to skip/delete such inputs and
- # re-run AFL. This currently happens with a seed in wpantund benchmark.
+ # re-run AFL.
print('[run_fuzzer] Running target with afl-fuzz')
command = [
'./afl-fuzz',
| {"golden_diff": "diff --git a/.github/workflows/build.py b/.github/workflows/build.py\n--- a/.github/workflows/build.py\n+++ b/.github/workflows/build.py\n@@ -42,7 +42,6 @@\n 're2-2014-12-09',\n 'vorbis-2017-12-11',\n 'woff2-2016-05-06',\n- 'wpantund-2018-02-27',\n ]\n \n \ndiff --git a/fuzzers/afl/fuzzer.py b/fuzzers/afl/fuzzer.py\n--- a/fuzzers/afl/fuzzer.py\n+++ b/fuzzers/afl/fuzzer.py\n@@ -73,7 +73,7 @@\n # Spawn the afl fuzzing process.\n # FIXME: Currently AFL will exit if it encounters a crashing input in seed\n # corpus (usually timeouts). Add a way to skip/delete such inputs and\n- # re-run AFL. This currently happens with a seed in wpantund benchmark.\n+ # re-run AFL.\n print('[run_fuzzer] Running target with afl-fuzz')\n command = [\n './afl-fuzz',\n", "issue": "wpantund benchmark can initiate network connections\nThe wpantund benchmark can initiate networking connections to remote networks under some conditions. Attaching a testcase for that:\r\n\r\n[1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov.txt](https://github.com/google/fuzzbench/files/4391761/1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov.txt)\r\n\r\n```shell\r\n$ strace -f ~/fuzz/wpantund/wpantund-fuzz ~/fuzz/wpantund/corpus/1788a8b166e1fe4e854bdbb1572e2bb6.00000080.honggfuzz.cov\r\n...\r\nopenat(AT_FDCWD, \"/tmp\", O_RDWR|O_EXCL|O_TMPFILE, 0600) = 6\r\nfcntl(6, F_GETFL) = 0x418002 (flags O_RDWR|O_LARGEFILE|O_TMPFILE)\r\nfstat(6, {st_mode=S_IFREG|0600, st_size=0, ...}) = 0\r\nwrite(6, \"\\235mfProxy:EnabiG:NNN\\305SocketPath \\\"\"..., 127) = 127\r\nlseek(6, 0, SEEK_SET) = 0\r\nread(6, \"\\235mfProxy:EnabiG:NNN\\305SocketPath \\\"\"..., 4096) = 127\r\nread(6, \"\", 4096) = 0\r\nclose(6) = 0\r\nbrk(0xb518000) = 0xb518000\r\nsocket(AF_INET6, SOCK_STREAM, IPPROTO_IP) = 6\r\nconnect(6, {sa_family=AF_INET6, sin6_port=htons(4951), inet_pton(AF_INET6, \"::ffff:0.0.3.122\", &sin6_addr), sin6_flowinfo=htonl(0), sin6_scope_id=0}, 28^C) = ? ERESTARTSYS (To be restarted if SA_RESTART is set)\r\n..\r\n```\r\n\r\nI know nothing about wpantund internals, so maybe this can be fixed (if you want to fix it) in the fuzzer harness code (maybe some API option needs to be set).\r\n\r\nIf not on API level, a quick hack could be to add something like\r\n\r\n```C\r\nunshare(CLONE_NEWUSER|CLONE_NEWNET);\r\n```\r\n\r\nat the beginning of the file.\r\n\r\nI'm not sure if we want to leave current behavior as-is. 
On one hand it might be a useful case to test fuzzing timeouts, on the other, benchmarks initiating connections over non-loopback sound somehow wrong (and they also add unnecessary timeouts).\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script for building fuzzer,benchmark pairs in CI.\"\"\"\nimport sys\nimport subprocess\n\n# Don't build php benchmark since it fills up disk in GH actions.\nOSS_FUZZ_BENCHMARKS = [\n 'bloaty_fuzz_target',\n 'curl_curl_fuzzer_http',\n 'irssi_server-fuzz',\n 'jsoncpp_jsoncpp_fuzzer',\n 'libpcap_fuzz_both',\n 'mbedtls_fuzz_dtlsclient',\n 'openssl_x509',\n 'proxygen_ProxygenHTTP1xFuzzer',\n 'sqlite3_ossfuzz',\n 'systemd_fuzz-link-parser',\n 'zlib_zlib_uncompress_fuzzer',\n]\n\nSTANDARD_BENCHMARKS = [\n 'freetype2-2017',\n 'harfbuzz-1.3.2',\n 'lcms-2017-03-21',\n 'libjpeg-turbo-07-2017',\n 'libpng-1.2.56',\n 'libxml2-v2.9.2',\n 'openthread-2019-12-23',\n 'proj4-2017-08-14',\n 're2-2014-12-09',\n 'vorbis-2017-12-11',\n 'woff2-2016-05-06',\n 'wpantund-2018-02-27',\n]\n\n\ndef get_make_targets(benchmarks, fuzzer):\n \"\"\"Return pull and build targets for |fuzzer| and each benchmark\n in |benchmarks| to pass to make.\"\"\"\n return [('pull-%s-%s' % (fuzzer, benchmark),\n 'build-%s-%s' % (fuzzer, benchmark)) for benchmark in benchmarks]\n\n\ndef delete_docker_images():\n \"\"\"Delete docker images.\"\"\"\n # TODO(metzman): Don't delete base-runner/base-builder so it\n # doesn't need to be pulled for every target.\n result = subprocess.run(['docker', 'images', '-q'],\n stdout=subprocess.PIPE,\n check=True)\n image_names = result.stdout.splitlines()\n subprocess.run(['docker', 'rmi', '-f'] + image_names, check=False)\n\n\ndef make_builds(benchmarks, fuzzer):\n \"\"\"Use make to build each target in |build_targets|.\"\"\"\n make_targets = get_make_targets(benchmarks, fuzzer)\n for pull_target, build_target in make_targets:\n # Pull target first.\n subprocess.run(['make', '-j', pull_target], check=False)\n\n # Then build.\n print('Building', build_target)\n build_command = ['make', '-j', build_target]\n result = subprocess.run(build_command, check=False)\n if not result.returncode == 0:\n return False\n # Delete docker images so disk doesn't fill up.\n delete_docker_images()\n return True\n\n\ndef do_build(build_type, fuzzer):\n \"\"\"Build fuzzer,benchmark pairs for CI.\"\"\"\n if build_type == 'oss-fuzz':\n benchmarks = OSS_FUZZ_BENCHMARKS\n elif build_type == 'standard':\n benchmarks = STANDARD_BENCHMARKS\n else:\n raise Exception('Invalid build_type: %s' % build_type)\n\n return make_builds(benchmarks, fuzzer)\n\n\ndef main():\n \"\"\"Build OSS-Fuzz or standard benchmarks with a fuzzer.\"\"\"\n if len(sys.argv) != 3:\n print('Usage: %s <build_type> <fuzzer>' % sys.argv[0])\n return 1\n build_type = sys.argv[1]\n fuzzer = sys.argv[2]\n result = do_build(build_type, fuzzer)\n return 0 if result else 1\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": 
".github/workflows/build.py"}, {"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration code for AFL fuzzer.\"\"\"\n\nimport shutil\nimport subprocess\nimport os\n\nfrom fuzzers import utils\n\n# OUT environment variable is the location of build directory (default is /out).\n\n\ndef prepare_build_environment():\n \"\"\"Set environment variables used to build targets for AFL-based\n fuzzers.\"\"\"\n cflags = ['-fsanitize-coverage=trace-pc-guard']\n utils.append_flags('CFLAGS', cflags)\n utils.append_flags('CXXFLAGS', cflags)\n\n os.environ['CC'] = 'clang'\n os.environ['CXX'] = 'clang++'\n os.environ['FUZZER_LIB'] = '/libAFL.a'\n\n\ndef build():\n \"\"\"Build benchmark.\"\"\"\n prepare_build_environment()\n\n utils.build_benchmark()\n\n print('[post_build] Copying afl-fuzz to $OUT directory')\n # Copy out the afl-fuzz binary as a build artifact.\n shutil.copy('/afl/afl-fuzz', os.environ['OUT'])\n\n\ndef prepare_fuzz_environment(input_corpus):\n \"\"\"Prepare to fuzz with AFL or another AFL-based fuzzer.\"\"\"\n # Tell AFL to not use its terminal UI so we get usable logs.\n os.environ['AFL_NO_UI'] = '1'\n # Skip AFL's CPU frequency check (fails on Docker).\n os.environ['AFL_SKIP_CPUFREQ'] = '1'\n # No need to bind affinity to one core, Docker enforces 1 core usage.\n os.environ['AFL_NO_AFFINITY'] = '1'\n # AFL will abort on startup if the core pattern sends notifications to\n # external programs. We don't care about this.\n os.environ['AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES'] = '1'\n\n # AFL needs at least one non-empty seed to start.\n if len(os.listdir(input_corpus)) == 0:\n with open(os.path.join(input_corpus, 'default_seed'),\n 'w') as file_handle:\n file_handle.write('hi')\n\n\ndef run_afl_fuzz(input_corpus,\n output_corpus,\n target_binary,\n additional_flags=None,\n hide_output=False):\n \"\"\"Run afl-fuzz.\"\"\"\n # Spawn the afl fuzzing process.\n # FIXME: Currently AFL will exit if it encounters a crashing input in seed\n # corpus (usually timeouts). Add a way to skip/delete such inputs and\n # re-run AFL. 
This currently happens with a seed in wpantund benchmark.\n print('[run_fuzzer] Running target with afl-fuzz')\n command = [\n './afl-fuzz',\n '-i',\n input_corpus,\n '-o',\n output_corpus,\n # Use deterministic mode as it does best when we don't have\n # seeds which is often the case.\n '-d',\n # Use no memory limit as ASAN doesn't play nicely with one.\n '-m',\n 'none'\n ]\n if additional_flags:\n command.extend(additional_flags)\n dictionary_path = utils.get_dictionary_path(target_binary)\n if dictionary_path:\n command.extend(['-x', dictionary_path])\n command += [\n '--',\n target_binary,\n # Pass INT_MAX to afl the maximize the number of persistent loops it\n # performs.\n '2147483647'\n ]\n print('[run_fuzzer] Running command: ' + ' '.join(command))\n output_stream = subprocess.DEVNULL if hide_output else None\n subprocess.call(command, stdout=output_stream, stderr=output_stream)\n\n\ndef fuzz(input_corpus, output_corpus, target_binary):\n \"\"\"Run afl-fuzz on target.\"\"\"\n prepare_fuzz_environment(input_corpus)\n\n run_afl_fuzz(input_corpus, output_corpus, target_binary)\n", "path": "fuzzers/afl/fuzzer.py"}]} | 3,611 | 267 |
gh_patches_debug_37943 | rasdani/github-patches | git_diff | ibis-project__ibis-4508 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat: support amending/overriding reflected types in SQLite backend
I ran into a few problems trying to load data from [PyPI-Data](https://github.com/sethmlarson/pypi-data) that I'm not sure how best to solve:
- The database schemas use `STRING` instead of `TEXT` for string types (this has since been fixed, but [doesn't seem to be part of the latest release](https://github.com/sethmlarson/pypi-data/issues/12)). It would be nice if we could just force SQLAlchemy to treat these columns as `TEXT` types rather than falling back to `NUMERIC`. The SQLAlchemy docs illustrate one way of [overriding reflected types](https://docs.sqlalchemy.org/en/14/core/reflection.html#overriding-reflected-columns), but like most things in SQLAlchemy there are probably other ways of handling this.
- The timestamp columns use RFC3339 format (`YYYY-mm-ddTHH:MM:SS`), which isn't handled by SQLAlchemy's standard datetime parsing routine (it doesn't like the `T` delimiter). The way around this in SQLAlchemy is to pass [additional options to the DATETIME type constructor](https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#sqlalchemy.dialects.sqlite.DATETIME), but as far as I can tell ibis doesn't currently expose this functionality.
I'm honestly not sure what a good API here would be.
Brainstorming a bit: perhaps additional arguments to `ibis.sqlite.connect` could provide:
- A type mapping from SQLite type name (e.g. `TIMESTAMP`) to ibis type (`typemap={...}`?). This would let us map `{"STRING": ibis.expr.datatypes.String()}`.
- A datetime format string in strftime syntax to use when parsing `TIMESTAMP` columns into datetime objects (`timestamp_format=...`?). This assumes the database uses the same datetime format for all tables, but hopefully that's the case.
A few things to like about this approach:
- It doesn't expose to the user anywhere that SQLAlchemy is being used. Everything is a mapping between SQLite concepts and ibis itself. This would give us the freedom to drop SQLAlchemy if we wanted, without forcing an API change.
- It doesn't require listing the full table schemas for all poorly typed columns; the user only needs to pass in overrides for the specific SQLite type names that require it.
For my use case the connect call might look like:
```python
con = ibis.sqlite.connect("pypi.db", typemap={"STRING": ibis.expr.datatypes.String()}, timestamp_format="%Y-%m-%dT%H:%M:%S")
```
There's probably a better way of handling this though.
</issue>
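Before the code listing, a minimal sketch of the two SQLAlchemy-level workarounds the report alludes to: overriding reflected column types and teaching the SQLite `DATETIME` type about the RFC3339 `T` delimiter. This is not part of the original issue or of ibis; the `pypi.db` path, the commented-out table name, and the choice to patch `ischema_names` directly are illustrative assumptions only.

```python
# Hedged sketch: assumes a local SQLite file "pypi.db" whose schema declares
# STRING columns and stores timestamps like "2020-01-01T00:00:00".
import sqlalchemy as sa
from sqlalchemy.dialects.sqlite import DATETIME

engine = sa.create_engine("sqlite:///pypi.db")

# 1. Map the non-standard STRING type name to TEXT so reflection does not
#    fall back to NUMERIC affinity.
engine.dialect.ischema_names = dict(engine.dialect.ischema_names, STRING=sa.TEXT)

# 2. Tell the SQLite DATETIME type how values are stored so the "T" delimiter
#    parses cleanly (regexp parses incoming rows, storage_format writes them).
rfc3339 = DATETIME(
    storage_format=(
        "%(year)04d-%(month)02d-%(day)02dT%(hour)02d:%(minute)02d:%(second)02d"
    ),
    regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)",
)

metadata = sa.MetaData()

@sa.event.listens_for(metadata, "column_reflect")
def _use_rfc3339(inspector, table, column_info):
    # Re-type reflected TIMESTAMP columns with the RFC3339-aware parser above.
    if isinstance(column_info["type"], sa.TIMESTAMP):
        column_info["type"] = rfc3339

# projects = sa.Table("projects", metadata, autoload_with=engine)  # hypothetical table name
```

The patch further down this record takes a related route: it adds a `type_map=` argument to `ibis.sqlite.connect` and applies it to `ischema_names` internally, so a call along the lines of `ibis.sqlite.connect("pypi.db", type_map={"STRING": "string"})` covers the first point without touching SQLAlchemy directly.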
<code>
[start of ibis/backends/sqlite/__init__.py]
1 # Copyright 2015 Cloudera Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import annotations
16
17 import datetime
18 import sqlite3
19 import warnings
20 from pathlib import Path
21 from typing import TYPE_CHECKING
22
23 import pandas as pd
24 import sqlalchemy as sa
25 from sqlalchemy.dialects.sqlite import DATETIME, TIMESTAMP
26
27 if TYPE_CHECKING:
28 import ibis.expr.types as ir
29
30 from ibis.backends.base import Database
31 from ibis.backends.base.sql.alchemy import BaseAlchemyBackend
32 from ibis.backends.sqlite import udf
33 from ibis.backends.sqlite.compiler import SQLiteCompiler
34
35
36 def to_datetime(value: str | None) -> datetime.datetime | None:
37 """Convert a str to a datetime according to SQLite's rules, ignoring `None`
38 values"""
39 if value is None:
40 return None
41 if value.endswith("Z"):
42 # Parse and set the timezone as UTC
43 o = datetime.datetime.fromisoformat(value[:-1]).replace(
44 tzinfo=datetime.timezone.utc
45 )
46 else:
47 o = datetime.datetime.fromisoformat(value)
48 if o.tzinfo:
49 # Convert any aware datetime to UTC
50 return o.astimezone(datetime.timezone.utc)
51 return o
52
53
54 class ISODATETIME(DATETIME):
55 """A thin datetime type to override sqlalchemy's datetime parsing to
56 support a wider range of timestamp formats accepted by SQLite.
57
58 See https://sqlite.org/lang_datefunc.html#time_values for the full
59 list of datetime formats SQLite accepts.
60 """
61
62 def result_processor(self, value, dialect):
63 return to_datetime
64
65
66 class Backend(BaseAlchemyBackend):
67 name = 'sqlite'
68 # TODO check if there is a reason to not use the parent AlchemyDatabase, or
69 # if there is technical debt that makes this required
70 database_class = Database
71 compiler = SQLiteCompiler
72
73 def __getstate__(self) -> dict:
74 r = super().__getstate__()
75 r.update(
76 dict(
77 compiler=self.compiler,
78 database_name=self.database_name,
79 _con=None, # clear connection on copy()
80 _meta=None,
81 )
82 )
83 return r
84
85 def do_connect(
86 self,
87 database: str | Path | None = None,
88 path: str | Path | None = None,
89 ) -> None:
90 """Create an Ibis client connected to a SQLite database.
91
92 Multiple database files can be accessed using the `attach()` method.
93
94 Parameters
95 ----------
96 database
97 File path to the SQLite database file. If `None`, creates an
98 in-memory transient database and you can use attach() to add more
99 files
100
101 Examples
102 --------
103 >>> import ibis
104 >>> ibis.sqlite.connect("path/to/my/sqlite.db")
105 """
106 if path is not None:
107 warnings.warn(
108 "The `path` argument is deprecated in 4.0. Use `database=...`"
109 )
110 database = path
111
112 self.database_name = "main"
113
114 engine = sa.create_engine(
115 f"sqlite:///{database if database is not None else ':memory:'}"
116 )
117
118 sqlite3.register_adapter(pd.Timestamp, lambda value: value.isoformat())
119
120 @sa.event.listens_for(engine, "connect")
121 def connect(dbapi_connection, connection_record):
122 """Register UDFs on connection."""
123 udf.register_all(dbapi_connection)
124
125 super().do_connect(engine)
126
127 @sa.event.listens_for(self.meta, "column_reflect")
128 def column_reflect(inspector, table, column_info):
129 if type(column_info["type"]) is TIMESTAMP:
130 column_info["type"] = ISODATETIME()
131
132 def attach(
133 self,
134 name: str,
135 path: str | Path,
136 ) -> None:
137 """Connect another SQLite database file to the current connection.
138
139 Parameters
140 ----------
141 name
142 Database name within SQLite
143 path
144 Path to sqlite3 database file
145 """
146 quoted_name = self.con.dialect.identifier_preparer.quote(name)
147 self.raw_sql(f"ATTACH DATABASE {path!r} AS {quoted_name}")
148
149 def _get_sqla_table(self, name, schema=None, autoload=True):
150 return sa.Table(
151 name,
152 self.meta,
153 schema=schema or self.current_database,
154 autoload=autoload,
155 )
156
157 def table(self, name: str, database: str | None = None) -> ir.Table:
158 """Create a table expression from a table in the SQLite database.
159
160 Parameters
161 ----------
162 name
163 Table name
164 database
165 Name of the attached database that the table is located in.
166
167 Returns
168 -------
169 Table
170 Table expression
171 """
172 alch_table = self._get_sqla_table(name, schema=database)
173 node = self.table_class(source=self, sqla_table=alch_table)
174 return self.table_expr_class(node)
175
176 def _table_from_schema(
177 self, name, schema, database: str | None = None
178 ) -> sa.Table:
179 columns = self._columns_from_schema(name, schema)
180 return sa.Table(name, self.meta, schema=database, *columns)
181
182 @property
183 def _current_schema(self) -> str | None:
184 return self.current_database
185
[end of ibis/backends/sqlite/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/sqlite/__init__.py b/ibis/backends/sqlite/__init__.py
--- a/ibis/backends/sqlite/__init__.py
+++ b/ibis/backends/sqlite/__init__.py
@@ -26,11 +26,13 @@
if TYPE_CHECKING:
import ibis.expr.types as ir
+ import ibis.expr.datatypes as dt
from ibis.backends.base import Database
-from ibis.backends.base.sql.alchemy import BaseAlchemyBackend
+from ibis.backends.base.sql.alchemy import BaseAlchemyBackend, to_sqla_type
from ibis.backends.sqlite import udf
from ibis.backends.sqlite.compiler import SQLiteCompiler
+from ibis.expr.schema import datatype
def to_datetime(value: str | None) -> datetime.datetime | None:
@@ -86,6 +88,7 @@
self,
database: str | Path | None = None,
path: str | Path | None = None,
+ type_map: dict[str, str | dt.DataType] | None = None,
) -> None:
"""Create an Ibis client connected to a SQLite database.
@@ -97,6 +100,10 @@
File path to the SQLite database file. If `None`, creates an
in-memory transient database and you can use attach() to add more
files
+ type_map
+ An optional mapping from a string name of a SQLite "type" to the
+ corresponding ibis DataType that it represents. This can be used
+ to override schema inference for a given SQLite database.
Examples
--------
@@ -115,6 +122,24 @@
f"sqlite:///{database if database is not None else ':memory:'}"
)
+ if type_map:
+ # Patch out ischema_names for the instantiated dialect. This
+ # attribute is required for all SQLAlchemy dialects, but as no
+ # public way of modifying it for a given dialect. Patching seems
+ # easier than subclassing the builtin SQLite dialect, and achieves
+ # the same desired behavior.
+ def _to_ischema_val(t):
+ sa_type = to_sqla_type(datatype(t))
+ if isinstance(sa_type, sa.types.TypeEngine):
+ # SQLAlchemy expects a callable here, rather than an
+ # instance. Use a lambda to work around this.
+ return lambda: sa_type
+ return sa_type
+
+ overrides = {k: _to_ischema_val(v) for k, v in type_map.items()}
+ engine.dialect.ischema_names = engine.dialect.ischema_names.copy()
+ engine.dialect.ischema_names.update(overrides)
+
sqlite3.register_adapter(pd.Timestamp, lambda value: value.isoformat())
@sa.event.listens_for(engine, "connect")
| {"golden_diff": "diff --git a/ibis/backends/sqlite/__init__.py b/ibis/backends/sqlite/__init__.py\n--- a/ibis/backends/sqlite/__init__.py\n+++ b/ibis/backends/sqlite/__init__.py\n@@ -26,11 +26,13 @@\n \n if TYPE_CHECKING:\n import ibis.expr.types as ir\n+ import ibis.expr.datatypes as dt\n \n from ibis.backends.base import Database\n-from ibis.backends.base.sql.alchemy import BaseAlchemyBackend\n+from ibis.backends.base.sql.alchemy import BaseAlchemyBackend, to_sqla_type\n from ibis.backends.sqlite import udf\n from ibis.backends.sqlite.compiler import SQLiteCompiler\n+from ibis.expr.schema import datatype\n \n \n def to_datetime(value: str | None) -> datetime.datetime | None:\n@@ -86,6 +88,7 @@\n self,\n database: str | Path | None = None,\n path: str | Path | None = None,\n+ type_map: dict[str, str | dt.DataType] | None = None,\n ) -> None:\n \"\"\"Create an Ibis client connected to a SQLite database.\n \n@@ -97,6 +100,10 @@\n File path to the SQLite database file. If `None`, creates an\n in-memory transient database and you can use attach() to add more\n files\n+ type_map\n+ An optional mapping from a string name of a SQLite \"type\" to the\n+ corresponding ibis DataType that it represents. This can be used\n+ to override schema inference for a given SQLite database.\n \n Examples\n --------\n@@ -115,6 +122,24 @@\n f\"sqlite:///{database if database is not None else ':memory:'}\"\n )\n \n+ if type_map:\n+ # Patch out ischema_names for the instantiated dialect. This\n+ # attribute is required for all SQLAlchemy dialects, but as no\n+ # public way of modifying it for a given dialect. Patching seems\n+ # easier than subclassing the builtin SQLite dialect, and achieves\n+ # the same desired behavior.\n+ def _to_ischema_val(t):\n+ sa_type = to_sqla_type(datatype(t))\n+ if isinstance(sa_type, sa.types.TypeEngine):\n+ # SQLAlchemy expects a callable here, rather than an\n+ # instance. Use a lambda to work around this.\n+ return lambda: sa_type\n+ return sa_type\n+\n+ overrides = {k: _to_ischema_val(v) for k, v in type_map.items()}\n+ engine.dialect.ischema_names = engine.dialect.ischema_names.copy()\n+ engine.dialect.ischema_names.update(overrides)\n+\n sqlite3.register_adapter(pd.Timestamp, lambda value: value.isoformat())\n \n @sa.event.listens_for(engine, \"connect\")\n", "issue": "feat: support amending/overriding reflected types in SQLite backend\nI ran into a few problems trying to load data from [PyPI-Data](https://github.com/sethmlarson/pypi-data) that I'm not sure how best to solve:\r\n\r\n- The database schemas use `STRING` instead of `TEXT` for string types (this has since been fixed, but [doesn't seem to be part of the latest release](https://github.com/sethmlarson/pypi-data/issues/12)). It would be nice if we could just force sqlalchemy to treat these columns as `TEXT` types rather than falling back to `NUMERIC`. The SQLAlchemy docs illustrate one way of [overriding reflected types](https://docs.sqlalchemy.org/en/14/core/reflection.html#overriding-reflected-columns), but like most things in SQLAlchemy there are probably other ways of handling this.\r\n- The timestamp columns are using RFC3339 format (`YYYY-mm-ddTHH:MM:SS`) which isn't handled by SQLAlchemy's standard datetime parsing routine (it doesn't like the `T` delimiter). 
The way around this in SQLAlchemy is to pass [additional options to the DATETIME type constructor](https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#sqlalchemy.dialects.sqlite.DATETIME), but ibis currently AFAICT doesn't expose this functionality.\r\n\r\nI'm honestly not sure what a good API here would be.\r\n\r\nBrainstorming a bit, perhaps additional arguments to the `ibis.sqlite.connect` providing:\r\n\r\n- A type mapping from SQLITE type (e.g. `TIMESTAMP`) to ibis type (`typemap={...}`?). This would let us map `{\"STRING\": ibis.expr.datatypes.String()}`\r\n- A datetime format string in strftime syntax to use when parsing `TIMESTAMP` columns to datetime objects (`timestamp_format=...`?). This assumes the database uses the same datetime format for all tables, but hopefully that's the case.\r\n\r\nA few things to like about this approach:\r\n\r\n- It doesn't expose that SQLAlchemy is used to the user anywhere. Everything is a mapping between SQLite concepts and ibis itself. This would give us the freedom to drop SQLAlchemy if we wanted without forcing an API change.\r\n- It doesn't require listing the full table schemas for all poorly typed columns - the user only needs to pass in overrides for the specific SQLite type names that require it.\r\n\r\nFor my use case the connect call might look like:\r\n\r\n```python\r\ncon = ibis.sqlite.connect(\"pypi.db\", typemap={\"STRING\": ibis.expr.datatypes.String()}, timestamp_format=\"%Y-%m-%dT%H:%M:%S\")\r\n```\r\n\r\nThere's probably a better way of handling this though.\n", "before_files": [{"content": "# Copyright 2015 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nimport datetime\nimport sqlite3\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nimport pandas as pd\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.sqlite import DATETIME, TIMESTAMP\n\nif TYPE_CHECKING:\n import ibis.expr.types as ir\n\nfrom ibis.backends.base import Database\nfrom ibis.backends.base.sql.alchemy import BaseAlchemyBackend\nfrom ibis.backends.sqlite import udf\nfrom ibis.backends.sqlite.compiler import SQLiteCompiler\n\n\ndef to_datetime(value: str | None) -> datetime.datetime | None:\n \"\"\"Convert a str to a datetime according to SQLite's rules, ignoring `None`\n values\"\"\"\n if value is None:\n return None\n if value.endswith(\"Z\"):\n # Parse and set the timezone as UTC\n o = datetime.datetime.fromisoformat(value[:-1]).replace(\n tzinfo=datetime.timezone.utc\n )\n else:\n o = datetime.datetime.fromisoformat(value)\n if o.tzinfo:\n # Convert any aware datetime to UTC\n return o.astimezone(datetime.timezone.utc)\n return o\n\n\nclass ISODATETIME(DATETIME):\n \"\"\"A thin datetime type to override sqlalchemy's datetime parsing to\n support a wider range of timestamp formats accepted by SQLite.\n\n See https://sqlite.org/lang_datefunc.html#time_values for the full\n list of datetime formats SQLite accepts.\n \"\"\"\n\n def result_processor(self, value, dialect):\n return 
to_datetime\n\n\nclass Backend(BaseAlchemyBackend):\n name = 'sqlite'\n # TODO check if there is a reason to not use the parent AlchemyDatabase, or\n # if there is technical debt that makes this required\n database_class = Database\n compiler = SQLiteCompiler\n\n def __getstate__(self) -> dict:\n r = super().__getstate__()\n r.update(\n dict(\n compiler=self.compiler,\n database_name=self.database_name,\n _con=None, # clear connection on copy()\n _meta=None,\n )\n )\n return r\n\n def do_connect(\n self,\n database: str | Path | None = None,\n path: str | Path | None = None,\n ) -> None:\n \"\"\"Create an Ibis client connected to a SQLite database.\n\n Multiple database files can be accessed using the `attach()` method.\n\n Parameters\n ----------\n database\n File path to the SQLite database file. If `None`, creates an\n in-memory transient database and you can use attach() to add more\n files\n\n Examples\n --------\n >>> import ibis\n >>> ibis.sqlite.connect(\"path/to/my/sqlite.db\")\n \"\"\"\n if path is not None:\n warnings.warn(\n \"The `path` argument is deprecated in 4.0. Use `database=...`\"\n )\n database = path\n\n self.database_name = \"main\"\n\n engine = sa.create_engine(\n f\"sqlite:///{database if database is not None else ':memory:'}\"\n )\n\n sqlite3.register_adapter(pd.Timestamp, lambda value: value.isoformat())\n\n @sa.event.listens_for(engine, \"connect\")\n def connect(dbapi_connection, connection_record):\n \"\"\"Register UDFs on connection.\"\"\"\n udf.register_all(dbapi_connection)\n\n super().do_connect(engine)\n\n @sa.event.listens_for(self.meta, \"column_reflect\")\n def column_reflect(inspector, table, column_info):\n if type(column_info[\"type\"]) is TIMESTAMP:\n column_info[\"type\"] = ISODATETIME()\n\n def attach(\n self,\n name: str,\n path: str | Path,\n ) -> None:\n \"\"\"Connect another SQLite database file to the current connection.\n\n Parameters\n ----------\n name\n Database name within SQLite\n path\n Path to sqlite3 database file\n \"\"\"\n quoted_name = self.con.dialect.identifier_preparer.quote(name)\n self.raw_sql(f\"ATTACH DATABASE {path!r} AS {quoted_name}\")\n\n def _get_sqla_table(self, name, schema=None, autoload=True):\n return sa.Table(\n name,\n self.meta,\n schema=schema or self.current_database,\n autoload=autoload,\n )\n\n def table(self, name: str, database: str | None = None) -> ir.Table:\n \"\"\"Create a table expression from a table in the SQLite database.\n\n Parameters\n ----------\n name\n Table name\n database\n Name of the attached database that the table is located in.\n\n Returns\n -------\n Table\n Table expression\n \"\"\"\n alch_table = self._get_sqla_table(name, schema=database)\n node = self.table_class(source=self, sqla_table=alch_table)\n return self.table_expr_class(node)\n\n def _table_from_schema(\n self, name, schema, database: str | None = None\n ) -> sa.Table:\n columns = self._columns_from_schema(name, schema)\n return sa.Table(name, self.meta, schema=database, *columns)\n\n @property\n def _current_schema(self) -> str | None:\n return self.current_database\n", "path": "ibis/backends/sqlite/__init__.py"}]} | 2,803 | 627 |
gh_patches_debug_286 | rasdani/github-patches | git_diff | Mailu__Mailu-2049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fetchmail: /var/lib/fetchmail needs persistence
According to the [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file be persistent across container restarts?
I'm not a Fetchmail user; perhaps somebody can shed some light on how this currently works?
cc: @Nebukadneza, @hoellen, @kaiyou
</issue>
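A small sketch of what a persistence-friendly template could look like. The only changes from the template in `fetchmail.py` below are the two extra flags; `/data/fetchids` is an assumption about a volume-mounted path, although it matches the change shown in the diff at the end of this record.

```python
# Hedged sketch: same command template as in fetchmail.py, plus an id file
# kept on a persistent volume (assumed to be mounted at /data) and --uidl so
# the list of already-fetched message IDs survives container restarts.
FETCHMAIL = """
fetchmail -N \
    --idfile /data/fetchids --uidl \
    --sslcertck --sslcertpath /etc/ssl/certs \
    -f {}
"""
```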
<code>
[start of optional/fetchmail/fetchmail.py]
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
[end of optional/fetchmail/fetchmail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -13,6 +13,7 @@
FETCHMAIL = """
fetchmail -N \
+ --idfile /data/fetchids --uidl \
--sslcertck --sslcertpath /etc/ssl/certs \
-f {}
"""
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -13,6 +13,7 @@\n \n FETCHMAIL = \"\"\"\n fetchmail -N \\\n+ --idfile /data/fetchids --uidl \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n \"\"\"\n", "issue": "Fetchmail: /var/lib/fetchmail needs persistence\nAccording [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file persistent over container restarts?\r\n\r\nI'm not a Fetchmail user, perhaps somebody can shine a light on how this currently works?\r\n\r\ncc: @Nebukadneza, @hoellen, @kaiyou \n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n 
run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,599 | 105 |
gh_patches_debug_18158 | rasdani/github-patches | git_diff | openai__gym-1966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in PixelObservationWrapper
In `pixel_observation.py`, we have a bit of code that looks like this:
```python
def _add_pixel_observation(self, observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
observation = type(observation)(observation)
else:
observation = collections.OrderedDict()
observation[STATE_KEY] = observation
```
Note that the argument `observation` is clobbered in the else branch, so the observation dictionary stored at `STATE_KEY` ends up referring to itself instead of to the underlying env's observation.
I'm happy to fix this and submit a pull request but I wanted to raise the community's attention to this first.
</issue>
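To make the self-reference concrete, here is a tiny runnable illustration of the clobbering described above; the variable names are made up for the demo. The fix is simply to keep the wrapped observation under a distinct name, which is what the accepted patch below does with `wrapped_observation`.

```python
import collections

wrapped_obs = [1.0, 2.0]  # stand-in for the underlying env's observation

# What the current else-branch effectively does:
observation = collections.OrderedDict()
observation["state"] = observation           # the dict now points at itself
assert observation["state"] is observation   # wrapped_obs was never stored

# What was presumably intended:
fixed = collections.OrderedDict()
fixed["state"] = wrapped_obs
assert fixed["state"] == [1.0, 2.0]
```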
<code>
[start of gym/wrappers/pixel_observation.py]
1 """An observation wrapper that augments observations by pixel values."""
2
3 import collections
4 import copy
5
6 import numpy as np
7
8 from gym import spaces
9 from gym import ObservationWrapper
10
11 STATE_KEY = 'state'
12
13
14 class PixelObservationWrapper(ObservationWrapper):
15 """Augment observations by pixel values."""
16
17 def __init__(self,
18 env,
19 pixels_only=True,
20 render_kwargs=None,
21 pixel_keys=('pixels', )):
22 """Initializes a new pixel Wrapper.
23
24 Args:
25 env: The environment to wrap.
26 pixels_only: If `True` (default), the original observation returned
27 by the wrapped environment will be discarded, and a dictionary
28 observation will only include pixels. If `False`, the
29 observation dictionary will contain both the original
30 observations and the pixel observations.
31 render_kwargs: Optional `dict` containing keyword arguments passed
32 to the `self.render` method.
33 pixel_keys: Optional custom string specifying the pixel
34 observation's key in the `OrderedDict` of observations.
35 Defaults to 'pixels'.
36
37 Raises:
38 ValueError: If `env`'s observation spec is not compatible with the
39 wrapper. Supported formats are a single array, or a dict of
40 arrays.
41 ValueError: If `env`'s observation already contains any of the
42 specified `pixel_keys`.
43 """
44
45 super(PixelObservationWrapper, self).__init__(env)
46
47 if render_kwargs is None:
48 render_kwargs = {}
49
50 for key in pixel_keys:
51 render_kwargs.setdefault(key, {})
52
53 render_mode = render_kwargs[key].pop('mode', 'rgb_array')
54 assert render_mode == 'rgb_array', render_mode
55 render_kwargs[key]['mode'] = 'rgb_array'
56
57 wrapped_observation_space = env.observation_space
58
59 if isinstance(wrapped_observation_space, spaces.Box):
60 self._observation_is_dict = False
61 invalid_keys = set([STATE_KEY])
62 elif isinstance(wrapped_observation_space,
63 (spaces.Dict, collections.MutableMapping)):
64 self._observation_is_dict = True
65 invalid_keys = set(wrapped_observation_space.spaces.keys())
66 else:
67 raise ValueError("Unsupported observation space structure.")
68
69 if not pixels_only:
70 # Make sure that now keys in the `pixel_keys` overlap with
71 # `observation_keys`
72 overlapping_keys = set(pixel_keys) & set(invalid_keys)
73 if overlapping_keys:
74 raise ValueError("Duplicate or reserved pixel keys {!r}."
75 .format(overlapping_keys))
76
77 if pixels_only:
78 self.observation_space = spaces.Dict()
79 elif self._observation_is_dict:
80 self.observation_space = copy.deepcopy(wrapped_observation_space)
81 else:
82 self.observation_space = spaces.Dict()
83 self.observation_space.spaces[STATE_KEY] = wrapped_observation_space
84
85 # Extend observation space with pixels.
86
87 pixels_spaces = {}
88 for pixel_key in pixel_keys:
89 pixels = self.env.render(**render_kwargs[pixel_key])
90
91 if np.issubdtype(pixels.dtype, np.integer):
92 low, high = (0, 255)
93 elif np.issubdtype(pixels.dtype, np.float):
94 low, high = (-float('inf'), float('inf'))
95 else:
96 raise TypeError(pixels.dtype)
97
98 pixels_space = spaces.Box(
99 shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)
100 pixels_spaces[pixel_key] = pixels_space
101
102 self.observation_space.spaces.update(pixels_spaces)
103
104 self._env = env
105 self._pixels_only = pixels_only
106 self._render_kwargs = render_kwargs
107 self._pixel_keys = pixel_keys
108
109 def observation(self, observation):
110 pixel_observation = self._add_pixel_observation(observation)
111 return pixel_observation
112
113 def _add_pixel_observation(self, observation):
114 if self._pixels_only:
115 observation = collections.OrderedDict()
116 elif self._observation_is_dict:
117 observation = type(observation)(observation)
118 else:
119 observation = collections.OrderedDict()
120 observation[STATE_KEY] = observation
121
122 pixel_observations = {
123 pixel_key: self.env.render(**self._render_kwargs[pixel_key])
124 for pixel_key in self._pixel_keys
125 }
126
127 observation.update(pixel_observations)
128
129 return observation
130
[end of gym/wrappers/pixel_observation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py
--- a/gym/wrappers/pixel_observation.py
+++ b/gym/wrappers/pixel_observation.py
@@ -110,14 +110,14 @@
pixel_observation = self._add_pixel_observation(observation)
return pixel_observation
- def _add_pixel_observation(self, observation):
+ def _add_pixel_observation(self, wrapped_observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
- observation = type(observation)(observation)
+ observation = type(wrapped_observation)(wrapped_observation)
else:
observation = collections.OrderedDict()
- observation[STATE_KEY] = observation
+ observation[STATE_KEY] = wrapped_observation
pixel_observations = {
pixel_key: self.env.render(**self._render_kwargs[pixel_key])
| {"golden_diff": "diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py\n--- a/gym/wrappers/pixel_observation.py\n+++ b/gym/wrappers/pixel_observation.py\n@@ -110,14 +110,14 @@\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n \n- def _add_pixel_observation(self, observation):\n+ def _add_pixel_observation(self, wrapped_observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n- observation = type(observation)(observation)\n+ observation = type(wrapped_observation)(wrapped_observation)\n else:\n observation = collections.OrderedDict()\n- observation[STATE_KEY] = observation\n+ observation[STATE_KEY] = wrapped_observation\n \n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n", "issue": "Bug in PixelObservationWrapper\nIn the pixel_observation.py, we have a bit of code that looks like this: \r\n\r\n```\r\n def _add_pixel_observation(self, observation):\r\n if self._pixels_only:\r\n observation = collections.OrderedDict()\r\n elif self._observation_is_dict:\r\n observation = type(observation)(observation)\r\n else:\r\n observation = collections.OrderedDict()\r\n observation[STATE_KEY] = observation\r\n```\r\n\r\nIf you note, the argument `observation` is being clobbered in the else case, so now the observation dictionary at the STATE_KEY refers to itself instead of the underlying env's observation. \r\n\r\nI'm happy to fix this and submit a pull request but I wanted to raise the community's attention to this first. \n", "before_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. 
Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}]} | 1,914 | 216 |
gh_patches_debug_51330 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: io.imshow() and io.show() do not work as expected on Windows
In my Win7 x64 environment, io.imshow() and io.show() do not work as expected. I use io.imshow() to show multiple images, and when I call io.show() to show all of them, only the last image is displayed. On Linux it works well: all the images appear when I call io.show().
</issue>
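For context, the usual matplotlib idiom for showing several images from one script is to open a new figure per image. A sketch of how the plugin's `imshow` could do that is below; it mirrors the eventual patch at the end of this record, but treat the exact condition as illustrative rather than authoritative.

```python
import matplotlib.pyplot as plt

def imshow(image, **kwargs):
    # Start a new figure if the current axes already hold data, so each call
    # gets its own window instead of overwriting the previous image.
    if plt.gca().has_data():
        plt.figure()
    kwargs.setdefault('interpolation', 'nearest')
    kwargs.setdefault('cmap', 'gray')
    plt.imshow(image, **kwargs)
```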
<code>
[start of skimage/io/_plugins/matplotlib_plugin.py]
1 import matplotlib.pyplot as plt
2
3
4 def imshow(*args, **kwargs):
5 kwargs.setdefault('interpolation', 'nearest')
6 kwargs.setdefault('cmap', 'gray')
7 plt.imshow(*args, **kwargs)
8
9 imread = plt.imread
10 show = plt.show
11
12
13 def _app_show():
14 show()
15
[end of skimage/io/_plugins/matplotlib_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py
--- a/skimage/io/_plugins/matplotlib_plugin.py
+++ b/skimage/io/_plugins/matplotlib_plugin.py
@@ -2,6 +2,8 @@
def imshow(*args, **kwargs):
+ if plt.gca().has_data():
+ plt.figure()
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
plt.imshow(*args, **kwargs)
| {"golden_diff": "diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py\n--- a/skimage/io/_plugins/matplotlib_plugin.py\n+++ b/skimage/io/_plugins/matplotlib_plugin.py\n@@ -2,6 +2,8 @@\n \n \n def imshow(*args, **kwargs):\n+ if plt.gca().has_data():\n+ plt.figure()\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n", "issue": "Bug: io.imshow() and io.show() do not work as expected in winows\nIn my win7-x64 environemnt, io.imshow() and io.show() do not work as expected. I use io.imshow() to show mutiple images, and when I call io.show() to show all the images, only the last image shows. In linux, it works well and all the images will show when I call io.show()\n\n", "before_files": [{"content": "import matplotlib.pyplot as plt\n\n\ndef imshow(*args, **kwargs):\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n\nimread = plt.imread\nshow = plt.show\n\n\ndef _app_show():\n show()\n", "path": "skimage/io/_plugins/matplotlib_plugin.py"}]} | 719 | 114 |
gh_patches_debug_9361 | rasdani/github-patches | git_diff | great-expectations__great_expectations-1489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`.
</issue>
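A one-file comparison of the two idioms, as they behave under Python 2 (under Python 3 both expressions already return a float):

```python
from __future__ import division  # must appear at the top of the module

x, y = 1, 3
print(1. * x / y)  # old workaround: force float division by multiplying by 1.
print(x / y)       # with the __future__ import, plain / is true division too
```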
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # great_expectations documentation build configuration file, created by
4 # sphinx-quickstart on Thu Jun 8 23:00:19 2017.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 import os
20 import sys
21
22 sys.path.insert(0, os.path.abspath("../"))
23
24
25 # -- General configuration ------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 #
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 # 'sphinx_rtd_theme',
36 "sphinx.ext.autodoc",
37 "sphinx.ext.todo",
38 "sphinx.ext.coverage",
39 # 'sphinx.ext.mathjax'
40 "sphinx.ext.napoleon",
41 "sphinxcontrib.contentui",
42 "sphinx_gitstamp",
43 "sphinx.ext.autosectionlabel",
44 ]
45
46 # Add any paths that contain templates here, relative to this directory.
47 templates_path = ["_templates"]
48
49 # The suffix(es) of source filenames.
50 # You can specify multiple suffix as a list of string:
51 #
52 # source_suffix = ['.rst', '.md']
53 source_suffix = ".rst"
54
55 # The master toctree document.
56 master_doc = "index"
57
58 # General information about the project.
59 project = u"great_expectations"
60 copyright = u"2020, The Great Expectations Team. "
61 author = u"The Great Expectations Team"
62 gitstamp_fmt = "%d %b %Y"
63
64 # The version info for the project you're documenting, acts as replacement for
65 # |version| and |release|, also used in various other places throughout the
66 # built documents.
67 #
68 # The short X.Y version.
69 version = u""
70 # The full version, including alpha/beta/rc tags.
71 release = u""
72
73 # The language for content autogenerated by Sphinx. Refer to documentation
74 # for a list of supported languages.
75 #
76 # This is also used if you do content translation via gettext catalogs.
77 # Usually you set "language" from the command line for these cases.
78 language = None
79
80 # List of patterns, relative to source directory, that match files and
81 # directories to ignore when looking for source files.
82 # This patterns also effect to html_static_path and html_extra_path
83 exclude_patterns = []
84
85 # The name of the Pygments (syntax highlighting) style to use.
86 pygments_style = "paraiso-dark"
87
88 # If true, `todo` and `todoList` produce output, else they produce nothing.
89 todo_include_todos = True
90
91 # -- Options for HTML output ----------------------------------------------
92
93 # The theme to use for HTML and HTML Help pages. See the documentation for
94 # a list of builtin themes.
95 #
96 # html_theme = 'alabaster'
97 html_theme = "sphinx_rtd_theme"
98
99 # Theme options are theme-specific and customize the look and feel of a theme
100 # further. For a list of options available for each theme, see the
101 # documentation.
102 #
103 html_theme_options = {
104 "logo_only": True,
105 }
106
107 html_static_path = [
108 "_static",
109 "_static/style.css",
110 "_static/hk-grotesk-pro/HKGroteskPro-Bold.woff2",
111 "_static/hk-grotesk-pro/HKGroteskPro-Regular.woff2",
112 "_static/hk-grotesk-pro/HKGroteskPro-SemiBold.woff2",
113 "_static/hk-grotesk-pro/HKGroteskPro-Medium.woff2",
114 "_static/header-logo.png",
115 "_static/discuss-logo.png",
116 ]
117 html_css_files = ["style.css"]
118
119 # html_logo = '../pip-logo.png'
120
121 # Add any paths that contain custom static files (such as style sheets) here,
122 # relative to this directory. They are copied after the builtin static files,
123 # so a file named "default.css" will overwrite the builtin "default.css".
124
125
126 # -- Options for Napoleon Extension --------------------------------------------
127
128 # Parse Google style docstrings.
129 # See http://google.github.io/styleguide/pyguide.html
130 napoleon_google_docstring = True
131
132 # Parse NumPy style docstrings.
133 # See https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
134 napoleon_numpy_docstring = True
135
136 # Should special members (like __membername__) and private members
137 # (like _membername) members be included in the documentation if they
138 # have docstrings.
139 napoleon_include_private_with_doc = False
140 napoleon_include_special_with_doc = True
141
142 # If True, docstring sections will use the ".. admonition::" directive.
143 # If False, docstring sections will use the ".. rubric::" directive.
144 # One may look better than the other depending on what HTML theme is used.
145 napoleon_use_admonition_for_examples = False
146 napoleon_use_admonition_for_notes = False
147 napoleon_use_admonition_for_references = False
148
149 # If True, use Sphinx :ivar: directive for instance variables:
150 # :ivar attr1: Description of attr1.
151 # :type attr1: type
152 # If False, use Sphinx .. attribute:: directive for instance variables:
153 # .. attribute:: attr1
154 #
155 # *type*
156 #
157 # Description of attr1.
158 napoleon_use_ivar = False
159
160 # If True, use Sphinx :param: directive for function parameters:
161 # :param arg1: Description of arg1.
162 # :type arg1: type
163 # If False, output function parameters using the :parameters: field:
164 # :parameters: **arg1** (*type*) -- Description of arg1.
165 napoleon_use_param = True
166
167 # If True, use Sphinx :rtype: directive for the return type:
168 # :returns: Description of return value.
169 # :rtype: type
170 # If False, output the return type inline with the return description:
171 # :returns: *type* -- Description of return value.
172 napoleon_use_rtype = True
173
174
175 # -- Options for HTMLHelp output ------------------------------------------
176
177 # Output file base name for HTML help builder.
178 htmlhelp_basename = "great_expectationsdoc"
179
180
181 # -- Options for LaTeX output ---------------------------------------------
182
183 latex_elements = {
184 # The paper size ('letterpaper' or 'a4paper').
185 #
186 # 'papersize': 'letterpaper',
187 # The font size ('10pt', '11pt' or '12pt').
188 #
189 # 'pointsize': '10pt',
190 # Additional stuff for the LaTeX preamble.
191 #
192 # 'preamble': '',
193 # Latex figure (float) alignment
194 #
195 # 'figure_align': 'htbp',
196 }
197
198 # Grouping the document tree into LaTeX files. List of tuples
199 # (source start file, target name, title,
200 # author, documentclass [howto, manual, or own class]).
201 latex_documents = [
202 (
203 master_doc,
204 "great_expectations.tex",
205 u"great\\_expectations Documentation",
206 u"The Great Expectations Team",
207 "manual",
208 ),
209 ]
210
211
212 # -- Options for manual page output ---------------------------------------
213
214 # One entry per manual page. List of tuples
215 # (source start file, name, description, authors, manual section).
216 man_pages = [
217 (master_doc, "great_expectations", u"great_expectations Documentation", [author], 1)
218 ]
219
220
221 # -- Options for Texinfo output -------------------------------------------
222
223 # Grouping the document tree into Texinfo files. List of tuples
224 # (source start file, target name, title, author,
225 # dir menu entry, description, category)
226 texinfo_documents = [
227 (
228 master_doc,
229 "great_expectations",
230 u"great_expectations Documentation",
231 author,
232 "great_expectations",
233 "Always know what to expect from your data.",
234 "Miscellaneous",
235 ),
236 ]
237
238
239 autodoc_member_order = "bysource"
240
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -41,11 +41,15 @@
"sphinxcontrib.contentui",
"sphinx_gitstamp",
"sphinx.ext.autosectionlabel",
+ 'sphinxcontrib.discourse',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
+#discourse url connect
+discourse_url = 'https://discuss.greatexpectations.io/'
+
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -41,11 +41,15 @@\n \"sphinxcontrib.contentui\",\n \"sphinx_gitstamp\",\n \"sphinx.ext.autosectionlabel\",\n+ 'sphinxcontrib.discourse',\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n \n+#discourse url connect\n+discourse_url = 'https://discuss.greatexpectations.io/'\n+\n # The suffix(es) of source filenames.\n # You can specify multiple suffix as a list of string:\n #\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# great_expectations documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 8 23:00:19 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../\"))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # 'sphinx_rtd_theme',\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n # 'sphinx.ext.mathjax'\n \"sphinx.ext.napoleon\",\n \"sphinxcontrib.contentui\",\n \"sphinx_gitstamp\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = u\"great_expectations\"\ncopyright = u\"2020, The Great Expectations Team. \"\nauthor = u\"The Great Expectations Team\"\ngitstamp_fmt = \"%d %b %Y\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u\"\"\n# The full version, including alpha/beta/rc tags.\nrelease = u\"\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"paraiso-dark\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo_only\": True,\n}\n\nhtml_static_path = [\n \"_static\",\n \"_static/style.css\",\n \"_static/hk-grotesk-pro/HKGroteskPro-Bold.woff2\",\n \"_static/hk-grotesk-pro/HKGroteskPro-Regular.woff2\",\n \"_static/hk-grotesk-pro/HKGroteskPro-SemiBold.woff2\",\n \"_static/hk-grotesk-pro/HKGroteskPro-Medium.woff2\",\n \"_static/header-logo.png\",\n \"_static/discuss-logo.png\",\n]\nhtml_css_files = [\"style.css\"]\n\n# html_logo = '../pip-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n\n\n# -- Options for Napoleon Extension --------------------------------------------\n\n# Parse Google style docstrings.\n# See http://google.github.io/styleguide/pyguide.html\nnapoleon_google_docstring = True\n\n# Parse NumPy style docstrings.\n# See https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt\nnapoleon_numpy_docstring = True\n\n# Should special members (like __membername__) and private members\n# (like _membername) members be included in the documentation if they\n# have docstrings.\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = True\n\n# If True, docstring sections will use the \".. admonition::\" directive.\n# If False, docstring sections will use the \".. rubric::\" directive.\n# One may look better than the other depending on what HTML theme is used.\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = False\nnapoleon_use_admonition_for_references = False\n\n# If True, use Sphinx :ivar: directive for instance variables:\n# :ivar attr1: Description of attr1.\n# :type attr1: type\n# If False, use Sphinx .. attribute:: directive for instance variables:\n# .. 
attribute:: attr1\n#\n# *type*\n#\n# Description of attr1.\nnapoleon_use_ivar = False\n\n# If True, use Sphinx :param: directive for function parameters:\n# :param arg1: Description of arg1.\n# :type arg1: type\n# If False, output function parameters using the :parameters: field:\n# :parameters: **arg1** (*type*) -- Description of arg1.\nnapoleon_use_param = True\n\n# If True, use Sphinx :rtype: directive for the return type:\n# :returns: Description of return value.\n# :rtype: type\n# If False, output the return type inline with the return description:\n# :returns: *type* -- Description of return value.\nnapoleon_use_rtype = True\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"great_expectationsdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"great_expectations.tex\",\n u\"great\\\\_expectations Documentation\",\n u\"The Great Expectations Team\",\n \"manual\",\n ),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, \"great_expectations\", u\"great_expectations Documentation\", [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"great_expectations\",\n u\"great_expectations Documentation\",\n author,\n \"great_expectations\",\n \"Always know what to expect from your data.\",\n \"Miscellaneous\",\n ),\n]\n\n\nautodoc_member_order = \"bysource\"\n", "path": "docs/conf.py"}]} | 3,002 | 141 |
gh_patches_debug_64317 | rasdani/github-patches | git_diff | pex-tool__pex-1112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.21
On the docket:
+ [x] "FileNotFoundError: [Errno 2] No such file or directory" in pex #1098
+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101
+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100
+ [x] Invalid requirement, parse error at "'python_v' #940
+ [x] Pex skipping pandas activation #1017
+ [x] Changing vendored versions does not fully clean up previous version #1096
+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.20"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.20"
+__version__ = "2.1.21"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.20\"\n+__version__ = \"2.1.21\"\n", "issue": "Release 2.1.21\nOn the docket:\r\n+ [x] \"FileNotFoundError: [Errno 2] No such file or directory\" in pex #1098\r\n+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101\r\n+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100\r\n+ [x] Invalid requirement, parse error at \"'python_v' #940\r\n+ [x] Pex skipping pandas activation #1017\r\n+ [x] Changing vendored versions does not fully clean up previous version #1096\r\n+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.20\"\n", "path": "pex/version.py"}]} | 755 | 96 |
gh_patches_debug_13938 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exporter-datadog: docstring does not match arguments
The docstring here:
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L68
says it expects a list, but what it really expects is a string containing a comma-separated list of tags.
Do I fix the docstring, do I change the code to accept a real list, or do I change the code to accept *either* a list or the current string? I'm also working on #154 and would really prefer that both objects accept the same parameters if possible... but this is just odd, and IMHO not very pythonic.
What do we prefer for things like this?
(I'm happy to do the work, again, since I'm already working on related stuff)
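For reference, a rough sketch of the "accept either" option (untested; `_coerce_tags` is a hypothetical name, only `_parse_tags_str` exists in the module today):

```python
# Hypothetical helper inside exporter.py -- not the current API.
# Accepts the existing comma-separated string ("key1:value1,key2:value2")
# as well as a mapping or an iterable of "key:value" strings.
def _coerce_tags(tags):
    if not tags:
        return {}
    if isinstance(tags, str):
        return _parse_tags_str(tags)  # existing parser, unchanged
    if isinstance(tags, dict):
        return dict(tags)
    # assume an iterable of "key:value" strings
    return _parse_tags_str(",".join(tags))
```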
</issue>
<code>
[start of exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 from urllib.parse import urlparse
18
19 from ddtrace.ext import SpanTypes as DatadogSpanTypes
20 from ddtrace.internal.writer import AgentWriter
21 from ddtrace.span import Span as DatadogSpan
22
23 import opentelemetry.trace as trace_api
24 from opentelemetry.exporter.datadog.constants import (
25 DD_ORIGIN,
26 ENV_KEY,
27 SAMPLE_RATE_METRIC_KEY,
28 SERVICE_NAME_TAG,
29 VERSION_KEY,
30 )
31 from opentelemetry.sdk.trace import sampling
32 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
33
34 logger = logging.getLogger(__name__)
35
36
37 DEFAULT_AGENT_URL = "http://localhost:8126"
38 _INSTRUMENTATION_SPAN_TYPES = {
39 "opentelemetry.instrumentation.aiohttp-client": DatadogSpanTypes.HTTP,
40 "opentelemetry.instrumentation.asgi": DatadogSpanTypes.WEB,
41 "opentelemetry.instrumentation.dbapi": DatadogSpanTypes.SQL,
42 "opentelemetry.instrumentation.django": DatadogSpanTypes.WEB,
43 "opentelemetry.instrumentation.flask": DatadogSpanTypes.WEB,
44 "opentelemetry.instrumentation.grpc": DatadogSpanTypes.GRPC,
45 "opentelemetry.instrumentation.jinja2": DatadogSpanTypes.TEMPLATE,
46 "opentelemetry.instrumentation.mysql": DatadogSpanTypes.SQL,
47 "opentelemetry.instrumentation.psycopg2": DatadogSpanTypes.SQL,
48 "opentelemetry.instrumentation.pymemcache": DatadogSpanTypes.CACHE,
49 "opentelemetry.instrumentation.pymongo": DatadogSpanTypes.MONGODB,
50 "opentelemetry.instrumentation.pymysql": DatadogSpanTypes.SQL,
51 "opentelemetry.instrumentation.redis": DatadogSpanTypes.REDIS,
52 "opentelemetry.instrumentation.requests": DatadogSpanTypes.HTTP,
53 "opentelemetry.instrumentation.sqlalchemy": DatadogSpanTypes.SQL,
54 "opentelemetry.instrumentation.wsgi": DatadogSpanTypes.WEB,
55 }
56
57
58 class DatadogSpanExporter(SpanExporter):
59 """Datadog span exporter for OpenTelemetry.
60
61 Args:
62 agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable
63 service: The service name to be used for the application or use ``DD_SERVICE`` environment variable
64 env: Set the application’s environment or use ``DD_ENV`` environment variable
65 version: Set the application’s version or use ``DD_VERSION`` environment variable
66 tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable
67 """
68
69 def __init__(
70 self, agent_url=None, service=None, env=None, version=None, tags=None
71 ):
72 self.agent_url = (
73 agent_url
74 if agent_url
75 else os.environ.get("DD_TRACE_AGENT_URL", DEFAULT_AGENT_URL)
76 )
77 self.service = service or os.environ.get("DD_SERVICE")
78 self.env = env or os.environ.get("DD_ENV")
79 self.version = version or os.environ.get("DD_VERSION")
80 self.tags = _parse_tags_str(tags or os.environ.get("DD_TAGS"))
81 self._agent_writer = None
82
83 @property
84 def agent_writer(self):
85 if self._agent_writer is None:
86 url_parsed = urlparse(self.agent_url)
87 if url_parsed.scheme in ("http", "https"):
88 self._agent_writer = AgentWriter(
89 hostname=url_parsed.hostname,
90 port=url_parsed.port,
91 https=url_parsed.scheme == "https",
92 )
93 elif url_parsed.scheme == "unix":
94 self._agent_writer = AgentWriter(uds_path=url_parsed.path)
95 else:
96 raise ValueError(
97 "Unknown scheme `%s` for agent URL" % url_parsed.scheme
98 )
99 return self._agent_writer
100
101 def export(self, spans):
102 datadog_spans = self._translate_to_datadog(spans)
103
104 self.agent_writer.write(spans=datadog_spans)
105
106 return SpanExportResult.SUCCESS
107
108 def shutdown(self):
109 if self.agent_writer.started:
110 self.agent_writer.stop()
111 self.agent_writer.join(self.agent_writer.exit_timeout)
112
113 # pylint: disable=too-many-locals
114 def _translate_to_datadog(self, spans):
115 datadog_spans = []
116
117 for span in spans:
118 trace_id, parent_id, span_id = _get_trace_ids(span)
119
120 # datadog Span is initialized with a reference to the tracer which is
121 # used to record the span when it is finished. We can skip ignore this
122 # because we are not calling the finish method and explictly set the
123 # duration.
124 tracer = None
125
126 # extract resource attributes to be used as tags as well as potential service name
127 [
128 resource_tags,
129 resource_service_name,
130 ] = _extract_tags_from_resource(span.resource)
131
132 datadog_span = DatadogSpan(
133 tracer,
134 _get_span_name(span),
135 service=resource_service_name or self.service,
136 resource=_get_resource(span),
137 span_type=_get_span_type(span),
138 trace_id=trace_id,
139 span_id=span_id,
140 parent_id=parent_id,
141 )
142 datadog_span.start_ns = span.start_time
143 datadog_span.duration_ns = span.end_time - span.start_time
144
145 if not span.status.is_ok:
146 datadog_span.error = 1
147 if span.status.description:
148 exc_type, exc_val = _get_exc_info(span)
149 # no mapping for error.stack since traceback not recorded
150 datadog_span.set_tag("error.msg", exc_val)
151 datadog_span.set_tag("error.type", exc_type)
152
153 # combine resource attributes and span attributes, don't modify existing span attributes
154 combined_span_tags = {}
155 combined_span_tags.update(resource_tags)
156 combined_span_tags.update(span.attributes)
157
158 datadog_span.set_tags(combined_span_tags)
159
160 # add configured env tag
161 if self.env is not None:
162 datadog_span.set_tag(ENV_KEY, self.env)
163
164 # add configured application version tag to only root span
165 if self.version is not None and parent_id == 0:
166 datadog_span.set_tag(VERSION_KEY, self.version)
167
168 # add configured global tags
169 datadog_span.set_tags(self.tags)
170
171 # add origin to root span
172 origin = _get_origin(span)
173 if origin and parent_id == 0:
174 datadog_span.set_tag(DD_ORIGIN, origin)
175
176 sampling_rate = _get_sampling_rate(span)
177 if sampling_rate is not None:
178 datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)
179
180 # span events and span links are not supported
181
182 datadog_spans.append(datadog_span)
183
184 return datadog_spans
185
186
187 def _get_trace_ids(span):
188 """Extract tracer ids from span"""
189 ctx = span.get_span_context()
190 trace_id = ctx.trace_id
191 span_id = ctx.span_id
192
193 if isinstance(span.parent, trace_api.Span):
194 parent_id = span.parent.get_span_context().span_id
195 elif isinstance(span.parent, trace_api.SpanContext):
196 parent_id = span.parent.span_id
197 else:
198 parent_id = 0
199
200 trace_id = _convert_trace_id_uint64(trace_id)
201
202 return trace_id, parent_id, span_id
203
204
205 def _convert_trace_id_uint64(otel_id):
206 """Convert 128-bit int used for trace_id to 64-bit unsigned int"""
207 return otel_id & 0xFFFFFFFFFFFFFFFF
208
209
210 def _get_span_name(span):
211 """Get span name by using instrumentation and kind while backing off to
212 span.name
213 """
214 instrumentation_name = (
215 span.instrumentation_info.name if span.instrumentation_info else None
216 )
217 span_kind_name = span.kind.name if span.kind else None
218 name = (
219 "{}.{}".format(instrumentation_name, span_kind_name)
220 if instrumentation_name and span_kind_name
221 else span.name
222 )
223 return name
224
225
226 def _get_resource(span):
227 """Get resource name for span"""
228 if "http.method" in span.attributes:
229 route = span.attributes.get("http.route")
230 return (
231 span.attributes["http.method"] + " " + route
232 if route
233 else span.attributes["http.method"]
234 )
235
236 return span.name
237
238
239 def _get_span_type(span):
240 """Get Datadog span type"""
241 instrumentation_name = (
242 span.instrumentation_info.name if span.instrumentation_info else None
243 )
244 span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)
245 return span_type
246
247
248 def _get_exc_info(span):
249 """Parse span status description for exception type and value"""
250 exc_type, exc_val = span.status.description.split(":", 1)
251 return exc_type, exc_val.strip()
252
253
254 def _get_origin(span):
255 ctx = span.get_span_context()
256 origin = ctx.trace_state.get(DD_ORIGIN)
257 return origin
258
259
260 def _get_sampling_rate(span):
261 ctx = span.get_span_context()
262 tracer_provider = trace_api.get_tracer_provider()
263 if not hasattr(tracer_provider, "sampler"):
264 return None
265 sampler = tracer_provider.sampler
266 return (
267 sampler.rate
268 if ctx.trace_flags.sampled
269 and isinstance(sampler, sampling.TraceIdRatioBased)
270 else None
271 )
272
273
274 def _parse_tags_str(tags_str):
275 """Parse a string of tags typically provided via environment variables.
276
277 The expected string is of the form::
278 "key1:value1,key2:value2"
279
280 :param tags_str: A string of the above form to parse tags from.
281 :return: A dict containing the tags that were parsed.
282 """
283 parsed_tags = {}
284 if not tags_str:
285 return parsed_tags
286
287 for tag in tags_str.split(","):
288 try:
289 key, value = tag.split(":", 1)
290
291 # Validate the tag
292 if key == "" or value == "" or value.endswith(":"):
293 raise ValueError
294 except ValueError:
295 logger.error(
296 "Malformed tag in tag pair '%s' from tag string '%s'.",
297 tag,
298 tags_str,
299 )
300 else:
301 parsed_tags[key] = value
302
303 return parsed_tags
304
305
306 def _extract_tags_from_resource(resource):
307 """Parse tags from resource.attributes, except service.name which
308 has special significance within datadog"""
309 tags = {}
310 service_name = None
311 if not (resource and getattr(resource, "attributes", None)):
312 return [tags, service_name]
313
314 for attribute_key, attribute_value in resource.attributes.items():
315 if attribute_key == SERVICE_NAME_TAG:
316 service_name = attribute_value
317 else:
318 tags[attribute_key] = attribute_value
319 return [tags, service_name]
320
[end of exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py
@@ -63,7 +63,7 @@
service: The service name to be used for the application or use ``DD_SERVICE`` environment variable
env: Set the application’s environment or use ``DD_ENV`` environment variable
version: Set the application’s version or use ``DD_VERSION`` environment variable
- tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable
+ tags: A list (formatted as a comma-separated string) of default tags to be added to every span or use ``DD_TAGS`` environment variable
"""
def __init__(
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n@@ -63,7 +63,7 @@\n service: The service name to be used for the application or use ``DD_SERVICE`` environment variable\n env: Set the application\u2019s environment or use ``DD_ENV`` environment variable\n version: Set the application\u2019s version or use ``DD_VERSION`` environment variable\n- tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable\n+ tags: A list (formatted as a comma-separated string) of default tags to be added to every span or use ``DD_TAGS`` environment variable\n \"\"\"\n \n def __init__(\n", "issue": "exporter-datadog: docstring does not match arguments\nThe docstring here:\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L68\r\nsays it expects a list, but what it really expects is a string containing a comma-separated list of tags.\r\n\r\nDo I fix the docstring, do I change the code code to accept a real list ,or do I change the code to accept *either* a list or the current string? I'm also working on #154 and would really prefer both objects accept the same parameters if possible.. but this is just odd, and IMHO not very pythonic.\r\n\r\nWhat do we prefer for things like this?\r\n\r\n(I'm happy to do the work, again, since I'm already working on related stuff)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nfrom ddtrace.ext import SpanTypes as DatadogSpanTypes\nfrom ddtrace.internal.writer import AgentWriter\nfrom ddtrace.span import Span as DatadogSpan\n\nimport opentelemetry.trace as trace_api\nfrom opentelemetry.exporter.datadog.constants import (\n DD_ORIGIN,\n ENV_KEY,\n SAMPLE_RATE_METRIC_KEY,\n SERVICE_NAME_TAG,\n VERSION_KEY,\n)\nfrom opentelemetry.sdk.trace import sampling\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_AGENT_URL = \"http://localhost:8126\"\n_INSTRUMENTATION_SPAN_TYPES = {\n \"opentelemetry.instrumentation.aiohttp-client\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.asgi\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.dbapi\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.django\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.flask\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.grpc\": DatadogSpanTypes.GRPC,\n \"opentelemetry.instrumentation.jinja2\": DatadogSpanTypes.TEMPLATE,\n \"opentelemetry.instrumentation.mysql\": 
DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.psycopg2\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.pymemcache\": DatadogSpanTypes.CACHE,\n \"opentelemetry.instrumentation.pymongo\": DatadogSpanTypes.MONGODB,\n \"opentelemetry.instrumentation.pymysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.redis\": DatadogSpanTypes.REDIS,\n \"opentelemetry.instrumentation.requests\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.sqlalchemy\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.wsgi\": DatadogSpanTypes.WEB,\n}\n\n\nclass DatadogSpanExporter(SpanExporter):\n \"\"\"Datadog span exporter for OpenTelemetry.\n\n Args:\n agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable\n service: The service name to be used for the application or use ``DD_SERVICE`` environment variable\n env: Set the application\u2019s environment or use ``DD_ENV`` environment variable\n version: Set the application\u2019s version or use ``DD_VERSION`` environment variable\n tags: A list of default tags to be added to every span or use ``DD_TAGS`` environment variable\n \"\"\"\n\n def __init__(\n self, agent_url=None, service=None, env=None, version=None, tags=None\n ):\n self.agent_url = (\n agent_url\n if agent_url\n else os.environ.get(\"DD_TRACE_AGENT_URL\", DEFAULT_AGENT_URL)\n )\n self.service = service or os.environ.get(\"DD_SERVICE\")\n self.env = env or os.environ.get(\"DD_ENV\")\n self.version = version or os.environ.get(\"DD_VERSION\")\n self.tags = _parse_tags_str(tags or os.environ.get(\"DD_TAGS\"))\n self._agent_writer = None\n\n @property\n def agent_writer(self):\n if self._agent_writer is None:\n url_parsed = urlparse(self.agent_url)\n if url_parsed.scheme in (\"http\", \"https\"):\n self._agent_writer = AgentWriter(\n hostname=url_parsed.hostname,\n port=url_parsed.port,\n https=url_parsed.scheme == \"https\",\n )\n elif url_parsed.scheme == \"unix\":\n self._agent_writer = AgentWriter(uds_path=url_parsed.path)\n else:\n raise ValueError(\n \"Unknown scheme `%s` for agent URL\" % url_parsed.scheme\n )\n return self._agent_writer\n\n def export(self, spans):\n datadog_spans = self._translate_to_datadog(spans)\n\n self.agent_writer.write(spans=datadog_spans)\n\n return SpanExportResult.SUCCESS\n\n def shutdown(self):\n if self.agent_writer.started:\n self.agent_writer.stop()\n self.agent_writer.join(self.agent_writer.exit_timeout)\n\n # pylint: disable=too-many-locals\n def _translate_to_datadog(self, spans):\n datadog_spans = []\n\n for span in spans:\n trace_id, parent_id, span_id = _get_trace_ids(span)\n\n # datadog Span is initialized with a reference to the tracer which is\n # used to record the span when it is finished. 
We can skip ignore this\n # because we are not calling the finish method and explictly set the\n # duration.\n tracer = None\n\n # extract resource attributes to be used as tags as well as potential service name\n [\n resource_tags,\n resource_service_name,\n ] = _extract_tags_from_resource(span.resource)\n\n datadog_span = DatadogSpan(\n tracer,\n _get_span_name(span),\n service=resource_service_name or self.service,\n resource=_get_resource(span),\n span_type=_get_span_type(span),\n trace_id=trace_id,\n span_id=span_id,\n parent_id=parent_id,\n )\n datadog_span.start_ns = span.start_time\n datadog_span.duration_ns = span.end_time - span.start_time\n\n if not span.status.is_ok:\n datadog_span.error = 1\n if span.status.description:\n exc_type, exc_val = _get_exc_info(span)\n # no mapping for error.stack since traceback not recorded\n datadog_span.set_tag(\"error.msg\", exc_val)\n datadog_span.set_tag(\"error.type\", exc_type)\n\n # combine resource attributes and span attributes, don't modify existing span attributes\n combined_span_tags = {}\n combined_span_tags.update(resource_tags)\n combined_span_tags.update(span.attributes)\n\n datadog_span.set_tags(combined_span_tags)\n\n # add configured env tag\n if self.env is not None:\n datadog_span.set_tag(ENV_KEY, self.env)\n\n # add configured application version tag to only root span\n if self.version is not None and parent_id == 0:\n datadog_span.set_tag(VERSION_KEY, self.version)\n\n # add configured global tags\n datadog_span.set_tags(self.tags)\n\n # add origin to root span\n origin = _get_origin(span)\n if origin and parent_id == 0:\n datadog_span.set_tag(DD_ORIGIN, origin)\n\n sampling_rate = _get_sampling_rate(span)\n if sampling_rate is not None:\n datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)\n\n # span events and span links are not supported\n\n datadog_spans.append(datadog_span)\n\n return datadog_spans\n\n\ndef _get_trace_ids(span):\n \"\"\"Extract tracer ids from span\"\"\"\n ctx = span.get_span_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n if isinstance(span.parent, trace_api.Span):\n parent_id = span.parent.get_span_context().span_id\n elif isinstance(span.parent, trace_api.SpanContext):\n parent_id = span.parent.span_id\n else:\n parent_id = 0\n\n trace_id = _convert_trace_id_uint64(trace_id)\n\n return trace_id, parent_id, span_id\n\n\ndef _convert_trace_id_uint64(otel_id):\n \"\"\"Convert 128-bit int used for trace_id to 64-bit unsigned int\"\"\"\n return otel_id & 0xFFFFFFFFFFFFFFFF\n\n\ndef _get_span_name(span):\n \"\"\"Get span name by using instrumentation and kind while backing off to\n span.name\n \"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_kind_name = span.kind.name if span.kind else None\n name = (\n \"{}.{}\".format(instrumentation_name, span_kind_name)\n if instrumentation_name and span_kind_name\n else span.name\n )\n return name\n\n\ndef _get_resource(span):\n \"\"\"Get resource name for span\"\"\"\n if \"http.method\" in span.attributes:\n route = span.attributes.get(\"http.route\")\n return (\n span.attributes[\"http.method\"] + \" \" + route\n if route\n else span.attributes[\"http.method\"]\n )\n\n return span.name\n\n\ndef _get_span_type(span):\n \"\"\"Get Datadog span type\"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)\n return span_type\n\n\ndef _get_exc_info(span):\n 
\"\"\"Parse span status description for exception type and value\"\"\"\n exc_type, exc_val = span.status.description.split(\":\", 1)\n return exc_type, exc_val.strip()\n\n\ndef _get_origin(span):\n ctx = span.get_span_context()\n origin = ctx.trace_state.get(DD_ORIGIN)\n return origin\n\n\ndef _get_sampling_rate(span):\n ctx = span.get_span_context()\n tracer_provider = trace_api.get_tracer_provider()\n if not hasattr(tracer_provider, \"sampler\"):\n return None\n sampler = tracer_provider.sampler\n return (\n sampler.rate\n if ctx.trace_flags.sampled\n and isinstance(sampler, sampling.TraceIdRatioBased)\n else None\n )\n\n\ndef _parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n logger.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n\n\ndef _extract_tags_from_resource(resource):\n \"\"\"Parse tags from resource.attributes, except service.name which\n has special significance within datadog\"\"\"\n tags = {}\n service_name = None\n if not (resource and getattr(resource, \"attributes\", None)):\n return [tags, service_name]\n\n for attribute_key, attribute_value in resource.attributes.items():\n if attribute_key == SERVICE_NAME_TAG:\n service_name = attribute_value\n else:\n tags[attribute_key] = attribute_value\n return [tags, service_name]\n", "path": "exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py"}]} | 4,093 | 231 |
gh_patches_debug_38331 | rasdani/github-patches | git_diff | spectrochempy__spectrochempy-11 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automate building of docs for new release and dev version.
Author: @fernandezc (Christian Fernandez)
Date: 2020-05-28
Redmine Issue: 74, https://redmine.spectrochempy.fr/issues/74
---
None
</issue>
<code>
[start of spectrochempy/core/readers/readopus.py]
1 # -*- coding: utf-8 -*-
2 #
3 # ======================================================================================================================
4 # Copyright (©) 2015-2020 LCS
5 # Laboratoire Catalyse et Spectrochimie, Caen, France.
6 # CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
7 # See full LICENSE agreement in the root directory
8 # ======================================================================================================================
9
10 """This module to extend NDDataset with the import methods method.
11
12 """
13 __all__ = ['read_opus']
14
15 __dataset_methods__ = __all__
16
17 # ----------------------------------------------------------------------------------------------------------------------
18 # standard imports
19 # ----------------------------------------------------------------------------------------------------------------------
20
21
22 from brukeropusreader import read_file
23 from warnings import warn
24 from datetime import datetime, timezone, timedelta
25
26
27
28
29
30 # ----------------------------------------------------------------------------------------------------------------------
31 # third party imports
32 # ----------------------------------------------------------------------------------------------------------------------
33 # ----------------------------------------------------------------------------------------------------------------------
34 # local imports
35 # ----------------------------------------------------------------------------------------------------------------------
36 from spectrochempy.core import debug_
37 from spectrochempy.core.dataset.nddataset import NDDataset
38 from spectrochempy.core.dataset.ndcoord import Coord
39 from spectrochempy.utils import readfilename
40 # ======================================================================================================================
41 # Public functions
42 # ======================================================================================================================
43
44 # .............................................................................
45 def read_opus(dataset=None, **kwargs):
46 """Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is
47 extracted ("AB" field). Returns an error if dimensions are incompatibles.
48
49 Parameters
50 ----------
51 filename : `None`, `str`, or list of `str`
52 Filename of the file(s) to load. If `None` : opens a dialog box to select
53 files. If `str` : a single filename. It list of str :
54 a list of filenames.
55 directory : str, optional, default="".
56 From where to read the specified filename. If not specified, read in
57 the defaults datadir.
58
59 Returns
60 -------
61 dataset : |NDDataset|
62 A dataset corresponding to the (set of) bruker file(s).
63
64 Examples
65 --------
66 >>> A = NDDataset.read_opus('irdata\\spectrum.0001')
67 >>> print(A)
68 NDDataset: [float64] a.u. (shape: (y:1, x:2568))
69 """
70 debug_("reading bruker opus files")
71
72 # filename will be given by a keyword parameter except if the first parameters is already
73 # the filename
74 filename = kwargs.get('filename', None)
75
76 # check if the first parameter is a dataset because we allow not to pass it
77 if not isinstance(dataset, NDDataset):
78 # probably did not specify a dataset
79 # so the first parameters must be the filename
80 if isinstance(dataset, (str, list)) and dataset != '':
81 filename = dataset
82
83 # check if directory was specified
84 directory = kwargs.get("directory", None)
85 sortbydate = kwargs.get("sortbydate", True)
86
87 # returns a list of files to read
88 files = readfilename(filename,
89 directory=directory,
90 filetypes=['Bruker files (*.*)',
91 'all files (*)'],
92 dictionary=False)
93 #todo: see how to use regular expression in Qt filters
94
95 if not files:
96 # there is no files, return nothing
97 return None
98
99 xaxis = None
100 intensities = []
101 names = []
102 acquisitiondates = []
103 timestamps = []
104 for file in files:
105 opus_data = read_file(file)
106 try:
107 opus_data["AB"]
108 except KeyError: # not an absorbance spectrum
109 warn("opus file {} could not be read".format(file))
110 continue
111
112 if not xaxis:
113 xaxis = Coord(opus_data.get_range("AB"), title='Wavenumbers', units='cm^-1')
114
115 elif (opus_data.get_range("AB") != xaxis.data).any():
116 raise ValueError("spectra have incompatible dimensions (xaxis)")
117
118 intensities.append(opus_data["AB"])
119 names.append(opus_data["Sample"]['SNM'])
120 acqdate = opus_data["AB Data Parameter"]["DAT"]
121 acqtime = opus_data["AB Data Parameter"]["TIM"]
122 GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])
123 date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],
124 '%d/%m/%Y_%H:%M:%S.%f')
125 UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)
126 UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)
127 # Transform to timestamp for storage in the Coord object
128 # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime
129 timestamp = UTC_date_time.timestamp()
130 acquisitiondates.append(UTC_date_time)
131 timestamps.append(timestamp)
132
133 # return if none of the files could be read:
134 if not xaxis:
135 return
136
137 yaxis = Coord(timestamps,
138 title='Acquisition timestamp (GMT)',
139 units='s',
140 labels=(acquisitiondates, names))
141
142 dataset = NDDataset(intensities)
143 dataset.set_coords(y=yaxis, x=xaxis)
144 dataset.units = 'absorbance'
145 dataset.title = 'Absorbance'
146
147 # Set origin, description and history
148 dataset.origin = "opus"
149 dataset.description = ('Dataset from opus files. \n')
150
151 if sortbydate:
152 dataset.sort(dim='y', inplace=True)
153
154 dataset.history = str(datetime.now()) + ':import from opus files \n'
155
156 # Set the NDDataset date
157 dataset._date = datetime.now()
158 dataset._modified = dataset.date
159 # debug_("end of reading")
160
161 return dataset
162
[end of spectrochempy/core/readers/readopus.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py
--- a/spectrochempy/core/readers/readopus.py
+++ b/spectrochempy/core/readers/readopus.py
@@ -22,10 +22,7 @@
from brukeropusreader import read_file
from warnings import warn
from datetime import datetime, timezone, timedelta
-
-
-
-
+from numpy import linspace
# ----------------------------------------------------------------------------------------------------------------------
# third party imports
@@ -37,6 +34,8 @@
from spectrochempy.core.dataset.nddataset import NDDataset
from spectrochempy.core.dataset.ndcoord import Coord
from spectrochempy.utils import readfilename
+
+
# ======================================================================================================================
# Public functions
# ======================================================================================================================
@@ -90,7 +89,7 @@
filetypes=['Bruker files (*.*)',
'all files (*)'],
dictionary=False)
- #todo: see how to use regular expression in Qt filters
+ # todo: see how to use regular expression in Qt filters
if not files:
# there is no files, return nothing
@@ -109,13 +108,18 @@
warn("opus file {} could not be read".format(file))
continue
+ npt = opus_data['AB Data Parameter']['NPT']
+ fxv = opus_data['AB Data Parameter']['FXV']
+ lxv = opus_data['AB Data Parameter']['LXV']
+ xdata = linspace(fxv, lxv, npt)
+
if not xaxis:
- xaxis = Coord(opus_data.get_range("AB"), title='Wavenumbers', units='cm^-1')
+ xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')
- elif (opus_data.get_range("AB") != xaxis.data).any():
+ elif (xdata != xaxis.data).any():
raise ValueError("spectra have incompatible dimensions (xaxis)")
- intensities.append(opus_data["AB"])
+ intensities.append(opus_data["AB"][:npt])
names.append(opus_data["Sample"]['SNM'])
acqdate = opus_data["AB Data Parameter"]["DAT"]
acqtime = opus_data["AB Data Parameter"]["TIM"]
| {"golden_diff": "diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py\n--- a/spectrochempy/core/readers/readopus.py\n+++ b/spectrochempy/core/readers/readopus.py\n@@ -22,10 +22,7 @@\n from brukeropusreader import read_file\n from warnings import warn\n from datetime import datetime, timezone, timedelta\n-\n-\n-\n-\n+from numpy import linspace\n \n # ----------------------------------------------------------------------------------------------------------------------\n # third party imports\n@@ -37,6 +34,8 @@\n from spectrochempy.core.dataset.nddataset import NDDataset\n from spectrochempy.core.dataset.ndcoord import Coord\n from spectrochempy.utils import readfilename\n+\n+\n # ======================================================================================================================\n # Public functions\n # ======================================================================================================================\n@@ -90,7 +89,7 @@\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n- #todo: see how to use regular expression in Qt filters\n+ # todo: see how to use regular expression in Qt filters\n \n if not files:\n # there is no files, return nothing\n@@ -109,13 +108,18 @@\n warn(\"opus file {} could not be read\".format(file))\n continue\n \n+ npt = opus_data['AB Data Parameter']['NPT']\n+ fxv = opus_data['AB Data Parameter']['FXV']\n+ lxv = opus_data['AB Data Parameter']['LXV']\n+ xdata = linspace(fxv, lxv, npt)\n+\n if not xaxis:\n- xaxis = Coord(opus_data.get_range(\"AB\"), title='Wavenumbers', units='cm^-1')\n+ xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')\n \n- elif (opus_data.get_range(\"AB\") != xaxis.data).any():\n+ elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n \n- intensities.append(opus_data[\"AB\"])\n+ intensities.append(opus_data[\"AB\"][:npt])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n", "issue": "Automate building of docs for new release and dev version.\nAuthor: @fernandezc (Christian Fernandez )\nDate: 2020-05-28\nRedmine Issue: 74, https://redmine.spectrochempy.fr/issues/74\n\n---\n\nNone\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# ======================================================================================================================\n# Copyright (\u00a9) 2015-2020 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\n\"\"\"This module to extend NDDataset with the import methods method.\n\n\"\"\"\n__all__ = ['read_opus']\n\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nfrom brukeropusreader import read_file\nfrom warnings import warn\nfrom datetime import datetime, timezone, timedelta\n\n\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# third party imports\n# 
----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# local imports\n# ----------------------------------------------------------------------------------------------------------------------\nfrom spectrochempy.core import debug_\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.ndcoord import Coord\nfrom spectrochempy.utils import readfilename\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\n\n# .............................................................................\ndef read_opus(dataset=None, **kwargs):\n \"\"\"Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is\n extracted (\"AB\" field). Returns an error if dimensions are incompatibles.\n\n Parameters\n ----------\n filename : `None`, `str`, or list of `str`\n Filename of the file(s) to load. If `None` : opens a dialog box to select\n files. If `str` : a single filename. It list of str :\n a list of filenames.\n directory : str, optional, default=\"\".\n From where to read the specified filename. If not specified, read in\n the defaults datadir.\n\n Returns\n -------\n dataset : |NDDataset|\n A dataset corresponding to the (set of) bruker file(s).\n\n Examples\n --------\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n \"\"\"\n debug_(\"reading bruker opus files\")\n\n # filename will be given by a keyword parameter except if the first parameters is already\n # the filename\n filename = kwargs.get('filename', None)\n\n # check if the first parameter is a dataset because we allow not to pass it\n if not isinstance(dataset, NDDataset):\n # probably did not specify a dataset\n # so the first parameters must be the filename\n if isinstance(dataset, (str, list)) and dataset != '':\n filename = dataset\n\n # check if directory was specified\n directory = kwargs.get(\"directory\", None)\n sortbydate = kwargs.get(\"sortbydate\", True)\n\n # returns a list of files to read\n files = readfilename(filename,\n directory=directory,\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n #todo: see how to use regular expression in Qt filters\n\n if not files:\n # there is no files, return nothing\n return None\n\n xaxis = None\n intensities = []\n names = []\n acquisitiondates = []\n timestamps = []\n for file in files:\n opus_data = read_file(file)\n try:\n opus_data[\"AB\"]\n except KeyError: # not an absorbance spectrum\n warn(\"opus file {} could not be read\".format(file))\n continue\n\n if not xaxis:\n xaxis = Coord(opus_data.get_range(\"AB\"), title='Wavenumbers', units='cm^-1')\n\n elif (opus_data.get_range(\"AB\") != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n\n intensities.append(opus_data[\"AB\"])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])\n date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],\n '%d/%m/%Y_%H:%M:%S.%f')\n UTC_date_time = 
date_time - timedelta(hours=GMT_offset_hour)\n UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)\n # Transform to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime\n timestamp = UTC_date_time.timestamp()\n acquisitiondates.append(UTC_date_time)\n timestamps.append(timestamp)\n\n # return if none of the files could be read:\n if not xaxis:\n return\n\n yaxis = Coord(timestamps,\n title='Acquisition timestamp (GMT)',\n units='s',\n labels=(acquisitiondates, names))\n\n dataset = NDDataset(intensities)\n dataset.set_coords(y=yaxis, x=xaxis)\n dataset.units = 'absorbance'\n dataset.title = 'Absorbance'\n\n # Set origin, description and history\n dataset.origin = \"opus\"\n dataset.description = ('Dataset from opus files. \\n')\n\n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n\n dataset.history = str(datetime.now()) + ':import from opus files \\n'\n\n # Set the NDDataset date\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n # debug_(\"end of reading\")\n\n return dataset\n", "path": "spectrochempy/core/readers/readopus.py"}]} | 2,210 | 516 |
gh_patches_debug_24030 | rasdani/github-patches | git_diff | beetbox__beets-3661 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nose (1.x) is mostly unmaintained
https://nose.readthedocs.io/en/latest/#note-to-users
> Nose has been in maintenance mode for the past several years and will likely cease without a new person/team to take over maintainership. New projects should consider using Nose2, py.test, or just plain unittest/unittest2.
Are there any benefits to nose2 over py.test?
The community seems to mostly use py.test (if not using unittest) at this point. I'd like to suggest making it our default test runner. We can decide on adapting our tests to py.test separately.
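To illustrate why the runner can change before the tests do (a minimal sketch; the file and test names are made up, not actual beets tests), pytest collects plain `unittest` cases as-is, so the existing suite should run under `python -m pytest test` without edits:

```python
# test_example.py -- hypothetical unittest-style test, collected unchanged
# by `python -m unittest` and by `python -m pytest`.
import unittest


class ExampleTest(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main()
```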
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # This file is part of beets.
5 # Copyright 2016, Adrian Sampson.
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining
8 # a copy of this software and associated documentation files (the
9 # "Software"), to deal in the Software without restriction, including
10 # without limitation the rights to use, copy, modify, merge, publish,
11 # distribute, sublicense, and/or sell copies of the Software, and to
12 # permit persons to whom the Software is furnished to do so, subject to
13 # the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be
16 # included in all copies or substantial portions of the Software.
17
18 from __future__ import division, absolute_import, print_function
19
20 import os
21 import sys
22 import subprocess
23 import shutil
24 from setuptools import setup
25
26
27 def _read(fn):
28 path = os.path.join(os.path.dirname(__file__), fn)
29 return open(path).read()
30
31
32 def build_manpages():
33 # Go into the docs directory and build the manpage.
34 docdir = os.path.join(os.path.dirname(__file__), 'docs')
35 curdir = os.getcwd()
36 os.chdir(docdir)
37 try:
38 subprocess.check_call(['make', 'man'])
39 except OSError:
40 print("Could not build manpages (make man failed)!", file=sys.stderr)
41 return
42 finally:
43 os.chdir(curdir)
44
45 # Copy resulting manpages.
46 mandir = os.path.join(os.path.dirname(__file__), 'man')
47 if os.path.exists(mandir):
48 shutil.rmtree(mandir)
49 shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)
50
51
52 # Build manpages if we're making a source distribution tarball.
53 if 'sdist' in sys.argv:
54 build_manpages()
55
56
57 setup(
58 name='beets',
59 version='1.5.0',
60 description='music tagger and library organizer',
61 author='Adrian Sampson',
62 author_email='[email protected]',
63 url='https://beets.io/',
64 license='MIT',
65 platforms='ALL',
66 long_description=_read('README.rst'),
67 test_suite='test.testall.suite',
68 zip_safe=False,
69 include_package_data=True, # Install plugin resources.
70
71 packages=[
72 'beets',
73 'beets.ui',
74 'beets.autotag',
75 'beets.util',
76 'beets.dbcore',
77 'beetsplug',
78 'beetsplug.bpd',
79 'beetsplug.web',
80 'beetsplug.lastgenre',
81 'beetsplug.metasync',
82 ],
83 entry_points={
84 'console_scripts': [
85 'beet = beets.ui:main',
86 ],
87 },
88
89 install_requires=[
90 'six>=1.9',
91 'unidecode',
92 'musicbrainzngs>=0.4',
93 'pyyaml',
94 'mediafile>=0.2.0',
95 'confuse>=1.0.0',
96 ] + [
97 # Avoid a version of munkres incompatible with Python 3.
98 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else
99 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else
100 'munkres>=1.0.0',
101 ] + (
102 # Use the backport of Python 3.4's `enum` module.
103 ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []
104 ) + (
105 # Pin a Python 2-compatible version of Jellyfish.
106 ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']
107 ) + (
108 # Support for ANSI console colors on Windows.
109 ['colorama'] if (sys.platform == 'win32') else []
110 ),
111
112 tests_require=[
113 'beautifulsoup4',
114 'flask',
115 'mock',
116 'pylast',
117 'rarfile',
118 'responses',
119 'pyxdg',
120 'python-mpd2',
121 'discogs-client',
122 'requests_oauthlib'
123 ] + (
124 # Tests for the thumbnails plugin need pathlib on Python 2 too.
125 ['pathlib'] if (sys.version_info < (3, 4, 0)) else []
126 ),
127
128 # Plugin (optional) dependencies:
129 extras_require={
130 'absubmit': ['requests'],
131 'fetchart': ['requests', 'Pillow'],
132 'embedart': ['Pillow'],
133 'embyupdate': ['requests'],
134 'chroma': ['pyacoustid'],
135 'gmusic': ['gmusicapi'],
136 'discogs': ['discogs-client>=2.2.1'],
137 'beatport': ['requests-oauthlib>=0.6.1'],
138 'kodiupdate': ['requests'],
139 'lastgenre': ['pylast'],
140 'lastimport': ['pylast'],
141 'lyrics': ['requests', 'beautifulsoup4', 'langdetect'],
142 'mpdstats': ['python-mpd2>=0.4.2'],
143 'plexupdate': ['requests'],
144 'web': ['flask', 'flask-cors'],
145 'import': ['rarfile'],
146 'thumbnails': ['pyxdg', 'Pillow'] +
147 (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),
148 'metasync': ['dbus-python'],
149 'sonosupdate': ['soco'],
150 'scrub': ['mutagen>=1.33'],
151 'bpd': ['PyGObject'],
152 'replaygain': ['PyGObject'],
153 },
154 # Non-Python/non-PyPI plugin dependencies:
155 # chroma: chromaprint or fpcalc
156 # convert: ffmpeg
157 # badfiles: mp3val and flac
158 # bpd: python-gi and GStreamer 1.0+
159 # embedart: ImageMagick
160 # absubmit: extractor binary from https://acousticbrainz.org/download
161 # keyfinder: KeyFinder
162 # replaygain: python-gi and GStreamer 1.0+ or mp3gain/aacgain
163 # or Python Audio Tools
164 # ipfs: go-ipfs
165
166 classifiers=[
167 'Topic :: Multimedia :: Sound/Audio',
168 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
169 'License :: OSI Approved :: MIT License',
170 'Environment :: Console',
171 'Environment :: Web Environment',
172 'Programming Language :: Python',
173 'Programming Language :: Python :: 2',
174 'Programming Language :: Python :: 2.7',
175 'Programming Language :: Python :: 3',
176 'Programming Language :: Python :: 3.5',
177 'Programming Language :: Python :: 3.6',
178 'Programming Language :: Python :: 3.7',
179 'Programming Language :: Python :: 3.8',
180 'Programming Language :: Python :: Implementation :: CPython',
181 ],
182 )
183
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -109,24 +109,33 @@
['colorama'] if (sys.platform == 'win32') else []
),
- tests_require=[
- 'beautifulsoup4',
- 'flask',
- 'mock',
- 'pylast',
- 'rarfile',
- 'responses',
- 'pyxdg',
- 'python-mpd2',
- 'discogs-client',
- 'requests_oauthlib'
- ] + (
- # Tests for the thumbnails plugin need pathlib on Python 2 too.
- ['pathlib'] if (sys.version_info < (3, 4, 0)) else []
- ),
-
- # Plugin (optional) dependencies:
extras_require={
+ 'test': [
+ 'beautifulsoup4',
+ 'coverage',
+ 'discogs-client',
+ 'flask',
+ 'mock',
+ 'pylast',
+ 'pytest',
+ 'python-mpd2',
+ 'pyxdg',
+ 'rarfile',
+ 'responses>=0.3.0',
+ 'requests_oauthlib',
+ ] + (
+ # Tests for the thumbnails plugin need pathlib on Python 2 too.
+ ['pathlib'] if (sys.version_info < (3, 4, 0)) else []
+ ),
+ 'lint': [
+ 'flake8',
+ 'flake8-blind-except',
+ 'flake8-coding',
+ 'flake8-future-import',
+ 'pep8-naming',
+ ],
+
+ # Plugin (optional) dependencies:
'absubmit': ['requests'],
'fetchart': ['requests', 'Pillow'],
'embedart': ['Pillow'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -109,24 +109,33 @@\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n \n- tests_require=[\n- 'beautifulsoup4',\n- 'flask',\n- 'mock',\n- 'pylast',\n- 'rarfile',\n- 'responses',\n- 'pyxdg',\n- 'python-mpd2',\n- 'discogs-client',\n- 'requests_oauthlib'\n- ] + (\n- # Tests for the thumbnails plugin need pathlib on Python 2 too.\n- ['pathlib'] if (sys.version_info < (3, 4, 0)) else []\n- ),\n-\n- # Plugin (optional) dependencies:\n extras_require={\n+ 'test': [\n+ 'beautifulsoup4',\n+ 'coverage',\n+ 'discogs-client',\n+ 'flask',\n+ 'mock',\n+ 'pylast',\n+ 'pytest',\n+ 'python-mpd2',\n+ 'pyxdg',\n+ 'rarfile',\n+ 'responses>=0.3.0',\n+ 'requests_oauthlib',\n+ ] + (\n+ # Tests for the thumbnails plugin need pathlib on Python 2 too.\n+ ['pathlib'] if (sys.version_info < (3, 4, 0)) else []\n+ ),\n+ 'lint': [\n+ 'flake8',\n+ 'flake8-blind-except',\n+ 'flake8-coding',\n+ 'flake8-future-import',\n+ 'pep8-naming',\n+ ],\n+\n+ # Plugin (optional) dependencies:\n 'absubmit': ['requests'],\n 'fetchart': ['requests', 'Pillow'],\n 'embedart': ['Pillow'],\n", "issue": "nose (1.x) is mostly unmaintained\nhttps://nose.readthedocs.io/en/latest/#note-to-users\n\n> Nose has been in maintenance mode for the past several years and will likely cease without a new person/team to take over maintainership. New projects should consider using Nose2, py.test, or just plain unittest/unittest2.\n\nAre there any benefits to nose2 over py.test?\n\nThe community seems to mostly use py.test (if not using unittest) at this point. I'd like to suggest making it our default test runner. We can decide on adapting our tests to py.test separately.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.5.0',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n 
author_email='[email protected]',\n url='https://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n zip_safe=False,\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n 'mediafile>=0.2.0',\n 'confuse>=1.0.0',\n ] + [\n # Avoid a version of munkres incompatible with Python 3.\n 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'python-mpd2',\n 'discogs-client',\n 'requests_oauthlib'\n ] + (\n # Tests for the thumbnails plugin need pathlib on Python 2 too.\n ['pathlib'] if (sys.version_info < (3, 4, 0)) else []\n ),\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests', 'Pillow'],\n 'embedart': ['Pillow'],\n 'embyupdate': ['requests'],\n 'chroma': ['pyacoustid'],\n 'gmusic': ['gmusicapi'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'kodiupdate': ['requests'],\n 'lastgenre': ['pylast'],\n 'lastimport': ['pylast'],\n 'lyrics': ['requests', 'beautifulsoup4', 'langdetect'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'plexupdate': ['requests'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg', 'Pillow'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n 'sonosupdate': ['soco'],\n 'scrub': ['mutagen>=1.33'],\n 'bpd': ['PyGObject'],\n 'replaygain': ['PyGObject'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # chroma: chromaprint or fpcalc\n # convert: ffmpeg\n # badfiles: mp3val and flac\n # bpd: python-gi and GStreamer 1.0+\n # embedart: ImageMagick\n # absubmit: extractor binary from https://acousticbrainz.org/download\n # keyfinder: KeyFinder\n # replaygain: python-gi and GStreamer 1.0+ or mp3gain/aacgain\n # or Python Audio Tools\n # ipfs: go-ipfs\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py"}]} | 2,681 | 410 |
gh_patches_debug_27132 | rasdani/github-patches | git_diff | python-discord__bot-1392 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Command suggester wrongly invoked for commands under cooldown
When a command is under cooldown and is invoked again, the error handler will wrongly try to suggest an alternative as if the command does not exist, when it should not do anything at all.

</issue>
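As an aside, the suggester is only reachable when the command reports that nothing was handled, so the cooldown branch has to signal "handled" even though it sends nothing. The sketch below is a simplified, self-contained stand-in for that flow — the function names and the 60-second window are assumptions, not the bot's actual handler.

```python
# Simplified stand-in for the cooldown flow; names and the cooldown length
# are illustrative, not the bot's real implementation.
import time

COOLDOWN_SECONDS = 60
_tag_cooldowns = {}  # tag name -> {"time": <float>, "channel": <int>}


def on_cooldown(tag_name, channel_id):
    entry = _tag_cooldowns.get(tag_name)
    return (
        entry is not None
        and entry["channel"] == channel_id
        and time.time() - entry["time"] < COOLDOWN_SECONDS
    )


def handle_tag_request(tag_name, channel_id):
    """Return True whenever the request was handled, even silently."""
    if on_cooldown(tag_name, channel_id):
        # Reporting "handled" keeps the caller from concluding the command
        # does not exist and firing the suggester.
        return True
    # ... look up and send the tag here, then start its cooldown ...
    _tag_cooldowns[tag_name] = {"time": time.time(), "channel": channel_id}
    return True
```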
<code>
[start of bot/exts/info/tags.py]
1 import logging
2 import re
3 import time
4 from pathlib import Path
5 from typing import Callable, Dict, Iterable, List, Optional
6
7 from discord import Colour, Embed, Member
8 from discord.ext.commands import Cog, Context, group
9
10 from bot import constants
11 from bot.bot import Bot
12 from bot.converters import TagNameConverter
13 from bot.pagination import LinePaginator
14 from bot.utils.messages import wait_for_deletion
15
16 log = logging.getLogger(__name__)
17
18 TEST_CHANNELS = (
19 constants.Channels.bot_commands,
20 constants.Channels.helpers
21 )
22
23 REGEX_NON_ALPHABET = re.compile(r"[^a-z]", re.MULTILINE & re.IGNORECASE)
24 FOOTER_TEXT = f"To show a tag, type {constants.Bot.prefix}tags <tagname>."
25
26
27 class Tags(Cog):
28 """Save new tags and fetch existing tags."""
29
30 def __init__(self, bot: Bot):
31 self.bot = bot
32 self.tag_cooldowns = {}
33 self._cache = self.get_tags()
34
35 @staticmethod
36 def get_tags() -> dict:
37 """Get all tags."""
38 cache = {}
39
40 base_path = Path("bot", "resources", "tags")
41 for file in base_path.glob("**/*"):
42 if file.is_file():
43 tag_title = file.stem
44 tag = {
45 "title": tag_title,
46 "embed": {
47 "description": file.read_text(encoding="utf8"),
48 },
49 "restricted_to": None,
50 "location": f"/bot/{file}"
51 }
52
53 # Convert to a list to allow negative indexing.
54 parents = list(file.relative_to(base_path).parents)
55 if len(parents) > 1:
56 # -1 would be '.' hence -2 is used as the index.
57 tag["restricted_to"] = parents[-2].name
58
59 cache[tag_title] = tag
60
61 return cache
62
63 @staticmethod
64 def check_accessibility(user: Member, tag: dict) -> bool:
65 """Check if user can access a tag."""
66 return not tag["restricted_to"] or tag["restricted_to"].lower() in [role.name.lower() for role in user.roles]
67
68 @staticmethod
69 def _fuzzy_search(search: str, target: str) -> float:
70 """A simple scoring algorithm based on how many letters are found / total, with order in mind."""
71 current, index = 0, 0
72 _search = REGEX_NON_ALPHABET.sub('', search.lower())
73 _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))
74 _target = next(_targets)
75 try:
76 while True:
77 while index < len(_target) and _search[current] == _target[index]:
78 current += 1
79 index += 1
80 index, _target = 0, next(_targets)
81 except (StopIteration, IndexError):
82 pass
83 return current / len(_search) * 100
84
85 def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:
86 """Return a list of suggested tags."""
87 scores: Dict[str, int] = {
88 tag_title: Tags._fuzzy_search(tag_name, tag['title'])
89 for tag_title, tag in self._cache.items()
90 }
91
92 thresholds = thresholds or [100, 90, 80, 70, 60]
93
94 for threshold in thresholds:
95 suggestions = [
96 self._cache[tag_title]
97 for tag_title, matching_score in scores.items()
98 if matching_score >= threshold
99 ]
100 if suggestions:
101 return suggestions
102
103 return []
104
105 def _get_tag(self, tag_name: str) -> list:
106 """Get a specific tag."""
107 found = [self._cache.get(tag_name.lower(), None)]
108 if not found[0]:
109 return self._get_suggestions(tag_name)
110 return found
111
112 def _get_tags_via_content(self, check: Callable[[Iterable], bool], keywords: str, user: Member) -> list:
113 """
114 Search for tags via contents.
115
116 `predicate` will be the built-in any, all, or a custom callable. Must return a bool.
117 """
118 keywords_processed: List[str] = []
119 for keyword in keywords.split(','):
120 keyword_sanitized = keyword.strip().casefold()
121 if not keyword_sanitized:
122 # this happens when there are leading / trailing / consecutive comma.
123 continue
124 keywords_processed.append(keyword_sanitized)
125
126 if not keywords_processed:
127 # after sanitizing, we can end up with an empty list, for example when keywords is ','
128 # in that case, we simply want to search for such keywords directly instead.
129 keywords_processed = [keywords]
130
131 matching_tags = []
132 for tag in self._cache.values():
133 matches = (query in tag['embed']['description'].casefold() for query in keywords_processed)
134 if self.check_accessibility(user, tag) and check(matches):
135 matching_tags.append(tag)
136
137 return matching_tags
138
139 async def _send_matching_tags(self, ctx: Context, keywords: str, matching_tags: list) -> None:
140 """Send the result of matching tags to user."""
141 if not matching_tags:
142 pass
143 elif len(matching_tags) == 1:
144 await ctx.send(embed=Embed().from_dict(matching_tags[0]['embed']))
145 else:
146 is_plural = keywords.strip().count(' ') > 0 or keywords.strip().count(',') > 0
147 embed = Embed(
148 title=f"Here are the tags containing the given keyword{'s' * is_plural}:",
149 description='\n'.join(tag['title'] for tag in matching_tags[:10])
150 )
151 await LinePaginator.paginate(
152 sorted(f"**»** {tag['title']}" for tag in matching_tags),
153 ctx,
154 embed,
155 footer_text=FOOTER_TEXT,
156 empty=False,
157 max_lines=15
158 )
159
160 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
161 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
162 """Show all known tags, a single tag, or run a subcommand."""
163 await self.get_command(ctx, tag_name=tag_name)
164
165 @tags_group.group(name='search', invoke_without_command=True)
166 async def search_tag_content(self, ctx: Context, *, keywords: str) -> None:
167 """
168 Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.
169
170 Only search for tags that has ALL the keywords.
171 """
172 matching_tags = self._get_tags_via_content(all, keywords, ctx.author)
173 await self._send_matching_tags(ctx, keywords, matching_tags)
174
175 @search_tag_content.command(name='any')
176 async def search_tag_content_any_keyword(self, ctx: Context, *, keywords: Optional[str] = 'any') -> None:
177 """
178 Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.
179
180 Search for tags that has ANY of the keywords.
181 """
182 matching_tags = self._get_tags_via_content(any, keywords or 'any', ctx.author)
183 await self._send_matching_tags(ctx, keywords, matching_tags)
184
185 async def display_tag(self, ctx: Context, tag_name: str = None) -> bool:
186 """
187 If a tag is not found, display similar tag names as suggestions.
188
189 If a tag is not specified, display a paginated embed of all tags.
190
191 Tags are on cooldowns on a per-tag, per-channel basis. If a tag is on cooldown, display
192 nothing and return False.
193 """
194 def _command_on_cooldown(tag_name: str) -> bool:
195 """
196 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
197
198 The cooldown duration is set in constants.py.
199 """
200 now = time.time()
201
202 cooldown_conditions = (
203 tag_name
204 and tag_name in self.tag_cooldowns
205 and (now - self.tag_cooldowns[tag_name]["time"]) < constants.Cooldowns.tags
206 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
207 )
208
209 if cooldown_conditions:
210 return True
211 return False
212
213 if _command_on_cooldown(tag_name):
214 time_elapsed = time.time() - self.tag_cooldowns[tag_name]["time"]
215 time_left = constants.Cooldowns.tags - time_elapsed
216 log.info(
217 f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
218 f"Cooldown ends in {time_left:.1f} seconds."
219 )
220 return False
221
222 if tag_name is not None:
223 temp_founds = self._get_tag(tag_name)
224
225 founds = []
226
227 for found_tag in temp_founds:
228 if self.check_accessibility(ctx.author, found_tag):
229 founds.append(found_tag)
230
231 if len(founds) == 1:
232 tag = founds[0]
233 if ctx.channel.id not in TEST_CHANNELS:
234 self.tag_cooldowns[tag_name] = {
235 "time": time.time(),
236 "channel": ctx.channel.id
237 }
238
239 self.bot.stats.incr(f"tags.usages.{tag['title'].replace('-', '_')}")
240
241 await wait_for_deletion(
242 await ctx.send(embed=Embed.from_dict(tag['embed'])),
243 [ctx.author.id],
244 )
245 return True
246 elif founds and len(tag_name) >= 3:
247 await wait_for_deletion(
248 await ctx.send(
249 embed=Embed(
250 title='Did you mean ...',
251 description='\n'.join(tag['title'] for tag in founds[:10])
252 )
253 ),
254 [ctx.author.id],
255 )
256 return True
257
258 else:
259 tags = self._cache.values()
260 if not tags:
261 await ctx.send(embed=Embed(
262 description="**There are no tags in the database!**",
263 colour=Colour.red()
264 ))
265 return True
266 else:
267 embed: Embed = Embed(title="**Current tags**")
268 await LinePaginator.paginate(
269 sorted(
270 f"**»** {tag['title']}" for tag in tags
271 if self.check_accessibility(ctx.author, tag)
272 ),
273 ctx,
274 embed,
275 footer_text=FOOTER_TEXT,
276 empty=False,
277 max_lines=15
278 )
279 return True
280
281 return False
282
283 @tags_group.command(name='get', aliases=('show', 'g'))
284 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> bool:
285 """
286 Get a specified tag, or a list of all tags if no tag is specified.
287
288 Returns False if a tag is on cooldown, or if no matches are found.
289 """
290 return await self.display_tag(ctx, tag_name)
291
292
293 def setup(bot: Bot) -> None:
294 """Load the Tags cog."""
295 bot.add_cog(Tags(bot))
296
[end of bot/exts/info/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/exts/info/tags.py b/bot/exts/info/tags.py
--- a/bot/exts/info/tags.py
+++ b/bot/exts/info/tags.py
@@ -189,7 +189,7 @@
If a tag is not specified, display a paginated embed of all tags.
Tags are on cooldowns on a per-tag, per-channel basis. If a tag is on cooldown, display
- nothing and return False.
+ nothing and return True.
"""
def _command_on_cooldown(tag_name: str) -> bool:
"""
@@ -217,7 +217,7 @@
f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
f"Cooldown ends in {time_left:.1f} seconds."
)
- return False
+ return True
if tag_name is not None:
temp_founds = self._get_tag(tag_name)
@@ -285,7 +285,8 @@
"""
Get a specified tag, or a list of all tags if no tag is specified.
- Returns False if a tag is on cooldown, or if no matches are found.
+ Returns True if something can be sent, or if the tag is on cooldown.
+ Returns False if no matches are found.
"""
return await self.display_tag(ctx, tag_name)
| {"golden_diff": "diff --git a/bot/exts/info/tags.py b/bot/exts/info/tags.py\n--- a/bot/exts/info/tags.py\n+++ b/bot/exts/info/tags.py\n@@ -189,7 +189,7 @@\n If a tag is not specified, display a paginated embed of all tags.\n \n Tags are on cooldowns on a per-tag, per-channel basis. If a tag is on cooldown, display\n- nothing and return False.\n+ nothing and return True.\n \"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n@@ -217,7 +217,7 @@\n f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\"\n )\n- return False\n+ return True\n \n if tag_name is not None:\n temp_founds = self._get_tag(tag_name)\n@@ -285,7 +285,8 @@\n \"\"\"\n Get a specified tag, or a list of all tags if no tag is specified.\n \n- Returns False if a tag is on cooldown, or if no matches are found.\n+ Returns True if something can be sent, or if the tag is on cooldown.\n+ Returns False if no matches are found.\n \"\"\"\n return await self.display_tag(ctx, tag_name)\n", "issue": "Command suggester wrongly invoked for commands under cooldown\nWhen a command is under cooldown and is invoked again, the error handler will wrongly try to suggest an alternative as if the command does not exist, when it should not do anything at all.\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nimport re\nimport time\nfrom pathlib import Path\nfrom typing import Callable, Dict, Iterable, List, Optional\n\nfrom discord import Colour, Embed, Member\nfrom discord.ext.commands import Cog, Context, group\n\nfrom bot import constants\nfrom bot.bot import Bot\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\nfrom bot.utils.messages import wait_for_deletion\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n constants.Channels.bot_commands,\n constants.Channels.helpers\n)\n\nREGEX_NON_ALPHABET = re.compile(r\"[^a-z]\", re.MULTILINE & re.IGNORECASE)\nFOOTER_TEXT = f\"To show a tag, type {constants.Bot.prefix}tags <tagname>.\"\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n self._cache = self.get_tags()\n\n @staticmethod\n def get_tags() -> dict:\n \"\"\"Get all tags.\"\"\"\n cache = {}\n\n base_path = Path(\"bot\", \"resources\", \"tags\")\n for file in base_path.glob(\"**/*\"):\n if file.is_file():\n tag_title = file.stem\n tag = {\n \"title\": tag_title,\n \"embed\": {\n \"description\": file.read_text(encoding=\"utf8\"),\n },\n \"restricted_to\": None,\n \"location\": f\"/bot/{file}\"\n }\n\n # Convert to a list to allow negative indexing.\n parents = list(file.relative_to(base_path).parents)\n if len(parents) > 1:\n # -1 would be '.' 
hence -2 is used as the index.\n tag[\"restricted_to\"] = parents[-2].name\n\n cache[tag_title] = tag\n\n return cache\n\n @staticmethod\n def check_accessibility(user: Member, tag: dict) -> bool:\n \"\"\"Check if user can access a tag.\"\"\"\n return not tag[\"restricted_to\"] or tag[\"restricted_to\"].lower() in [role.name.lower() for role in user.roles]\n\n @staticmethod\n def _fuzzy_search(search: str, target: str) -> float:\n \"\"\"A simple scoring algorithm based on how many letters are found / total, with order in mind.\"\"\"\n current, index = 0, 0\n _search = REGEX_NON_ALPHABET.sub('', search.lower())\n _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))\n _target = next(_targets)\n try:\n while True:\n while index < len(_target) and _search[current] == _target[index]:\n current += 1\n index += 1\n index, _target = 0, next(_targets)\n except (StopIteration, IndexError):\n pass\n return current / len(_search) * 100\n\n def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:\n \"\"\"Return a list of suggested tags.\"\"\"\n scores: Dict[str, int] = {\n tag_title: Tags._fuzzy_search(tag_name, tag['title'])\n for tag_title, tag in self._cache.items()\n }\n\n thresholds = thresholds or [100, 90, 80, 70, 60]\n\n for threshold in thresholds:\n suggestions = [\n self._cache[tag_title]\n for tag_title, matching_score in scores.items()\n if matching_score >= threshold\n ]\n if suggestions:\n return suggestions\n\n return []\n\n def _get_tag(self, tag_name: str) -> list:\n \"\"\"Get a specific tag.\"\"\"\n found = [self._cache.get(tag_name.lower(), None)]\n if not found[0]:\n return self._get_suggestions(tag_name)\n return found\n\n def _get_tags_via_content(self, check: Callable[[Iterable], bool], keywords: str, user: Member) -> list:\n \"\"\"\n Search for tags via contents.\n\n `predicate` will be the built-in any, all, or a custom callable. 
Must return a bool.\n \"\"\"\n keywords_processed: List[str] = []\n for keyword in keywords.split(','):\n keyword_sanitized = keyword.strip().casefold()\n if not keyword_sanitized:\n # this happens when there are leading / trailing / consecutive comma.\n continue\n keywords_processed.append(keyword_sanitized)\n\n if not keywords_processed:\n # after sanitizing, we can end up with an empty list, for example when keywords is ','\n # in that case, we simply want to search for such keywords directly instead.\n keywords_processed = [keywords]\n\n matching_tags = []\n for tag in self._cache.values():\n matches = (query in tag['embed']['description'].casefold() for query in keywords_processed)\n if self.check_accessibility(user, tag) and check(matches):\n matching_tags.append(tag)\n\n return matching_tags\n\n async def _send_matching_tags(self, ctx: Context, keywords: str, matching_tags: list) -> None:\n \"\"\"Send the result of matching tags to user.\"\"\"\n if not matching_tags:\n pass\n elif len(matching_tags) == 1:\n await ctx.send(embed=Embed().from_dict(matching_tags[0]['embed']))\n else:\n is_plural = keywords.strip().count(' ') > 0 or keywords.strip().count(',') > 0\n embed = Embed(\n title=f\"Here are the tags containing the given keyword{'s' * is_plural}:\",\n description='\\n'.join(tag['title'] for tag in matching_tags[:10])\n )\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in matching_tags),\n ctx,\n embed,\n footer_text=FOOTER_TEXT,\n empty=False,\n max_lines=15\n )\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await self.get_command(ctx, tag_name=tag_name)\n\n @tags_group.group(name='search', invoke_without_command=True)\n async def search_tag_content(self, ctx: Context, *, keywords: str) -> None:\n \"\"\"\n Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.\n\n Only search for tags that has ALL the keywords.\n \"\"\"\n matching_tags = self._get_tags_via_content(all, keywords, ctx.author)\n await self._send_matching_tags(ctx, keywords, matching_tags)\n\n @search_tag_content.command(name='any')\n async def search_tag_content_any_keyword(self, ctx: Context, *, keywords: Optional[str] = 'any') -> None:\n \"\"\"\n Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.\n\n Search for tags that has ANY of the keywords.\n \"\"\"\n matching_tags = self._get_tags_via_content(any, keywords or 'any', ctx.author)\n await self._send_matching_tags(ctx, keywords, matching_tags)\n\n async def display_tag(self, ctx: Context, tag_name: str = None) -> bool:\n \"\"\"\n If a tag is not found, display similar tag names as suggestions.\n\n If a tag is not specified, display a paginated embed of all tags.\n\n Tags are on cooldowns on a per-tag, per-channel basis. 
If a tag is on cooldown, display\n nothing and return False.\n \"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < constants.Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_elapsed = time.time() - self.tag_cooldowns[tag_name][\"time\"]\n time_left = constants.Cooldowns.tags - time_elapsed\n log.info(\n f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\"\n )\n return False\n\n if tag_name is not None:\n temp_founds = self._get_tag(tag_name)\n\n founds = []\n\n for found_tag in temp_founds:\n if self.check_accessibility(ctx.author, found_tag):\n founds.append(found_tag)\n\n if len(founds) == 1:\n tag = founds[0]\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n\n self.bot.stats.incr(f\"tags.usages.{tag['title'].replace('-', '_')}\")\n\n await wait_for_deletion(\n await ctx.send(embed=Embed.from_dict(tag['embed'])),\n [ctx.author.id],\n )\n return True\n elif founds and len(tag_name) >= 3:\n await wait_for_deletion(\n await ctx.send(\n embed=Embed(\n title='Did you mean ...',\n description='\\n'.join(tag['title'] for tag in founds[:10])\n )\n ),\n [ctx.author.id],\n )\n return True\n\n else:\n tags = self._cache.values()\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n return True\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(\n f\"**\u00bb** {tag['title']}\" for tag in tags\n if self.check_accessibility(ctx.author, tag)\n ),\n ctx,\n embed,\n footer_text=FOOTER_TEXT,\n empty=False,\n max_lines=15\n )\n return True\n\n return False\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> bool:\n \"\"\"\n Get a specified tag, or a list of all tags if no tag is specified.\n\n Returns False if a tag is on cooldown, or if no matches are found.\n \"\"\"\n return await self.display_tag(ctx, tag_name)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Tags cog.\"\"\"\n bot.add_cog(Tags(bot))\n", "path": "bot/exts/info/tags.py"}]} | 3,808 | 308 |
gh_patches_debug_66169 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show simplified domain statuses to the registrant
### Story
As a domain manager
I want to know the status of my domain in simple language
so that I know if any action is needed or if any functions are limited
### Acceptance Criteria
- [x] Domains table on the dashboard shows a user-friendly domain status
- [ ] Show the domain status on the "Domain Overview" page
- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)
### Additional Context
**BACKGROUND**
In general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing "user-friendly" versions of the domain status.
User-friendly statuses include:
- _Note:_ "Unknown" _Domain status shows as_ "DNS needed"
- DNS needed
- Ready
- On hold
- Deleted
Refer to [status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)
**DOMAINS TABLE**
Currently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)
**DOMAIN OVERVIEW**
Currently, we do not show the domain status when viewing the "Manage Domains" pages. The "Manage Domains" pages can be accessed by clicking the "Manage" button next to an approved domain.
The first page is the "Domain Overview." Add stylized message to the top of that page to indicate the user-friendly domain status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)
### Issue Links
Depends on: Domain Status presence for testing
</issue>
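As a rough illustration of the "user-friendly status" idea, a thin mapping layer between raw domain states and display labels could look like the sketch below. The raw state strings are assumptions inferred from the list above, not the registrar's actual model values.

```python
# Hypothetical mapping sketch; the raw state keys are assumed, not taken
# from the registrar's models.
FRIENDLY_STATUS = {
    "unknown": "DNS needed",  # per the note above, "Unknown" is surfaced as "DNS needed"
    "dns needed": "DNS needed",
    "ready": "Ready",
    "on hold": "On hold",
    "deleted": "Deleted",
}


def friendly_status(raw_state):
    """Translate a raw domain state into the label shown to registrants."""
    return FRIENDLY_STATUS.get((raw_state or "").lower(), "DNS needed")
```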
<code>
[start of src/registrar/views/index.py]
1 from django.db.models import F
2 from django.shortcuts import render
3
4 from registrar.models import DomainApplication
5
6
7 def index(request):
8 """This page is available to anyone without logging in."""
9 context = {}
10 if request.user.is_authenticated:
11 applications = DomainApplication.objects.filter(creator=request.user)
12 # Let's exclude the approved applications since our
13 # domain_applications context will be used to populate
14 # the active applications table
15 context["domain_applications"] = applications.exclude(status="approved")
16
17 domains = request.user.permissions.values(
18 "role",
19 pk=F("domain__id"),
20 name=F("domain__name"),
21 created_time=F("domain__created_at"),
22 application_status=F("domain__domain_application__status"),
23 )
24 context["domains"] = domains
25 return render(request, "home.html", context)
26
[end of src/registrar/views/index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py
--- a/src/registrar/views/index.py
+++ b/src/registrar/views/index.py
@@ -19,7 +19,7 @@
pk=F("domain__id"),
name=F("domain__name"),
created_time=F("domain__created_at"),
- application_status=F("domain__domain_application__status"),
+ state=F("domain__state"),
)
context["domains"] = domains
return render(request, "home.html", context)
| {"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -19,7 +19,7 @@\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n- application_status=F(\"domain__domain_application__status\"),\n+ state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "issue": "Show simplified domain statuses to the registrant\n### Story\r\n\r\nAs a domain manager\r\nI want to know the status of my domain in simple language\r\nso that I know if any action is needed or if any functions are limited\r\n\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] Domains table on the dashboard shows a user-friendly domain status\r\n- [ ] Show the domain status on the \"Domain Overview\" page\r\n- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)\r\n\r\n\r\n### Additional Context\r\n**BACKGROUND**\r\nIn general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing \"user-friendly\" versions of the domain status. \r\n\r\nUser-friendly statuses include: \r\n- _Note:_ \"Unknown\" _Domain status shows as_ \"DNS needed\"\r\n- DNS needed \r\n- Ready\r\n- On hold\r\n- Deleted\r\n\r\nRefer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)\r\n\r\n**DOMAINS TABLE**\r\nCurrently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status. \r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n**DOMAIN OVERVIEW**\r\nCurrently, we do not show the domain status when viewing the \"Manage Domains\" pages. The \"Manage Domains\" pages can be accessed by clicking the \"Manage\" button next to an approved domain. 
\r\n\r\nThe first page is the \"Domain Overview.\" Add stylized message to the top of that page to indicate the user-friendly domain status.\r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n### Issue Links\r\n\r\nDepends on: Domain Status presence for testing\n", "before_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]} | 1,350 | 117 |
gh_patches_debug_20140 | rasdani/github-patches | git_diff | Flexget__Flexget-1599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bakabt URL change
The Bakabt URL has changed from http://www.bakabt.com to https://bakabt.me, so the URL rewriter plugin needs to be updated to match.
</issue>
<code>
[start of flexget/plugins/sites/bakabt.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from flexget import plugin
7 from flexget.event import event
8 from flexget.plugins.internal.urlrewriting import UrlRewritingError
9 from flexget.utils.soup import get_soup
10
11 log = logging.getLogger('bakabt')
12
13
14 class UrlRewriteBakaBT(object):
15 """BakaBT urlrewriter."""
16
17 # urlrewriter API
18 def url_rewritable(self, task, entry):
19 url = entry['url']
20 if url.startswith('http://www.bakabt.com/download/'):
21 return False
22 if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
23 return True
24 return False
25
26 # urlrewriter API
27 def url_rewrite(self, task, entry):
28 entry['url'] = self.parse_download_page(entry['url'], task.requests)
29
30 @plugin.internet(log)
31 def parse_download_page(self, url, requests):
32 txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
33 page = requests.get(url, headers=txheaders)
34 try:
35 soup = get_soup(page.text)
36 except Exception as e:
37 raise UrlRewritingError(e)
38 tag_a = soup.find('a', attrs={'class': 'download_link'})
39 if not tag_a:
40 raise UrlRewritingError('Unable to locate download link from url %s' % url)
41 torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
42 return torrent_url
43
44
45 @event('plugin.register')
46 def register_plugin():
47 plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)
48
[end of flexget/plugins/sites/bakabt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py
--- a/flexget/plugins/sites/bakabt.py
+++ b/flexget/plugins/sites/bakabt.py
@@ -17,9 +17,9 @@
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
- if url.startswith('http://www.bakabt.com/download/'):
+ if url.startswith('http://www.bakabt.me/download/'):
return False
- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):
return True
return False
@@ -38,7 +38,7 @@
tag_a = soup.find('a', attrs={'class': 'download_link'})
if not tag_a:
raise UrlRewritingError('Unable to locate download link from url %s' % url)
- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')
return torrent_url
| {"golden_diff": "diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py\n--- a/flexget/plugins/sites/bakabt.py\n+++ b/flexget/plugins/sites/bakabt.py\n@@ -17,9 +17,9 @@\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n- if url.startswith('http://www.bakabt.com/download/'):\n+ if url.startswith('http://www.bakabt.me/download/'):\n return False\n- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):\n return True\n return False\n \n@@ -38,7 +38,7 @@\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')\n return torrent_url\n", "issue": "Bakabt URL change\nBakabt url has change from http://www.bakabt.com to https://bakabt.me, the url rewriter plugin needs to be updated to match\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('bakabt')\n\n\nclass UrlRewriteBakaBT(object):\n \"\"\"BakaBT urlrewriter.\"\"\"\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith('http://www.bakabt.com/download/'):\n return False\n if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task.requests)\n\n @plugin.internet(log)\n def parse_download_page(self, url, requests):\n txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n page = requests.get(url, headers=txheaders)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n return torrent_url\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/bakabt.py"}]} | 1,085 | 294 |
gh_patches_debug_38647 | rasdani/github-patches | git_diff | RocketMap__RocketMap-2114 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix location to use lat/lng if directly provided instead of lookups
</issue>
<code>
[start of pogom/proxy.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 import logging
5 import requests
6 import sys
7 import time
8
9 from queue import Queue
10 from threading import Thread
11 from random import randint
12
13 log = logging.getLogger(__name__)
14
15 # Last used proxy for round-robin.
16 last_proxy = -1
17
18 # Proxy check result constants.
19 check_result_ok = 0
20 check_result_failed = 1
21 check_result_banned = 2
22 check_result_wrong = 3
23 check_result_timeout = 4
24 check_result_exception = 5
25 check_result_empty = 6
26 check_result_max = 6 # Should be equal to maximal return code.
27
28
29 # Simple function to do a call to Niantic's system for
30 # testing proxy connectivity.
31 def check_proxy(proxy_queue, timeout, proxies, show_warnings, check_results):
32
33 # Url for proxy testing.
34 proxy_test_url = 'https://pgorelease.nianticlabs.com/plfe/rpc'
35 proxy = proxy_queue.get()
36
37 check_result = check_result_ok
38
39 if proxy and proxy[1]:
40
41 log.debug('Checking proxy: %s', proxy[1])
42
43 try:
44 proxy_response = requests.post(proxy_test_url, '',
45 proxies={'http': proxy[1],
46 'https': proxy[1]},
47 timeout=timeout)
48
49 if proxy_response.status_code == 200:
50 log.debug('Proxy %s is ok.', proxy[1])
51 proxy_queue.task_done()
52 proxies.append(proxy[1])
53 check_results[check_result_ok] += 1
54 return True
55
56 elif proxy_response.status_code == 403:
57 proxy_error = ("Proxy " + proxy[1] +
58 " is banned - got status code: " +
59 str(proxy_response.status_code))
60 check_result = check_result_banned
61
62 else:
63 proxy_error = ("Wrong status code - " +
64 str(proxy_response.status_code))
65 check_result = check_result_wrong
66
67 except requests.ConnectTimeout:
68 proxy_error = ("Connection timeout (" + str(timeout) +
69 " second(s) ) via proxy " + proxy[1])
70 check_result = check_result_timeout
71
72 except requests.ConnectionError:
73 proxy_error = "Failed to connect to proxy " + proxy[1]
74 check_result = check_result_failed
75
76 except Exception as e:
77 proxy_error = e
78 check_result = check_result_exception
79
80 else:
81 proxy_error = "Empty proxy server."
82 check_result = check_result_empty
83
84 # Decrease output amount if there are lot of proxies.
85 if show_warnings:
86 log.warning('%s', repr(proxy_error))
87 else:
88 log.debug('%s', repr(proxy_error))
89 proxy_queue.task_done()
90
91 check_results[check_result] += 1
92 return False
93
94
95 # Check all proxies and return a working list with proxies.
96 def check_proxies(args):
97
98 source_proxies = []
99
100 check_results = [0] * (check_result_max + 1)
101
102 # Load proxies from the file. Override args.proxy if specified.
103 if args.proxy_file is not None:
104 log.info('Loading proxies from file.')
105
106 with open(args.proxy_file) as f:
107 for line in f:
108 # Ignore blank lines and comment lines.
109 if len(line.strip()) == 0 or line.startswith('#'):
110 continue
111 source_proxies.append(line.strip())
112
113 log.info('Loaded %d proxies.', len(source_proxies))
114
115 if len(source_proxies) == 0:
116 log.error('Proxy file was configured but ' +
117 'no proxies were loaded. Aborting.')
118 sys.exit(1)
119 else:
120 source_proxies = args.proxy
121
122 # No proxies - no cookies.
123 if (source_proxies is None) or (len(source_proxies) == 0):
124 log.info('No proxies are configured.')
125 return None
126
127 if args.proxy_skip_check:
128 return source_proxies
129
130 proxy_queue = Queue()
131 total_proxies = len(source_proxies)
132
133 log.info('Checking %d proxies...', total_proxies)
134 if (total_proxies > 10):
135 log.info('Enable "-v or -vv" to see checking details.')
136
137 proxies = []
138
139 for proxy in enumerate(source_proxies):
140 proxy_queue.put(proxy)
141
142 t = Thread(target=check_proxy,
143 name='check_proxy',
144 args=(proxy_queue, args.proxy_timeout, proxies,
145 total_proxies <= 10, check_results))
146 t.daemon = True
147 t.start()
148
149 # This is painful but we need to wait here until proxy_queue is
150 # completed so we have a working list of proxies.
151 proxy_queue.join()
152
153 working_proxies = len(proxies)
154
155 if working_proxies == 0:
156 log.error('Proxy was configured but no working ' +
157 'proxies were found. Aborting.')
158 sys.exit(1)
159 else:
160 other_fails = (check_results[check_result_failed] +
161 check_results[check_result_wrong] +
162 check_results[check_result_exception] +
163 check_results[check_result_empty])
164 log.info('Proxy check completed. Working: %d, banned: %d, ' +
165 'timeout: %d, other fails: %d of total %d configured.',
166 working_proxies, check_results[check_result_banned],
167 check_results[check_result_timeout],
168 other_fails,
169 total_proxies)
170 return proxies
171
172
173 # Thread function for periodical proxy updating.
174 def proxies_refresher(args):
175
176 while True:
177 # Wait BEFORE refresh, because initial refresh is done at startup.
178 time.sleep(args.proxy_refresh)
179
180 try:
181 proxies = check_proxies(args)
182
183 if len(proxies) == 0:
184 log.warning('No live proxies found. Using previous ones ' +
185 'until next round...')
186 continue
187
188 args.proxy = proxies
189 log.info('Regular proxy refresh complete.')
190 except Exception as e:
191 log.exception('Exception while refresh proxies: %s', repr(e))
192
193
194 # Provide new proxy for a search thread.
195 def get_new_proxy(args):
196
197 global last_proxy
198
199 # If none/round - simply get next proxy.
200 if ((args.proxy_rotation is None) or (args.proxy_rotation == 'none') or
201 (args.proxy_rotation == 'round')):
202 if last_proxy >= len(args.proxy) - 1:
203 last_proxy = 0
204 else:
205 last_proxy = last_proxy + 1
206 lp = last_proxy
207 # If random - get random one.
208 elif (args.proxy_rotation == 'random'):
209 lp = randint(0, len(args.proxy) - 1)
210 else:
211 log.warning('Parameter -pxo/--proxy-rotation has wrong value. ' +
212 'Use only first proxy.')
213 lp = 0
214
215 return lp, args.proxy[lp]
216
[end of pogom/proxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pogom/proxy.py b/pogom/proxy.py
--- a/pogom/proxy.py
+++ b/pogom/proxy.py
@@ -32,6 +32,10 @@
# Url for proxy testing.
proxy_test_url = 'https://pgorelease.nianticlabs.com/plfe/rpc'
+ proxy_test_ptc_url = 'https://sso.pokemon.com/sso/oauth2.0/authorize?' \
+ 'client_id=mobile-app_pokemon-go&redirect_uri=' \
+ 'https%3A%2F%2Fwww.nianticlabs.com%2Fpokemongo' \
+ '%2Ferror'
proxy = proxy_queue.get()
check_result = check_result_ok
@@ -46,22 +50,42 @@
'https': proxy[1]},
timeout=timeout)
- if proxy_response.status_code == 200:
+ proxy_response_ptc = requests.get(proxy_test_ptc_url, '',
+ proxies={'http': proxy[1],
+ 'https': proxy[1]},
+ timeout=timeout,
+ headers={'User-Agent':
+ 'pokemongo/1 '
+ 'CFNetwork/811.4.18 '
+ 'Darwin/16.5.0',
+ 'Host':
+ 'sso.pokemon.com',
+ 'X-Unity-Version':
+ '5.5.1f1'})
+
+ niantic_status = proxy_response.status_code
+ ptc_status = proxy_response_ptc.status_code
+
+ banned_status_codes = [403, 409]
+
+ if niantic_status == 200 and ptc_status == 200:
log.debug('Proxy %s is ok.', proxy[1])
proxy_queue.task_done()
proxies.append(proxy[1])
check_results[check_result_ok] += 1
return True
- elif proxy_response.status_code == 403:
+ elif (niantic_status in banned_status_codes or
+ ptc_status in banned_status_codes):
proxy_error = ("Proxy " + proxy[1] +
- " is banned - got status code: " +
- str(proxy_response.status_code))
+ " is banned - got Niantic status code: " +
+ str(niantic_status) + ", PTC status code: " +
+ str(ptc_status))
check_result = check_result_banned
else:
- proxy_error = ("Wrong status code - " +
- str(proxy_response.status_code))
+ proxy_error = ("Wrong status codes - " + str(niantic_status) +
+ ", " + str(ptc_status))
check_result = check_result_wrong
except requests.ConnectTimeout:
| {"golden_diff": "diff --git a/pogom/proxy.py b/pogom/proxy.py\n--- a/pogom/proxy.py\n+++ b/pogom/proxy.py\n@@ -32,6 +32,10 @@\n \n # Url for proxy testing.\n proxy_test_url = 'https://pgorelease.nianticlabs.com/plfe/rpc'\n+ proxy_test_ptc_url = 'https://sso.pokemon.com/sso/oauth2.0/authorize?' \\\n+ 'client_id=mobile-app_pokemon-go&redirect_uri=' \\\n+ 'https%3A%2F%2Fwww.nianticlabs.com%2Fpokemongo' \\\n+ '%2Ferror'\n proxy = proxy_queue.get()\n \n check_result = check_result_ok\n@@ -46,22 +50,42 @@\n 'https': proxy[1]},\n timeout=timeout)\n \n- if proxy_response.status_code == 200:\n+ proxy_response_ptc = requests.get(proxy_test_ptc_url, '',\n+ proxies={'http': proxy[1],\n+ 'https': proxy[1]},\n+ timeout=timeout,\n+ headers={'User-Agent':\n+ 'pokemongo/1 '\n+ 'CFNetwork/811.4.18 '\n+ 'Darwin/16.5.0',\n+ 'Host':\n+ 'sso.pokemon.com',\n+ 'X-Unity-Version':\n+ '5.5.1f1'})\n+\n+ niantic_status = proxy_response.status_code\n+ ptc_status = proxy_response_ptc.status_code\n+\n+ banned_status_codes = [403, 409]\n+\n+ if niantic_status == 200 and ptc_status == 200:\n log.debug('Proxy %s is ok.', proxy[1])\n proxy_queue.task_done()\n proxies.append(proxy[1])\n check_results[check_result_ok] += 1\n return True\n \n- elif proxy_response.status_code == 403:\n+ elif (niantic_status in banned_status_codes or\n+ ptc_status in banned_status_codes):\n proxy_error = (\"Proxy \" + proxy[1] +\n- \" is banned - got status code: \" +\n- str(proxy_response.status_code))\n+ \" is banned - got Niantic status code: \" +\n+ str(niantic_status) + \", PTC status code: \" +\n+ str(ptc_status))\n check_result = check_result_banned\n \n else:\n- proxy_error = (\"Wrong status code - \" +\n- str(proxy_response.status_code))\n+ proxy_error = (\"Wrong status codes - \" + str(niantic_status) +\n+ \", \" + str(ptc_status))\n check_result = check_result_wrong\n \n except requests.ConnectTimeout:\n", "issue": "Fix location to use lat/lng if directly provided instead of lookups\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport requests\nimport sys\nimport time\n\nfrom queue import Queue\nfrom threading import Thread\nfrom random import randint\n\nlog = logging.getLogger(__name__)\n\n# Last used proxy for round-robin.\nlast_proxy = -1\n\n# Proxy check result constants.\ncheck_result_ok = 0\ncheck_result_failed = 1\ncheck_result_banned = 2\ncheck_result_wrong = 3\ncheck_result_timeout = 4\ncheck_result_exception = 5\ncheck_result_empty = 6\ncheck_result_max = 6 # Should be equal to maximal return code.\n\n\n# Simple function to do a call to Niantic's system for\n# testing proxy connectivity.\ndef check_proxy(proxy_queue, timeout, proxies, show_warnings, check_results):\n\n # Url for proxy testing.\n proxy_test_url = 'https://pgorelease.nianticlabs.com/plfe/rpc'\n proxy = proxy_queue.get()\n\n check_result = check_result_ok\n\n if proxy and proxy[1]:\n\n log.debug('Checking proxy: %s', proxy[1])\n\n try:\n proxy_response = requests.post(proxy_test_url, '',\n proxies={'http': proxy[1],\n 'https': proxy[1]},\n timeout=timeout)\n\n if proxy_response.status_code == 200:\n log.debug('Proxy %s is ok.', proxy[1])\n proxy_queue.task_done()\n proxies.append(proxy[1])\n check_results[check_result_ok] += 1\n return True\n\n elif proxy_response.status_code == 403:\n proxy_error = (\"Proxy \" + proxy[1] +\n \" is banned - got status code: \" +\n str(proxy_response.status_code))\n check_result = check_result_banned\n\n else:\n proxy_error = (\"Wrong status code - \" +\n 
str(proxy_response.status_code))\n check_result = check_result_wrong\n\n except requests.ConnectTimeout:\n proxy_error = (\"Connection timeout (\" + str(timeout) +\n \" second(s) ) via proxy \" + proxy[1])\n check_result = check_result_timeout\n\n except requests.ConnectionError:\n proxy_error = \"Failed to connect to proxy \" + proxy[1]\n check_result = check_result_failed\n\n except Exception as e:\n proxy_error = e\n check_result = check_result_exception\n\n else:\n proxy_error = \"Empty proxy server.\"\n check_result = check_result_empty\n\n # Decrease output amount if there are lot of proxies.\n if show_warnings:\n log.warning('%s', repr(proxy_error))\n else:\n log.debug('%s', repr(proxy_error))\n proxy_queue.task_done()\n\n check_results[check_result] += 1\n return False\n\n\n# Check all proxies and return a working list with proxies.\ndef check_proxies(args):\n\n source_proxies = []\n\n check_results = [0] * (check_result_max + 1)\n\n # Load proxies from the file. Override args.proxy if specified.\n if args.proxy_file is not None:\n log.info('Loading proxies from file.')\n\n with open(args.proxy_file) as f:\n for line in f:\n # Ignore blank lines and comment lines.\n if len(line.strip()) == 0 or line.startswith('#'):\n continue\n source_proxies.append(line.strip())\n\n log.info('Loaded %d proxies.', len(source_proxies))\n\n if len(source_proxies) == 0:\n log.error('Proxy file was configured but ' +\n 'no proxies were loaded. Aborting.')\n sys.exit(1)\n else:\n source_proxies = args.proxy\n\n # No proxies - no cookies.\n if (source_proxies is None) or (len(source_proxies) == 0):\n log.info('No proxies are configured.')\n return None\n\n if args.proxy_skip_check:\n return source_proxies\n\n proxy_queue = Queue()\n total_proxies = len(source_proxies)\n\n log.info('Checking %d proxies...', total_proxies)\n if (total_proxies > 10):\n log.info('Enable \"-v or -vv\" to see checking details.')\n\n proxies = []\n\n for proxy in enumerate(source_proxies):\n proxy_queue.put(proxy)\n\n t = Thread(target=check_proxy,\n name='check_proxy',\n args=(proxy_queue, args.proxy_timeout, proxies,\n total_proxies <= 10, check_results))\n t.daemon = True\n t.start()\n\n # This is painful but we need to wait here until proxy_queue is\n # completed so we have a working list of proxies.\n proxy_queue.join()\n\n working_proxies = len(proxies)\n\n if working_proxies == 0:\n log.error('Proxy was configured but no working ' +\n 'proxies were found. Aborting.')\n sys.exit(1)\n else:\n other_fails = (check_results[check_result_failed] +\n check_results[check_result_wrong] +\n check_results[check_result_exception] +\n check_results[check_result_empty])\n log.info('Proxy check completed. Working: %d, banned: %d, ' +\n 'timeout: %d, other fails: %d of total %d configured.',\n working_proxies, check_results[check_result_banned],\n check_results[check_result_timeout],\n other_fails,\n total_proxies)\n return proxies\n\n\n# Thread function for periodical proxy updating.\ndef proxies_refresher(args):\n\n while True:\n # Wait BEFORE refresh, because initial refresh is done at startup.\n time.sleep(args.proxy_refresh)\n\n try:\n proxies = check_proxies(args)\n\n if len(proxies) == 0:\n log.warning('No live proxies found. 
Using previous ones ' +\n 'until next round...')\n continue\n\n args.proxy = proxies\n log.info('Regular proxy refresh complete.')\n except Exception as e:\n log.exception('Exception while refresh proxies: %s', repr(e))\n\n\n# Provide new proxy for a search thread.\ndef get_new_proxy(args):\n\n global last_proxy\n\n # If none/round - simply get next proxy.\n if ((args.proxy_rotation is None) or (args.proxy_rotation == 'none') or\n (args.proxy_rotation == 'round')):\n if last_proxy >= len(args.proxy) - 1:\n last_proxy = 0\n else:\n last_proxy = last_proxy + 1\n lp = last_proxy\n # If random - get random one.\n elif (args.proxy_rotation == 'random'):\n lp = randint(0, len(args.proxy) - 1)\n else:\n log.warning('Parameter -pxo/--proxy-rotation has wrong value. ' +\n 'Use only first proxy.')\n lp = 0\n\n return lp, args.proxy[lp]\n", "path": "pogom/proxy.py"}]} | 2,576 | 623 |
gh_patches_debug_26154 | rasdani/github-patches | git_diff | cupy__cupy-5964 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`expand_dims` fails to check duplicated axes
```
>>> cupy.expand_dims(cupy.ones(7), axis=(0, -3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "cupy/cupy/_manipulation/dims.py", line 148, in expand_dims
return _manipulation._expand_dims(a, axis)
File "cupy/core/_routines_manipulation.pyx", line 223, in cupy.core._routines_manipulation._expand_dims
File "cupy/core/_routines_manipulation.pyx", line 238, in cupy.core._routines_manipulation._expand_dims
File "cupy/core/_routines_manipulation.pyx", line 317, in cupy.core._routines_manipulation._reshape
ValueError: cannot reshape array of size 7 into shape 49
```
</issue>
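For reference, NumPy normalizes the axis tuple up front and rejects this same call with a `ValueError` about a repeated axis: for a 1-d input expanded by two axes the result is 3-d, so axis `0` and axis `-3` refer to the same position. A minimal sketch of that kind of duplicate-axis check is shown below; it is a hypothetical helper for illustration only, not CuPy's actual internals.

```python
# Hypothetical illustration of a duplicate-axis check; not CuPy's real implementation.
def normalize_axes(axes, ndim):
    seen = []
    for ax in axes:
        if not -ndim <= ax < ndim:
            raise ValueError('axis %d is out of bounds for dimension %d' % (ax, ndim))
        ax %= ndim            # map negative axes onto their positive equivalents
        if ax in seen:
            raise ValueError('repeated axis')
        seen.append(ax)
    return tuple(seen)

# For the failing call above: the result ndim is 1 + 2 == 3, and 0 and -3 both map to 0.
normalize_axes((0, -3), ndim=3)  # raises ValueError('repeated axis')
```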
<code>
[start of cupy/_manipulation/rearrange.py]
1 import itertools
2
3 import numpy
4
5 import cupy
6 from cupy._core import _reduction
7 from cupy._core import internal
8
9
10 def flip(a, axis=None):
11 """Reverse the order of elements in an array along the given axis.
12
13 Note that ``flip`` function has been introduced since NumPy v1.12.
14 The contents of this document is the same as the original one.
15
16 Args:
17 a (~cupy.ndarray): Input array.
18 axis (int or tuple of int or None): Axis or axes along which to flip
19 over. The default, ``axis=None``, will flip over all of the axes of
20 the input array. If axis is negative it counts from the last to the
21 first axis. If axis is a tuple of ints, flipping is performed on
22 all of the axes specified in the tuple.
23
24 Returns:
25 ~cupy.ndarray: Output array.
26
27 .. seealso:: :func:`numpy.flip`
28
29 """
30 a_ndim = a.ndim
31 if a_ndim < 1:
32 raise numpy.AxisError('Input must be >= 1-d')
33
34 axes = internal._normalize_axis_indices(axis, a_ndim)
35 return _flip(a, axes)
36
37
38 def fliplr(a):
39 """Flip array in the left/right direction.
40
41 Flip the entries in each row in the left/right direction. Columns
42 are preserved, but appear in a different order than before.
43
44 Args:
45 a (~cupy.ndarray): Input array.
46
47 Returns:
48 ~cupy.ndarray: Output array.
49
50 .. seealso:: :func:`numpy.fliplr`
51
52 """
53 if a.ndim < 2:
54 raise ValueError('Input must be >= 2-d')
55 return a[::, ::-1]
56
57
58 def flipud(a):
59 """Flip array in the up/down direction.
60
61 Flip the entries in each column in the up/down direction. Rows are
62 preserved, but appear in a different order than before.
63
64 Args:
65 a (~cupy.ndarray): Input array.
66
67 Returns:
68 ~cupy.ndarray: Output array.
69
70 .. seealso:: :func:`numpy.flipud`
71
72 """
73 if a.ndim < 1:
74 raise ValueError('Input must be >= 1-d')
75 return a[::-1]
76
77
78 def roll(a, shift, axis=None):
79 """Roll array elements along a given axis.
80
81 Elements that roll beyond the last position are re-introduced at the first.
82
83 Args:
84 a (~cupy.ndarray): Array to be rolled.
85 shift (int or tuple of int): The number of places by which elements are
86 shifted. If a tuple, then `axis` must be a tuple of the same size,
87 and each of the given axes is shifted by the corresponding number.
88 If an int while `axis` is a tuple of ints, then the same value is
89 used for all given axes.
90 axis (int or tuple of int or None): The axis along which elements are
91 shifted. By default, the array is flattened before shifting, after
92 which the original shape is restored.
93
94 Returns:
95 ~cupy.ndarray: Output array.
96
97 .. seealso:: :func:`numpy.roll`
98
99 """
100 if axis is None:
101 return roll(a.ravel(), shift, 0).reshape(a.shape)
102 elif isinstance(shift, cupy.ndarray):
103 shift = shift.ravel()
104 axes = _reduction._get_axis(axis, a.ndim)[0]
105 n_axes = max(len(axes), shift.size)
106 axes = numpy.broadcast_to(axes, (n_axes,))
107 shift = cupy.broadcast_to(shift, (n_axes,))
108
109 # TODO(asi1024): Improve after issue #4799 is resolved.
110 indices = []
111 for ax in range(a.ndim):
112 ind_shape = [1] * a.ndim
113 ind_shape[ax] = a.shape[ax]
114 indices.append(cupy.arange(a.shape[ax]).reshape(ind_shape))
115
116 for ax, s in zip(axes, shift):
117 indices[ax] -= s
118 indices[ax] %= a.shape[ax]
119
120 for ax in range(a.ndim):
121 indices[ax] = cupy.broadcast_to(indices[ax], a.shape)
122
123 return a[tuple(indices)]
124 else:
125 axis = _reduction._get_axis(axis, a.ndim)[0]
126
127 broadcasted = numpy.broadcast(shift, axis)
128 if broadcasted.nd > 1:
129 raise ValueError(
130 '\'shift\' and \'axis\' should be scalars or 1D sequences')
131 shifts = {ax: 0 for ax in range(a.ndim)}
132 for sh, ax in broadcasted:
133 shifts[ax] += sh
134
135 rolls = [((slice(None), slice(None)),)] * a.ndim
136 for ax, offset in shifts.items():
137 offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
138 if offset:
139 # (original, result), (original, result)
140 rolls[ax] = ((slice(None, -offset), slice(offset, None)),
141 (slice(-offset, None), slice(None, offset)))
142
143 result = cupy.empty_like(a)
144 for indices in itertools.product(*rolls):
145 arr_index, res_index = zip(*indices)
146 result[res_index] = a[arr_index]
147
148 return result
149
150
151 def rot90(a, k=1, axes=(0, 1)):
152 """Rotate an array by 90 degrees in the plane specified by axes.
153
154 Note that ``axes`` argument has been introduced since NumPy v1.12.
155 The contents of this document is the same as the original one.
156
157 Args:
158 a (~cupy.ndarray): Array of two or more dimensions.
159 k (int): Number of times the array is rotated by 90 degrees.
160 axes: (tuple of ints): The array is rotated in the plane defined by
161 the axes. Axes must be different.
162
163 Returns:
164 ~cupy.ndarray: Output array.
165
166 .. seealso:: :func:`numpy.rot90`
167
168 """
169 a_ndim = a.ndim
170 if a_ndim < 2:
171 raise ValueError('Input must be >= 2-d')
172
173 axes = tuple(axes)
174 if len(axes) != 2:
175 raise ValueError('len(axes) must be 2')
176 if axes[0] == axes[1] or abs(axes[0] - axes[1]) == a_ndim:
177 raise ValueError('axes must be different')
178 if not (-a_ndim <= axes[0] < a_ndim and -a_ndim <= axes[1] < a_ndim):
179 raise ValueError('axes must be >= %d and < %d' % (-a_ndim, a_ndim))
180
181 k = k % 4
182
183 if k == 0:
184 return a[:]
185 if k == 2:
186 return _flip(a, axes)
187
188 axes_t = list(range(0, a_ndim))
189 axes_t[axes[0]], axes_t[axes[1]] = axes_t[axes[1]], axes_t[axes[0]]
190
191 if k == 1:
192 return cupy.transpose(_flip(a, (axes[1],)), axes_t)
193 else:
194 return _flip(cupy.transpose(a, axes_t), (axes[1],))
195
196
197 def _flip(a, axes):
198 # This function flips array without checking args.
199 indexer = [slice(None)] * a.ndim
200 for ax in axes:
201 indexer[ax] = slice(None, None, -1)
202
203 return a[tuple(indexer)]
204
[end of cupy/_manipulation/rearrange.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/_manipulation/rearrange.py b/cupy/_manipulation/rearrange.py
--- a/cupy/_manipulation/rearrange.py
+++ b/cupy/_manipulation/rearrange.py
@@ -3,7 +3,6 @@
import numpy
import cupy
-from cupy._core import _reduction
from cupy._core import internal
@@ -99,9 +98,13 @@
"""
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
- elif isinstance(shift, cupy.ndarray):
+
+ axes = (axis,) if numpy.isscalar(axis) else axis
+ axes = tuple([ # allow_duplicate
+ internal._normalize_axis_index(ax, a.ndim) for ax in axes
+ ])
+ if isinstance(shift, cupy.ndarray):
shift = shift.ravel()
- axes = _reduction._get_axis(axis, a.ndim)[0]
n_axes = max(len(axes), shift.size)
axes = numpy.broadcast_to(axes, (n_axes,))
shift = cupy.broadcast_to(shift, (n_axes,))
@@ -122,9 +125,7 @@
return a[tuple(indices)]
else:
- axis = _reduction._get_axis(axis, a.ndim)[0]
-
- broadcasted = numpy.broadcast(shift, axis)
+ broadcasted = numpy.broadcast(shift, axes)
if broadcasted.nd > 1:
raise ValueError(
'\'shift\' and \'axis\' should be scalars or 1D sequences')
| {"golden_diff": "diff --git a/cupy/_manipulation/rearrange.py b/cupy/_manipulation/rearrange.py\n--- a/cupy/_manipulation/rearrange.py\n+++ b/cupy/_manipulation/rearrange.py\n@@ -3,7 +3,6 @@\n import numpy\n \n import cupy\n-from cupy._core import _reduction\n from cupy._core import internal\n \n \n@@ -99,9 +98,13 @@\n \"\"\"\n if axis is None:\n return roll(a.ravel(), shift, 0).reshape(a.shape)\n- elif isinstance(shift, cupy.ndarray):\n+\n+ axes = (axis,) if numpy.isscalar(axis) else axis\n+ axes = tuple([ # allow_duplicate\n+ internal._normalize_axis_index(ax, a.ndim) for ax in axes\n+ ])\n+ if isinstance(shift, cupy.ndarray):\n shift = shift.ravel()\n- axes = _reduction._get_axis(axis, a.ndim)[0]\n n_axes = max(len(axes), shift.size)\n axes = numpy.broadcast_to(axes, (n_axes,))\n shift = cupy.broadcast_to(shift, (n_axes,))\n@@ -122,9 +125,7 @@\n \n return a[tuple(indices)]\n else:\n- axis = _reduction._get_axis(axis, a.ndim)[0]\n-\n- broadcasted = numpy.broadcast(shift, axis)\n+ broadcasted = numpy.broadcast(shift, axes)\n if broadcasted.nd > 1:\n raise ValueError(\n '\\'shift\\' and \\'axis\\' should be scalars or 1D sequences')\n", "issue": "`expand_dims` fails to check duplicated axes\n```\r\n>>> cupy.expand_dims(cupy.ones(7), axis=(0, -3))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"cupy/cupy/_manipulation/dims.py\", line 148, in expand_dims\r\n return _manipulation._expand_dims(a, axis)\r\n File \"cupy/core/_routines_manipulation.pyx\", line 223, in cupy.core._routines_manipulation._expand_dims\r\n File \"cupy/core/_routines_manipulation.pyx\", line 238, in cupy.core._routines_manipulation._expand_dims\r\n File \"cupy/core/_routines_manipulation.pyx\", line 317, in cupy.core._routines_manipulation._reshape\r\nValueError: cannot reshape array of size 7 into shape 49\r\n```\n", "before_files": [{"content": "import itertools\n\nimport numpy\n\nimport cupy\nfrom cupy._core import _reduction\nfrom cupy._core import internal\n\n\ndef flip(a, axis=None):\n \"\"\"Reverse the order of elements in an array along the given axis.\n\n Note that ``flip`` function has been introduced since NumPy v1.12.\n The contents of this document is the same as the original one.\n\n Args:\n a (~cupy.ndarray): Input array.\n axis (int or tuple of int or None): Axis or axes along which to flip\n over. The default, ``axis=None``, will flip over all of the axes of\n the input array. If axis is negative it counts from the last to the\n first axis. If axis is a tuple of ints, flipping is performed on\n all of the axes specified in the tuple.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.flip`\n\n \"\"\"\n a_ndim = a.ndim\n if a_ndim < 1:\n raise numpy.AxisError('Input must be >= 1-d')\n\n axes = internal._normalize_axis_indices(axis, a_ndim)\n return _flip(a, axes)\n\n\ndef fliplr(a):\n \"\"\"Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction. Columns\n are preserved, but appear in a different order than before.\n\n Args:\n a (~cupy.ndarray): Input array.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.fliplr`\n\n \"\"\"\n if a.ndim < 2:\n raise ValueError('Input must be >= 2-d')\n return a[::, ::-1]\n\n\ndef flipud(a):\n \"\"\"Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction. 
Rows are\n preserved, but appear in a different order than before.\n\n Args:\n a (~cupy.ndarray): Input array.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.flipud`\n\n \"\"\"\n if a.ndim < 1:\n raise ValueError('Input must be >= 1-d')\n return a[::-1]\n\n\ndef roll(a, shift, axis=None):\n \"\"\"Roll array elements along a given axis.\n\n Elements that roll beyond the last position are re-introduced at the first.\n\n Args:\n a (~cupy.ndarray): Array to be rolled.\n shift (int or tuple of int): The number of places by which elements are\n shifted. If a tuple, then `axis` must be a tuple of the same size,\n and each of the given axes is shifted by the corresponding number.\n If an int while `axis` is a tuple of ints, then the same value is\n used for all given axes.\n axis (int or tuple of int or None): The axis along which elements are\n shifted. By default, the array is flattened before shifting, after\n which the original shape is restored.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. seealso:: :func:`numpy.roll`\n\n \"\"\"\n if axis is None:\n return roll(a.ravel(), shift, 0).reshape(a.shape)\n elif isinstance(shift, cupy.ndarray):\n shift = shift.ravel()\n axes = _reduction._get_axis(axis, a.ndim)[0]\n n_axes = max(len(axes), shift.size)\n axes = numpy.broadcast_to(axes, (n_axes,))\n shift = cupy.broadcast_to(shift, (n_axes,))\n\n # TODO(asi1024): Improve after issue #4799 is resolved.\n indices = []\n for ax in range(a.ndim):\n ind_shape = [1] * a.ndim\n ind_shape[ax] = a.shape[ax]\n indices.append(cupy.arange(a.shape[ax]).reshape(ind_shape))\n\n for ax, s in zip(axes, shift):\n indices[ax] -= s\n indices[ax] %= a.shape[ax]\n\n for ax in range(a.ndim):\n indices[ax] = cupy.broadcast_to(indices[ax], a.shape)\n\n return a[tuple(indices)]\n else:\n axis = _reduction._get_axis(axis, a.ndim)[0]\n\n broadcasted = numpy.broadcast(shift, axis)\n if broadcasted.nd > 1:\n raise ValueError(\n '\\'shift\\' and \\'axis\\' should be scalars or 1D sequences')\n shifts = {ax: 0 for ax in range(a.ndim)}\n for sh, ax in broadcasted:\n shifts[ax] += sh\n\n rolls = [((slice(None), slice(None)),)] * a.ndim\n for ax, offset in shifts.items():\n offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.\n if offset:\n # (original, result), (original, result)\n rolls[ax] = ((slice(None, -offset), slice(offset, None)),\n (slice(-offset, None), slice(None, offset)))\n\n result = cupy.empty_like(a)\n for indices in itertools.product(*rolls):\n arr_index, res_index = zip(*indices)\n result[res_index] = a[arr_index]\n\n return result\n\n\ndef rot90(a, k=1, axes=(0, 1)):\n \"\"\"Rotate an array by 90 degrees in the plane specified by axes.\n\n Note that ``axes`` argument has been introduced since NumPy v1.12.\n The contents of this document is the same as the original one.\n\n Args:\n a (~cupy.ndarray): Array of two or more dimensions.\n k (int): Number of times the array is rotated by 90 degrees.\n axes: (tuple of ints): The array is rotated in the plane defined by\n the axes. Axes must be different.\n\n Returns:\n ~cupy.ndarray: Output array.\n\n .. 
seealso:: :func:`numpy.rot90`\n\n \"\"\"\n a_ndim = a.ndim\n if a_ndim < 2:\n raise ValueError('Input must be >= 2-d')\n\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError('len(axes) must be 2')\n if axes[0] == axes[1] or abs(axes[0] - axes[1]) == a_ndim:\n raise ValueError('axes must be different')\n if not (-a_ndim <= axes[0] < a_ndim and -a_ndim <= axes[1] < a_ndim):\n raise ValueError('axes must be >= %d and < %d' % (-a_ndim, a_ndim))\n\n k = k % 4\n\n if k == 0:\n return a[:]\n if k == 2:\n return _flip(a, axes)\n\n axes_t = list(range(0, a_ndim))\n axes_t[axes[0]], axes_t[axes[1]] = axes_t[axes[1]], axes_t[axes[0]]\n\n if k == 1:\n return cupy.transpose(_flip(a, (axes[1],)), axes_t)\n else:\n return _flip(cupy.transpose(a, axes_t), (axes[1],))\n\n\ndef _flip(a, axes):\n # This function flips array without checking args.\n indexer = [slice(None)] * a.ndim\n for ax in axes:\n indexer[ax] = slice(None, None, -1)\n\n return a[tuple(indexer)]\n", "path": "cupy/_manipulation/rearrange.py"}]} | 2,928 | 350 |
gh_patches_debug_10665 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Startup folder not configured correctly
I think the `app_root="startup"` should be `app_root=startup` [here](https://github.com/NVIDIA/NVFlare/blob/fa20e5e27b01e0f66cbefea229aed2cf6729663d/nvflare/private/fed/app/server/server_train.py#L67-L69)
If you like, I can submit a PR.
</issue>
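For clarity, `startup` in the linked lines is a local variable computed from the workspace path just above the `FLServerStarterConfiger(...)` call, whereas `"startup"` is only the literal folder name. A short annotated sketch with made-up paths:

```python
import os

args_workspace = "/path/to/workspace"              # hypothetical workspace folder
startup = os.path.join(args_workspace, "startup")  # -> "/path/to/workspace/startup"

# app_root="startup"  passes the bare folder name and ignores the computed path
# app_root=startup    passes the workspace's startup folder, which is what the
#                     surrounding code appears to intend
```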
<code>
[start of nvflare/private/fed/app/server/server_train.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Provides a command line interface for federated server."""
16
17 import argparse
18 import logging
19 import os
20 import sys
21
22 from nvflare.fuel.common.excepts import ConfigError
23 from nvflare.fuel.hci.server.authz import AuthorizationService
24 from nvflare.fuel.sec.audit import AuditService
25 from nvflare.fuel.sec.security_content_service import LoadResult, SecurityContentService
26 from nvflare.fuel.utils.argument_utils import parse_vars
27 from nvflare.private.fed.app.fl_conf import FLServerStarterConfiger
28 from nvflare.private.fed.server.admin import FedAdminServer
29 from nvflare.security.security import EmptyAuthorizer, FLAuthorizer
30
31
32 def main():
33 """FL Server program starting point."""
34 parser = argparse.ArgumentParser()
35 parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
36
37 parser.add_argument(
38 "--fed_server", "-s", type=str, help="an aggregation server specification json file", required=True
39 )
40
41 parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
42
43 args = parser.parse_args()
44 kv_list = parse_vars(args.set)
45
46 args.train_config = "config/config_train.json"
47 config_folder = kv_list.get("config_folder", "")
48 if config_folder == "":
49 args.server_config = "config_fed_server.json"
50 else:
51 args.server_config = config_folder + "/config_fed_server.json"
52 args.env = "config/environment.json"
53 args.config_folder = config_folder
54 logger = logging.getLogger()
55 args.log_config = None
56
57 try:
58 remove_restart_file(args)
59 except BaseException:
60 print("Could not remove the restart.fl / shutdown.fl file. Please check your system before starting FL.")
61 sys.exit(-1)
62
63 try:
64 os.chdir(args.workspace)
65
66 # trainer = WorkFlowFactory().create_server_trainer(train_configs, envs)
67 startup = os.path.join(args.workspace, "startup")
68 conf = FLServerStarterConfiger(
69 app_root="startup",
70 # wf_config_file_name="config_train.json",
71 server_config_file_name=args.fed_server,
72 # env_config_file_name="environment.json",
73 log_config_file_name="log.config",
74 kv_list=args.set,
75 )
76 log_level = os.environ.get("FL_LOG_LEVEL", "")
77 numeric_level = getattr(logging, log_level.upper(), None)
78 if isinstance(numeric_level, int):
79 logging.getLogger().setLevel(numeric_level)
80 logger.debug("loglevel debug enabled")
81 logger.info("loglevel info enabled")
82 logger.warning("loglevel warn enabled")
83 logger.error("loglevel error enabled")
84 logger.critical("loglevel critical enabled")
85 conf.configure()
86
87 deployer = conf.deployer
88 secure_train = conf.cmd_vars.get("secure_train", False)
89
90 security_check(secure_train, args)
91
92 try:
93 # Deploy the FL server
94 services = deployer.deploy(args)
95
96 first_server = sorted(conf.config_data["servers"])[0]
97 # allow command to overwrite the admin_host
98 if conf.cmd_vars.get("host", None):
99 first_server["admin_host"] = conf.cmd_vars["host"]
100 admin_server = create_admin_server(
101 services,
102 server_conf=first_server,
103 args=args,
104 secure_train=secure_train,
105 app_validator=deployer.app_validator,
106 )
107 admin_server.start()
108
109 services.platform = "PT"
110
111 services.set_admin_server(admin_server)
112 finally:
113 deployer.close()
114
115 logger.info("Server started")
116
117 except ConfigError as ex:
118 print("ConfigError:", str(ex))
119 finally:
120 pass
121
122
123 def security_check(secure_train, args):
124 """To check the security content if running in security mode.
125
126 Args:
127 secure_train: True/False
128 args: command args
129
130 """
131 # initialize the SecurityContentService.
132 # must do this before initializing other services since it may be needed by them!
133 startup = os.path.join(args.workspace, "startup")
134 SecurityContentService.initialize(content_folder=startup)
135
136 if secure_train:
137 insecure_list = secure_content_check(args)
138 if len(insecure_list):
139 print("The following files are not secure content.")
140 for item in insecure_list:
141 print(item)
142 sys.exit(1)
143
144 # initialize the AuditService, which is used by command processing.
145 # The Audit Service can be used in other places as well.
146 AuditService.initialize(audit_file_name="audit.log")
147 # Initialize the AuthorizationService. It is used by command authorization
148 # We use FLAuthorizer for policy processing.
149 # AuthorizationService depends on SecurityContentService to read authorization policy file.
150 if secure_train:
151 _, err = AuthorizationService.initialize(FLAuthorizer())
152 else:
153 _, err = AuthorizationService.initialize(EmptyAuthorizer())
154
155 if err:
156 print("AuthorizationService error: {}".format(err))
157 sys.exit(1)
158
159
160 def secure_content_check(args):
161 """To check the security contents.
162
163 Args:
164 args: command args
165
166 Returns: the insecure content list
167
168 """
169 insecure_list = []
170 data, sig = SecurityContentService.load_json(args.fed_server)
171 if sig != LoadResult.OK:
172 insecure_list.append(args.fed_server)
173
174 for server in data["servers"]:
175 content, sig = SecurityContentService.load_content(server.get("ssl_cert"))
176 if sig != LoadResult.OK:
177 insecure_list.append(server.get("ssl_cert"))
178 content, sig = SecurityContentService.load_content(server.get("ssl_private_key"))
179 if sig != LoadResult.OK:
180 insecure_list.append(server.get("ssl_private_key"))
181 content, sig = SecurityContentService.load_content(server.get("ssl_root_cert"))
182 if sig != LoadResult.OK:
183 insecure_list.append(server.get("ssl_root_cert"))
184
185 if "authorization.json" in SecurityContentService.security_content_manager.signature:
186 data, sig = SecurityContentService.load_json("authorization.json")
187 if sig != LoadResult.OK:
188 insecure_list.append("authorization.json")
189
190 return insecure_list
191
192
193 def remove_restart_file(args):
194 """To remove the restart.fl file.
195
196 Args:
197 args: command args
198
199 """
200 restart_file = os.path.join(args.workspace, "restart.fl")
201 if os.path.exists(restart_file):
202 os.remove(restart_file)
203 restart_file = os.path.join(args.workspace, "shutdown.fl")
204 if os.path.exists(restart_file):
205 os.remove(restart_file)
206
207
208 def create_admin_server(fl_server, server_conf=None, args=None, secure_train=False, app_validator=None):
209 """To create the admin server.
210
211 Args:
212 fl_server: fl_server
213 server_conf: server config
214 args: command args
215 secure_train: True/False
216 app_validator: application validator
217
218 Returns: admin server
219
220 """
221 # sai = ServerEngine(fl_server, args)
222 users = {}
223 # Create a default user admin:admin for the POC insecure use case.
224 if not secure_train:
225 users = {
226 "admin": "e7b71aa322cecc502e9454271b98feaec594da944c369facc90ac85016dc6c74c3fd99657ebd9d083a7804c3a17ddd8c655df8bcbf172be9d0207c8c9430c19be3cd846949505d283e066434175956bf45cd1d6781e63e5be4f3e23533d4d002"
227 }
228 # cmd_modules = [ValidationCommandModule()]
229
230 root_cert = server_conf["ssl_root_cert"] if secure_train else None
231 server_cert = server_conf["ssl_cert"] if secure_train else None
232 server_key = server_conf["ssl_private_key"] if secure_train else None
233 admin_server = FedAdminServer(
234 fed_admin_interface=fl_server.engine,
235 users=users,
236 cmd_modules=fl_server.cmd_modules,
237 file_upload_dir=os.path.join(args.workspace, server_conf.get("admin_storage", "tmp")),
238 file_download_dir=os.path.join(args.workspace, server_conf.get("admin_storage", "tmp")),
239 allowed_shell_cmds=None,
240 host=server_conf.get("admin_host", "localhost"),
241 port=server_conf.get("admin_port", 5005),
242 ca_cert_file_name=root_cert,
243 server_cert_file_name=server_cert,
244 server_key_file_name=server_key,
245 accepted_client_cns=None,
246 app_validator=app_validator,
247 )
248 return admin_server
249
250
251 if __name__ == "__main__":
252 """
253 This is the main program when starting the NVIDIA FLARE server process.
254 """
255
256 main()
257
[end of nvflare/private/fed/app/server/server_train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/private/fed/app/server/server_train.py b/nvflare/private/fed/app/server/server_train.py
--- a/nvflare/private/fed/app/server/server_train.py
+++ b/nvflare/private/fed/app/server/server_train.py
@@ -66,7 +66,7 @@
# trainer = WorkFlowFactory().create_server_trainer(train_configs, envs)
startup = os.path.join(args.workspace, "startup")
conf = FLServerStarterConfiger(
- app_root="startup",
+ app_root=startup,
# wf_config_file_name="config_train.json",
server_config_file_name=args.fed_server,
# env_config_file_name="environment.json",
| {"golden_diff": "diff --git a/nvflare/private/fed/app/server/server_train.py b/nvflare/private/fed/app/server/server_train.py\n--- a/nvflare/private/fed/app/server/server_train.py\n+++ b/nvflare/private/fed/app/server/server_train.py\n@@ -66,7 +66,7 @@\n # trainer = WorkFlowFactory().create_server_trainer(train_configs, envs)\n startup = os.path.join(args.workspace, \"startup\")\n conf = FLServerStarterConfiger(\n- app_root=\"startup\",\n+ app_root=startup,\n # wf_config_file_name=\"config_train.json\",\n server_config_file_name=args.fed_server,\n # env_config_file_name=\"environment.json\",\n", "issue": "Startup folder not configured correctly\nI think the `app_root=\"startup\"` should be `app_root=startup` [here](https://github.com/NVIDIA/NVFlare/blob/fa20e5e27b01e0f66cbefea229aed2cf6729663d/nvflare/private/fed/app/server/server_train.py#L67-L69)\r\n\r\nIf you like I can submit a PR?\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a command line interface for federated server.\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom nvflare.fuel.common.excepts import ConfigError\nfrom nvflare.fuel.hci.server.authz import AuthorizationService\nfrom nvflare.fuel.sec.audit import AuditService\nfrom nvflare.fuel.sec.security_content_service import LoadResult, SecurityContentService\nfrom nvflare.fuel.utils.argument_utils import parse_vars\nfrom nvflare.private.fed.app.fl_conf import FLServerStarterConfiger\nfrom nvflare.private.fed.server.admin import FedAdminServer\nfrom nvflare.security.security import EmptyAuthorizer, FLAuthorizer\n\n\ndef main():\n \"\"\"FL Server program starting point.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n\n parser.add_argument(\n \"--fed_server\", \"-s\", type=str, help=\"an aggregation server specification json file\", required=True\n )\n\n parser.add_argument(\"--set\", metavar=\"KEY=VALUE\", nargs=\"*\")\n\n args = parser.parse_args()\n kv_list = parse_vars(args.set)\n\n args.train_config = \"config/config_train.json\"\n config_folder = kv_list.get(\"config_folder\", \"\")\n if config_folder == \"\":\n args.server_config = \"config_fed_server.json\"\n else:\n args.server_config = config_folder + \"/config_fed_server.json\"\n args.env = \"config/environment.json\"\n args.config_folder = config_folder\n logger = logging.getLogger()\n args.log_config = None\n\n try:\n remove_restart_file(args)\n except BaseException:\n print(\"Could not remove the restart.fl / shutdown.fl file. 
Please check your system before starting FL.\")\n sys.exit(-1)\n\n try:\n os.chdir(args.workspace)\n\n # trainer = WorkFlowFactory().create_server_trainer(train_configs, envs)\n startup = os.path.join(args.workspace, \"startup\")\n conf = FLServerStarterConfiger(\n app_root=\"startup\",\n # wf_config_file_name=\"config_train.json\",\n server_config_file_name=args.fed_server,\n # env_config_file_name=\"environment.json\",\n log_config_file_name=\"log.config\",\n kv_list=args.set,\n )\n log_level = os.environ.get(\"FL_LOG_LEVEL\", \"\")\n numeric_level = getattr(logging, log_level.upper(), None)\n if isinstance(numeric_level, int):\n logging.getLogger().setLevel(numeric_level)\n logger.debug(\"loglevel debug enabled\")\n logger.info(\"loglevel info enabled\")\n logger.warning(\"loglevel warn enabled\")\n logger.error(\"loglevel error enabled\")\n logger.critical(\"loglevel critical enabled\")\n conf.configure()\n\n deployer = conf.deployer\n secure_train = conf.cmd_vars.get(\"secure_train\", False)\n\n security_check(secure_train, args)\n\n try:\n # Deploy the FL server\n services = deployer.deploy(args)\n\n first_server = sorted(conf.config_data[\"servers\"])[0]\n # allow command to overwrite the admin_host\n if conf.cmd_vars.get(\"host\", None):\n first_server[\"admin_host\"] = conf.cmd_vars[\"host\"]\n admin_server = create_admin_server(\n services,\n server_conf=first_server,\n args=args,\n secure_train=secure_train,\n app_validator=deployer.app_validator,\n )\n admin_server.start()\n\n services.platform = \"PT\"\n\n services.set_admin_server(admin_server)\n finally:\n deployer.close()\n\n logger.info(\"Server started\")\n\n except ConfigError as ex:\n print(\"ConfigError:\", str(ex))\n finally:\n pass\n\n\ndef security_check(secure_train, args):\n \"\"\"To check the security content if running in security mode.\n\n Args:\n secure_train: True/False\n args: command args\n\n \"\"\"\n # initialize the SecurityContentService.\n # must do this before initializing other services since it may be needed by them!\n startup = os.path.join(args.workspace, \"startup\")\n SecurityContentService.initialize(content_folder=startup)\n\n if secure_train:\n insecure_list = secure_content_check(args)\n if len(insecure_list):\n print(\"The following files are not secure content.\")\n for item in insecure_list:\n print(item)\n sys.exit(1)\n\n # initialize the AuditService, which is used by command processing.\n # The Audit Service can be used in other places as well.\n AuditService.initialize(audit_file_name=\"audit.log\")\n # Initialize the AuthorizationService. 
It is used by command authorization\n # We use FLAuthorizer for policy processing.\n # AuthorizationService depends on SecurityContentService to read authorization policy file.\n if secure_train:\n _, err = AuthorizationService.initialize(FLAuthorizer())\n else:\n _, err = AuthorizationService.initialize(EmptyAuthorizer())\n\n if err:\n print(\"AuthorizationService error: {}\".format(err))\n sys.exit(1)\n\n\ndef secure_content_check(args):\n \"\"\"To check the security contents.\n\n Args:\n args: command args\n\n Returns: the insecure content list\n\n \"\"\"\n insecure_list = []\n data, sig = SecurityContentService.load_json(args.fed_server)\n if sig != LoadResult.OK:\n insecure_list.append(args.fed_server)\n\n for server in data[\"servers\"]:\n content, sig = SecurityContentService.load_content(server.get(\"ssl_cert\"))\n if sig != LoadResult.OK:\n insecure_list.append(server.get(\"ssl_cert\"))\n content, sig = SecurityContentService.load_content(server.get(\"ssl_private_key\"))\n if sig != LoadResult.OK:\n insecure_list.append(server.get(\"ssl_private_key\"))\n content, sig = SecurityContentService.load_content(server.get(\"ssl_root_cert\"))\n if sig != LoadResult.OK:\n insecure_list.append(server.get(\"ssl_root_cert\"))\n\n if \"authorization.json\" in SecurityContentService.security_content_manager.signature:\n data, sig = SecurityContentService.load_json(\"authorization.json\")\n if sig != LoadResult.OK:\n insecure_list.append(\"authorization.json\")\n\n return insecure_list\n\n\ndef remove_restart_file(args):\n \"\"\"To remove the restart.fl file.\n\n Args:\n args: command args\n\n \"\"\"\n restart_file = os.path.join(args.workspace, \"restart.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n restart_file = os.path.join(args.workspace, \"shutdown.fl\")\n if os.path.exists(restart_file):\n os.remove(restart_file)\n\n\ndef create_admin_server(fl_server, server_conf=None, args=None, secure_train=False, app_validator=None):\n \"\"\"To create the admin server.\n\n Args:\n fl_server: fl_server\n server_conf: server config\n args: command args\n secure_train: True/False\n app_validator: application validator\n\n Returns: admin server\n\n \"\"\"\n # sai = ServerEngine(fl_server, args)\n users = {}\n # Create a default user admin:admin for the POC insecure use case.\n if not secure_train:\n users = {\n \"admin\": \"e7b71aa322cecc502e9454271b98feaec594da944c369facc90ac85016dc6c74c3fd99657ebd9d083a7804c3a17ddd8c655df8bcbf172be9d0207c8c9430c19be3cd846949505d283e066434175956bf45cd1d6781e63e5be4f3e23533d4d002\"\n }\n # cmd_modules = [ValidationCommandModule()]\n\n root_cert = server_conf[\"ssl_root_cert\"] if secure_train else None\n server_cert = server_conf[\"ssl_cert\"] if secure_train else None\n server_key = server_conf[\"ssl_private_key\"] if secure_train else None\n admin_server = FedAdminServer(\n fed_admin_interface=fl_server.engine,\n users=users,\n cmd_modules=fl_server.cmd_modules,\n file_upload_dir=os.path.join(args.workspace, server_conf.get(\"admin_storage\", \"tmp\")),\n file_download_dir=os.path.join(args.workspace, server_conf.get(\"admin_storage\", \"tmp\")),\n allowed_shell_cmds=None,\n host=server_conf.get(\"admin_host\", \"localhost\"),\n port=server_conf.get(\"admin_port\", 5005),\n ca_cert_file_name=root_cert,\n server_cert_file_name=server_cert,\n server_key_file_name=server_key,\n accepted_client_cns=None,\n app_validator=app_validator,\n )\n return admin_server\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the main program when starting the 
NVIDIA FLARE server process.\n \"\"\"\n\n main()\n", "path": "nvflare/private/fed/app/server/server_train.py"}]} | 3,387 | 153 |
gh_patches_debug_23767 | rasdani/github-patches | git_diff | microsoft__presidio-650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Image anonymization is slow
Hi,
First of all, thank you guys for making this awesome project :)
I wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a Python package and it takes about 5 seconds to process pretty small images. I'm using the example code from the Presidio docs.
</issue>
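A likely reason for the slowness described above, judging from the module listed below, is that `ImageAnalyzerEngine.analyze` builds a brand-new `AnalyzerEngine` on every call, and constructing that engine (which loads the underlying NLP model) is usually the expensive step. A minimal sketch of the usual workaround is to create the engine once and reuse it; the function name here is made up for illustration, while `AnalyzerEngine` and `analyze(text=..., language=...)` come from the code in this row. The patch recorded further down takes the same approach for images by letting `ImageAnalyzerEngine` accept a pre-built engine.

```python
# Sketch only: construct the expensive engine once, then reuse it for every call.
from presidio_analyzer import AnalyzerEngine

analyzer = AnalyzerEngine()  # model loading happens here, a single time


def find_pii(texts):
    # Each analyze() call is comparatively cheap once the engine exists.
    return [analyzer.analyze(text=t, language="en") for t in texts]
```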
<code>
[start of presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py]
1 from typing import List
2
3 from presidio_analyzer import AnalyzerEngine
4 from presidio_analyzer import RecognizerResult
5 from presidio_image_redactor.entities.image_recognizer_result import (
6 ImageRecognizerResult,
7 )
8 from presidio_image_redactor.ocr import OCR
9
10
11 class ImageAnalyzerEngine:
12 """ImageAnalyzerEngine class."""
13
14 def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
15 """Analyse method to analyse the given image.
16
17 :param image: PIL Image/numpy array or file path(str) to be processed
18
19 :return: list of the extract entities with image bounding boxes
20 """
21 ocr_result = OCR().perform_ocr(image)
22 text = OCR().get_text_from_ocr_dict(ocr_result)
23
24 analyzer = AnalyzerEngine()
25 analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
26 bboxes = self.map_analyzer_results_to_bounding_boxes(
27 analyzer_result, ocr_result, text
28 )
29 return bboxes
30
31 @staticmethod
32 def map_analyzer_results_to_bounding_boxes(
33 text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str
34 ) -> List[ImageRecognizerResult]:
35 """Map extracted PII entities to image bounding boxes.
36
37 Matching is based on the position of the recognized entity from analyzer
38 and word (in ocr dict) in the text.
39
40 :param text_analyzer_results: PII entities recognized by presidio analyzer
41 :param ocr_result: dict results with words and bboxes from OCR
42 :param text: text the results are based on
43
44 return: list of extracted entities with image bounding boxes
45 """
46 if (not ocr_result) or (not text_analyzer_results):
47 return []
48
49 bboxes = []
50 proc_indexes = 0
51 indexes = len(text_analyzer_results)
52
53 pos = 0
54 iter_ocr = enumerate(ocr_result["text"])
55 for index, word in iter_ocr:
56 if not word:
57 pos += 1
58 else:
59 for element in text_analyzer_results:
60 text_element = text[element.start : element.end]
61 # check position and text of ocr word matches recognized entity
62 if (
63 max(pos, element.start) < min(element.end, pos + len(word))
64 ) and ((text_element in word) or (word in text_element)):
65 bboxes.append(
66 ImageRecognizerResult(
67 element.entity_type,
68 element.start,
69 element.end,
70 element.score,
71 ocr_result["left"][index],
72 ocr_result["top"][index],
73 ocr_result["width"][index],
74 ocr_result["height"][index],
75 )
76 )
77
78 # add bounding boxes for all words in ocr dict
79 # contained within the text of recognized entity
80 # based on relative position in the full text
81 while pos + len(word) < element.end:
82 index, word = next(iter_ocr)
83 if word:
84 bboxes.append(
85 ImageRecognizerResult(
86 element.entity_type,
87 element.start,
88 element.end,
89 element.score,
90 ocr_result["left"][index],
91 ocr_result["top"][index],
92 ocr_result["width"][index],
93 ocr_result["height"][index],
94 )
95 )
96 pos += len(word) + 1
97 proc_indexes += 1
98
99 if proc_indexes == indexes:
100 break
101 pos += len(word) + 1
102
103 return bboxes
104
[end of presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
@@ -9,7 +9,16 @@
class ImageAnalyzerEngine:
- """ImageAnalyzerEngine class."""
+ """ImageAnalyzerEngine class.
+
+ :param analyzer_engine: The Presidio AnalyzerEngine instance
+ to be used to detect PII in text.
+ """
+
+ def __init__(self, analyzer_engine: AnalyzerEngine = None):
+ if not analyzer_engine:
+ analyzer_engine = AnalyzerEngine()
+ self.analyzer_engine = analyzer_engine
def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
"""Analyse method to analyse the given image.
@@ -21,8 +30,8 @@
ocr_result = OCR().perform_ocr(image)
text = OCR().get_text_from_ocr_dict(ocr_result)
- analyzer = AnalyzerEngine()
- analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
+ analyzer_result = self.analyzer_engine.analyze(
+ text=text, language="en", **kwargs)
bboxes = self.map_analyzer_results_to_bounding_boxes(
analyzer_result, ocr_result, text
)
| {"golden_diff": "diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n@@ -9,7 +9,16 @@\n \n \n class ImageAnalyzerEngine:\n- \"\"\"ImageAnalyzerEngine class.\"\"\"\n+ \"\"\"ImageAnalyzerEngine class.\n+\n+ :param analyzer_engine: The Presidio AnalyzerEngine instance\n+ to be used to detect PII in text.\n+ \"\"\"\n+\n+ def __init__(self, analyzer_engine: AnalyzerEngine = None):\n+ if not analyzer_engine:\n+ analyzer_engine = AnalyzerEngine()\n+ self.analyzer_engine = analyzer_engine\n \n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n@@ -21,8 +30,8 @@\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n \n- analyzer = AnalyzerEngine()\n- analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n+ analyzer_result = self.analyzer_engine.analyze(\n+ text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n", "issue": "Image anonymization is slow\nHi,\r\n\r\nFirst of all, thank you guys for making this awesome project :)\r\n\r\nI wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a python package and it takes about 5~ seconds to process pretty small images. I'm using the example code from the Presidio docs.\n", "before_files": [{"content": "from typing import List\n\nfrom presidio_analyzer import AnalyzerEngine\nfrom presidio_analyzer import RecognizerResult\nfrom presidio_image_redactor.entities.image_recognizer_result import (\n ImageRecognizerResult,\n)\nfrom presidio_image_redactor.ocr import OCR\n\n\nclass ImageAnalyzerEngine:\n \"\"\"ImageAnalyzerEngine class.\"\"\"\n\n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n\n :param image: PIL Image/numpy array or file path(str) to be processed\n\n :return: list of the extract entities with image bounding boxes\n \"\"\"\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n\n analyzer = AnalyzerEngine()\n analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n return bboxes\n\n @staticmethod\n def map_analyzer_results_to_bounding_boxes(\n text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str\n ) -> List[ImageRecognizerResult]:\n \"\"\"Map extracted PII entities to image bounding boxes.\n\n Matching is based on the position of the recognized entity from analyzer\n and word (in ocr dict) in the text.\n\n :param text_analyzer_results: PII entities recognized by presidio analyzer\n :param ocr_result: dict results with words and bboxes from OCR\n :param text: text the results are based on\n\n return: list of extracted entities with image bounding boxes\n \"\"\"\n if (not ocr_result) or (not text_analyzer_results):\n return []\n\n bboxes = []\n proc_indexes = 0\n indexes = len(text_analyzer_results)\n\n pos = 0\n iter_ocr = enumerate(ocr_result[\"text\"])\n for index, word in iter_ocr:\n if not word:\n pos += 1\n else:\n for element in text_analyzer_results:\n text_element = 
text[element.start : element.end]\n # check position and text of ocr word matches recognized entity\n if (\n max(pos, element.start) < min(element.end, pos + len(word))\n ) and ((text_element in word) or (word in text_element)):\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n\n # add bounding boxes for all words in ocr dict\n # contained within the text of recognized entity\n # based on relative position in the full text\n while pos + len(word) < element.end:\n index, word = next(iter_ocr)\n if word:\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n pos += len(word) + 1\n proc_indexes += 1\n\n if proc_indexes == indexes:\n break\n pos += len(word) + 1\n\n return bboxes\n", "path": "presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py"}]} | 1,605 | 339 |
gh_patches_debug_53486 | rasdani/github-patches | git_diff | getnikola__nikola-2593 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Archives (CREATE_MONTHLY_ARCHIVE = True) think all months are "None"
https://irclogs.getnikola.com/2015/
To reproduce locally: get https://github.com/getnikola/irclogs-site and create any random files in `posts/*.rst`.
</issue>
<code>
[start of nikola/plugins/task/archive.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Classify the posts in archives."""
28
29 import os
30 import nikola.utils
31 import datetime
32 from nikola.plugin_categories import Taxonomy
33
34
35 class Archive(Taxonomy):
36 """Classify the post archives."""
37
38 name = "classify_archive"
39
40 classification_name = "archive"
41 overview_page_variable_name = "archive"
42 more_than_one_classifications_per_post = False
43 has_hierarchy = True
44 include_posts_from_subhierarchies = True
45 include_posts_into_hierarchy_root = True
46 subcategories_list_template = "list.tmpl"
47 generate_atom_feeds_for_post_lists = False
48 template_for_classification_overview = None
49 always_disable_rss = True
50 apply_to_posts = True
51 apply_to_pages = False
52 minimum_post_count_per_classification_in_overview = 1
53 omit_empty_classifications = False
54 also_create_classifications_from_other_languages = False
55
56 def set_site(self, site):
57 """Set Nikola site."""
58 # Sanity checks
59 if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:
60 raise Exception('Cannot create monthly and single archives at the same time.')
61 # Finish setup
62 self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']
63 self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']
64 self.template_for_single_list = "archiveindex.tmpl" if site.config['ARCHIVES_ARE_INDEXES'] else "list_post.tmpl"
65 # Determine maximum hierarchy height
66 if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:
67 self.max_levels = 3
68 elif site.config['CREATE_MONTHLY_ARCHIVE']:
69 self.max_levels = 2
70 elif site.config['CREATE_SINGLE_ARCHIVE']:
71 self.max_levels = 0
72 else:
73 self.max_levels = 1
74 return super(Archive, self).set_site(site)
75
76 def get_implicit_classifications(self, lang):
77 """Return a list of classification strings which should always appear in posts_per_classification."""
78 return ['']
79
80 def classify(self, post, lang):
81 """Classify the given post for the given language."""
82 levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]
83 return ['/'.join(levels[:self.max_levels])]
84
85 def sort_classifications(self, classifications, lang, level=None):
86 """Sort the given list of classification strings."""
87 if level in (0, 1):
88 # Years or months: sort descending
89 classifications.sort()
90 classifications.reverse()
91
92 def get_classification_friendly_name(self, classification, lang, only_last_component=False):
93 """Extract a friendly name from the classification."""
94 classification = self.extract_hierarchy(classification)
95 if len(classification) == 0:
96 return ""
97 elif len(classification) == 1:
98 return classification[0]
99 elif len(classification) == 2:
100 nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
101 else:
102 # Fallback
103 return '/'.join(classification)
104
105 def get_path(self, classification, lang, dest_type='page'):
106 """A path handler for the given classification."""
107 components = [self.site.config['ARCHIVE_PATH']]
108 if classification:
109 components.extend(classification)
110 add_index = 'always'
111 else:
112 components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])
113 add_index = 'never'
114 return [_f for _f in components if _f], add_index
115
116 def extract_hierarchy(self, classification):
117 """Given a classification, return a list of parts in the hierarchy."""
118 return classification.split('/') if classification else []
119
120 def recombine_classification_from_hierarchy(self, hierarchy):
121 """Given a list of parts in the hierarchy, return the classification string."""
122 return '/'.join(hierarchy)
123
124 def provide_context_and_uptodate(self, classification, lang, node=None):
125 """Provide data for the context and the uptodate list for the list of the given classifiation."""
126 hierarchy = self.extract_hierarchy(classification)
127 kw = {
128 "messages": self.site.MESSAGES,
129 }
130 page_kind = "list"
131 if self.show_list_as_index:
132 if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:
133 page_kind = "index"
134 if len(hierarchy) == 0:
135 title = kw["messages"][lang]["Archive"]
136 kw["is_feed_stale"] = False
137 elif len(hierarchy) == 1:
138 title = kw["messages"][lang]["Posts for year %s"] % hierarchy[0]
139 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y") != hierarchy[0])
140 elif len(hierarchy) == 2:
141 title = kw["messages"][lang]["Posts for {month} {year}"].format(
142 year=hierarchy[0],
143 month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))
144 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y/%m") != classification)
145 elif len(hierarchy) == 3:
146 title = kw["messages"][lang]["Posts for {month} {day}, {year}"].format(
147 year=hierarchy[0],
148 month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),
149 day=int(hierarchy[2]))
150 kw["is_feed_stale"] = (datetime.datetime.utcnow().strftime("%Y/%m/%d") != classification)
151 else:
152 raise Exception("Cannot interpret classification {}!".format(repr(classification)))
153 context = {
154 "title": title,
155 "pagekind": [page_kind, "archive_page"],
156 }
157 if page_kind == 'index':
158 context["archive_name"] = classification if classification else None
159 context["is_feed_stale"] = kw["is_feed_stale"]
160 kw.update(context)
161 return context, kw
162
163 def should_generate_classification_page(self, classification, post_list, lang):
164 """Only generates list of posts for classification if this function returns True."""
165 return len(classification.split('/')) < 3 or len(post_list) > 0
166
[end of nikola/plugins/task/archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py
--- a/nikola/plugins/task/archive.py
+++ b/nikola/plugins/task/archive.py
@@ -97,7 +97,7 @@
elif len(classification) == 1:
return classification[0]
elif len(classification) == 2:
- nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
+ return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)
else:
# Fallback
return '/'.join(classification)
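The one-line change above also explains the symptom in the issue title: the original `get_classification_friendly_name` computed the month name but never returned it, and a Python function that falls off its end returns `None`, which is exactly the text that then showed up for every month. A standalone illustration (not project code):

```python
def month_name_broken(month):
    str(month).zfill(2)           # value is computed, then silently discarded

def month_name_fixed(month):
    return str(month).zfill(2)    # the missing `return` was the entire bug

print(month_name_broken(7))   # None  -> rendered as the literal text "None"
print(month_name_fixed(7))    # 07
```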
| {"golden_diff": "diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py\n--- a/nikola/plugins/task/archive.py\n+++ b/nikola/plugins/task/archive.py\n@@ -97,7 +97,7 @@\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n- nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n+ return nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n", "issue": "Archives (CREATE_MONTHLY_ARCHIVE = True) think all months are \"None\"\nhttps://irclogs.getnikola.com/2015/\r\n\r\nTo reproduce locally: get https://github.com/getnikola/irclogs-site create any random files in `posts/*.rst`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Classify the posts in archives.\"\"\"\n\nimport os\nimport nikola.utils\nimport datetime\nfrom nikola.plugin_categories import Taxonomy\n\n\nclass Archive(Taxonomy):\n \"\"\"Classify the post archives.\"\"\"\n\n name = \"classify_archive\"\n\n classification_name = \"archive\"\n overview_page_variable_name = \"archive\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = True\n subcategories_list_template = \"list.tmpl\"\n generate_atom_feeds_for_post_lists = False\n template_for_classification_overview = None\n always_disable_rss = True\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = False\n also_create_classifications_from_other_languages = False\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n # Sanity checks\n if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n # Finish setup\n self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']\n self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']\n self.template_for_single_list = \"archiveindex.tmpl\" if site.config['ARCHIVES_ARE_INDEXES'] else \"list_post.tmpl\"\n # Determine maximum hierarchy height\n if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:\n self.max_levels = 3\n elif 
site.config['CREATE_MONTHLY_ARCHIVE']:\n self.max_levels = 2\n elif site.config['CREATE_SINGLE_ARCHIVE']:\n self.max_levels = 0\n else:\n self.max_levels = 1\n return super(Archive, self).set_site(site)\n\n def get_implicit_classifications(self, lang):\n \"\"\"Return a list of classification strings which should always appear in posts_per_classification.\"\"\"\n return ['']\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]\n return ['/'.join(levels[:self.max_levels])]\n\n def sort_classifications(self, classifications, lang, level=None):\n \"\"\"Sort the given list of classification strings.\"\"\"\n if level in (0, 1):\n # Years or months: sort descending\n classifications.sort()\n classifications.reverse()\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n if len(classification) == 0:\n return \"\"\n elif len(classification) == 1:\n return classification[0]\n elif len(classification) == 2:\n nikola.utils.LocaleBorg().get_month_name(int(classification[1]), lang)\n else:\n # Fallback\n return '/'.join(classification)\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n components = [self.site.config['ARCHIVE_PATH']]\n if classification:\n components.extend(classification)\n add_index = 'always'\n else:\n components.append(os.path.splitext(self.site.config['ARCHIVE_FILENAME'])[0])\n add_index = 'never'\n return [_f for _f in components if _f], add_index\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return classification.split('/') if classification else []\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return '/'.join(hierarchy)\n\n def provide_context_and_uptodate(self, classification, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n hierarchy = self.extract_hierarchy(classification)\n kw = {\n \"messages\": self.site.MESSAGES,\n }\n page_kind = \"list\"\n if self.show_list_as_index:\n if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:\n page_kind = \"index\"\n if len(hierarchy) == 0:\n title = kw[\"messages\"][lang][\"Archive\"]\n kw[\"is_feed_stale\"] = False\n elif len(hierarchy) == 1:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % hierarchy[0]\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y\") != hierarchy[0])\n elif len(hierarchy) == 2:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m\") != classification)\n elif len(hierarchy) == 3:\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=hierarchy[0],\n month=nikola.utils.LocaleBorg().get_month_name(int(hierarchy[1]), lang),\n day=int(hierarchy[2]))\n kw[\"is_feed_stale\"] = (datetime.datetime.utcnow().strftime(\"%Y/%m/%d\") != classification)\n else:\n raise Exception(\"Cannot interpret classification 
{}!\".format(repr(classification)))\n context = {\n \"title\": title,\n \"pagekind\": [page_kind, \"archive_page\"],\n }\n if page_kind == 'index':\n context[\"archive_name\"] = classification if classification else None\n context[\"is_feed_stale\"] = kw[\"is_feed_stale\"]\n kw.update(context)\n return context, kw\n\n def should_generate_classification_page(self, classification, post_list, lang):\n \"\"\"Only generates list of posts for classification if this function returns True.\"\"\"\n return len(classification.split('/')) < 3 or len(post_list) > 0\n", "path": "nikola/plugins/task/archive.py"}]} | 2,600 | 135 |
gh_patches_debug_23125 | rasdani/github-patches | git_diff | optuna__optuna-4147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`PartialFixedSampler` does not handle `None` correctly
### Expected behavior
In the following code, "x" should always be `None`, but sometimes 0 is sampled.
```python
import optuna
def objective(trial):
x = trial.suggest_categorical("x", (None, 0))
return 0
tpe = optuna.samplers.TPESampler()
sampler = optuna.samplers.PartialFixedSampler({"x": None}, tpe)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
```
```
[I 2022-11-06 14:47:19,010] A new study created in memory with name: no-name-4c144572-cd3b-442a-99d8-962f8401367a
[I 2022-11-06 14:47:19,013] Trial 0 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,013] Trial 1 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,014] Trial 2 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,014] Trial 3 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,014] Trial 4 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,015] Trial 5 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,015] Trial 6 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,015] Trial 7 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,016] Trial 8 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
[I 2022-11-06 14:47:19,016] Trial 9 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.
```
### Environment
- Optuna version:3.1.0.dev
- Python version:3.8.6
- OS:macOS-10.16-x86_64-i386-64bit
- (Optional) Other libraries and their versions:
### Error messages, stack traces, or logs
See `Expected behavior`
### Steps to reproduce
See `Expected behavior`
### Additional context (optional)
This code is buggy when param_value is `None`.
https://github.com/optuna/optuna/blob/c155cea56c9fb0f4752a9838e2518d416719f949/optuna/samplers/_partial_fixed.py#L94-L113
</issue>
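The linked lines make the root cause easy to spot: `dict.get(param_name)` returns `None` both when the parameter is not fixed and when it is fixed to the value `None`, so a parameter fixed to `None` is treated as unfixed and resampled. A minimal standalone illustration of the ambiguity and of the membership test that avoids it (the project's actual patch appears further down in this entry):

```python
fixed_params = {"x": None}

# Ambiguous: .get() returns None both when "x" is absent and when it is
# present but fixed to None, so a parameter fixed to None looks "unfixed".
if fixed_params.get("x") is None:
    print("buggy path: would resample x")

# Unambiguous: only parameters that are truly absent should be resampled.
if "x" not in fixed_params:
    print("resample x")
else:
    print("use fixed value:", fixed_params["x"])
```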
<code>
[start of optuna/samplers/_partial_fixed.py]
1 from typing import Any
2 from typing import Dict
3 from typing import Optional
4 from typing import Sequence
5 import warnings
6
7 from optuna._experimental import experimental_class
8 from optuna.distributions import BaseDistribution
9 from optuna.samplers import BaseSampler
10 from optuna.study import Study
11 from optuna.trial import FrozenTrial
12 from optuna.trial import TrialState
13
14
15 @experimental_class("2.4.0")
16 class PartialFixedSampler(BaseSampler):
17 """Sampler with partially fixed parameters.
18
19 .. versionadded:: 2.4.0
20
21 Example:
22
23 After several steps of optimization, you can fix the value of ``y`` and re-optimize it.
24
25 .. testcode::
26
27 import optuna
28
29
30 def objective(trial):
31 x = trial.suggest_float("x", -1, 1)
32 y = trial.suggest_int("y", -1, 1)
33 return x**2 + y
34
35
36 study = optuna.create_study()
37 study.optimize(objective, n_trials=10)
38
39 best_params = study.best_params
40 fixed_params = {"y": best_params["y"]}
41 partial_sampler = optuna.samplers.PartialFixedSampler(fixed_params, study.sampler)
42
43 study.sampler = partial_sampler
44 study.optimize(objective, n_trials=10)
45
46 Args:
47
48 fixed_params:
49 A dictionary of parameters to be fixed.
50
51 base_sampler:
52 A sampler which samples unfixed parameters.
53
54 """
55
56 def __init__(self, fixed_params: Dict[str, Any], base_sampler: BaseSampler) -> None:
57 self._fixed_params = fixed_params
58 self._base_sampler = base_sampler
59
60 def reseed_rng(self) -> None:
61 self._base_sampler.reseed_rng()
62
63 def infer_relative_search_space(
64 self, study: Study, trial: FrozenTrial
65 ) -> Dict[str, BaseDistribution]:
66
67 search_space = self._base_sampler.infer_relative_search_space(study, trial)
68
69 # Remove fixed params from relative search space to return fixed values.
70 for param_name in self._fixed_params.keys():
71 if param_name in search_space:
72 del search_space[param_name]
73
74 return search_space
75
76 def sample_relative(
77 self,
78 study: Study,
79 trial: FrozenTrial,
80 search_space: Dict[str, BaseDistribution],
81 ) -> Dict[str, Any]:
82
83 # Fixed params are never sampled here.
84 return self._base_sampler.sample_relative(study, trial, search_space)
85
86 def sample_independent(
87 self,
88 study: Study,
89 trial: FrozenTrial,
90 param_name: str,
91 param_distribution: BaseDistribution,
92 ) -> Any:
93
94 # If param_name isn't in self._fixed_params.keys(), param_value is set to None.
95 param_value = self._fixed_params.get(param_name)
96
97 if param_value is None:
98 # Unfixed params are sampled here.
99 return self._base_sampler.sample_independent(
100 study, trial, param_name, param_distribution
101 )
102 else:
103 # Fixed params are sampled here.
104 # Check if a parameter value is contained in the range of this distribution.
105 param_value_in_internal_repr = param_distribution.to_internal_repr(param_value)
106 contained = param_distribution._contains(param_value_in_internal_repr)
107
108 if not contained:
109 warnings.warn(
110 f"Fixed parameter '{param_name}' with value {param_value} is out of range "
111 f"for distribution {param_distribution}."
112 )
113 return param_value
114
115 def after_trial(
116 self,
117 study: Study,
118 trial: FrozenTrial,
119 state: TrialState,
120 values: Optional[Sequence[float]],
121 ) -> None:
122
123 self._base_sampler.after_trial(study, trial, state, values)
124
[end of optuna/samplers/_partial_fixed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/samplers/_partial_fixed.py b/optuna/samplers/_partial_fixed.py
--- a/optuna/samplers/_partial_fixed.py
+++ b/optuna/samplers/_partial_fixed.py
@@ -91,10 +91,7 @@
param_distribution: BaseDistribution,
) -> Any:
- # If param_name isn't in self._fixed_params.keys(), param_value is set to None.
- param_value = self._fixed_params.get(param_name)
-
- if param_value is None:
+ if param_name not in self._fixed_params:
# Unfixed params are sampled here.
return self._base_sampler.sample_independent(
study, trial, param_name, param_distribution
@@ -102,6 +99,8 @@
else:
# Fixed params are sampled here.
# Check if a parameter value is contained in the range of this distribution.
+ param_value = self._fixed_params[param_name]
+
param_value_in_internal_repr = param_distribution.to_internal_repr(param_value)
contained = param_distribution._contains(param_value_in_internal_repr)
| {"golden_diff": "diff --git a/optuna/samplers/_partial_fixed.py b/optuna/samplers/_partial_fixed.py\n--- a/optuna/samplers/_partial_fixed.py\n+++ b/optuna/samplers/_partial_fixed.py\n@@ -91,10 +91,7 @@\n param_distribution: BaseDistribution,\n ) -> Any:\n \n- # If param_name isn't in self._fixed_params.keys(), param_value is set to None.\n- param_value = self._fixed_params.get(param_name)\n-\n- if param_value is None:\n+ if param_name not in self._fixed_params:\n # Unfixed params are sampled here.\n return self._base_sampler.sample_independent(\n study, trial, param_name, param_distribution\n@@ -102,6 +99,8 @@\n else:\n # Fixed params are sampled here.\n # Check if a parameter value is contained in the range of this distribution.\n+ param_value = self._fixed_params[param_name]\n+\n param_value_in_internal_repr = param_distribution.to_internal_repr(param_value)\n contained = param_distribution._contains(param_value_in_internal_repr)\n", "issue": "`PartialFixedSampler` does not handle `None` correctly\n### Expected behavior\r\n\r\nIn the following codes, \"x\" should be `None`. But sometimes 0 is sampled.\r\n\r\n```python\r\nimport optuna\r\n\r\ndef objective(trial):\r\n x = trial.suggest_categorical(\"x\", (None, 0))\r\n return 0\r\n\r\ntpe = optuna.samplers.TPESampler()\r\nsampler = optuna.samplers.PartialFixedSampler({\"x\": None}, tpe)\r\nstudy = optuna.create_study(sampler=sampler)\r\nstudy.optimize(objective, n_trials=10)\r\n```\r\n\r\n```\r\n[I 2022-11-06 14:47:19,010] A new study created in memory with name: no-name-4c144572-cd3b-442a-99d8-962f8401367a\r\n[I 2022-11-06 14:47:19,013] Trial 0 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,013] Trial 1 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,014] Trial 2 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,014] Trial 3 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,014] Trial 4 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,015] Trial 5 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,015] Trial 6 finished with value: 0.0 and parameters: {'x': None}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,015] Trial 7 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,016] Trial 8 finished with value: 0.0 and parameters: {'x': 0}. Best is trial 0 with value: 0.0.\r\n[I 2022-11-06 14:47:19,016] Trial 9 finished with value: 0.0 and parameters: {'x': 0}. 
Best is trial 0 with value: 0.0.\r\n```\r\n\r\n### Environment\r\n\r\n- Optuna version:3.1.0.dev\r\n- Python version:3.8.6\r\n- OS:macOS-10.16-x86_64-i386-64bit\r\n- (Optional) Other libraries and their versions:\r\n\r\n\r\n### Error messages, stack traces, or logs\r\n\r\nSee `Expected behavior`\r\n\r\n\r\n### Steps to reproduce\r\n\r\nSee `Expected behavior`\r\n\r\n### Additional context (optional)\r\n\r\nThis code is buggy when param_value is `None`.\r\n\r\nhttps://github.com/optuna/optuna/blob/c155cea56c9fb0f4752a9838e2518d416719f949/optuna/samplers/_partial_fixed.py#L94-L113\n", "before_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Sequence\nimport warnings\n\nfrom optuna._experimental import experimental_class\nfrom optuna.distributions import BaseDistribution\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n@experimental_class(\"2.4.0\")\nclass PartialFixedSampler(BaseSampler):\n \"\"\"Sampler with partially fixed parameters.\n\n .. versionadded:: 2.4.0\n\n Example:\n\n After several steps of optimization, you can fix the value of ``y`` and re-optimize it.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -1, 1)\n y = trial.suggest_int(\"y\", -1, 1)\n return x**2 + y\n\n\n study = optuna.create_study()\n study.optimize(objective, n_trials=10)\n\n best_params = study.best_params\n fixed_params = {\"y\": best_params[\"y\"]}\n partial_sampler = optuna.samplers.PartialFixedSampler(fixed_params, study.sampler)\n\n study.sampler = partial_sampler\n study.optimize(objective, n_trials=10)\n\n Args:\n\n fixed_params:\n A dictionary of parameters to be fixed.\n\n base_sampler:\n A sampler which samples unfixed parameters.\n\n \"\"\"\n\n def __init__(self, fixed_params: Dict[str, Any], base_sampler: BaseSampler) -> None:\n self._fixed_params = fixed_params\n self._base_sampler = base_sampler\n\n def reseed_rng(self) -> None:\n self._base_sampler.reseed_rng()\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n search_space = self._base_sampler.infer_relative_search_space(study, trial)\n\n # Remove fixed params from relative search space to return fixed values.\n for param_name in self._fixed_params.keys():\n if param_name in search_space:\n del search_space[param_name]\n\n return search_space\n\n def sample_relative(\n self,\n study: Study,\n trial: FrozenTrial,\n search_space: Dict[str, BaseDistribution],\n ) -> Dict[str, Any]:\n\n # Fixed params are never sampled here.\n return self._base_sampler.sample_relative(study, trial, search_space)\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n # If param_name isn't in self._fixed_params.keys(), param_value is set to None.\n param_value = self._fixed_params.get(param_name)\n\n if param_value is None:\n # Unfixed params are sampled here.\n return self._base_sampler.sample_independent(\n study, trial, param_name, param_distribution\n )\n else:\n # Fixed params are sampled here.\n # Check if a parameter value is contained in the range of this distribution.\n param_value_in_internal_repr = param_distribution.to_internal_repr(param_value)\n contained = param_distribution._contains(param_value_in_internal_repr)\n\n if not contained:\n warnings.warn(\n f\"Fixed parameter 
'{param_name}' with value {param_value} is out of range \"\n f\"for distribution {param_distribution}.\"\n )\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n\n self._base_sampler.after_trial(study, trial, state, values)\n", "path": "optuna/samplers/_partial_fixed.py"}]} | 2,582 | 241 |
gh_patches_debug_15494 | rasdani/github-patches | git_diff | EleutherAI__gpt-neox-72 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement 1-Bit Adam
Integrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)
</issue>
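For orientation, a hedged sketch rather than this repository's eventual change: with DeepSpeed, 1-bit Adam is normally enabled in the runtime config consumed by `deepspeed.initialize`, not by constructing a `torch.optim.Adam` in the training script. The field names below follow the linked DeepSpeed tutorial; the values are placeholders:

```python
# Sketch of the relevant stanza of a DeepSpeed config (the JSON usually passed
# via --deepspeed_config).  With "OneBitAdam" selected here, the client script
# typically stops passing its own optimizer to deepspeed.initialize so the
# engine can build the compressed optimizer itself.
ds_config = {
    "train_batch_size": 256,          # placeholder
    "optimizer": {
        "type": "OneBitAdam",
        "params": {
            "lr": 2e-4,               # placeholder
            "freeze_step": 2000,      # warm-up steps of plain Adam before compression
            "cuda_aware": False,      # True only with a CUDA-aware MPI build
        },
    },
}
```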
<code>
[start of train_pipeline.py]
1 import random
2 import deepspeed
3 import torch
4 from torch.utils.data import DataLoader
5 from tqdm.auto import trange
6 import torch.distributed as distributed
7
8 from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
9 prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
10
11 from gpt_neox.utils import get_args, get_params
12
13 import GPUtil
14
15 # arguments
16 train_args = get_args()
17 params = get_params(train_args.model)
18
19 # tokenizer
20 tokenizer = get_tokenizer(tokenizer_type=params["tokenizer"].get("type", None),
21 from_pretrained=params["tokenizer"].get("from_pretrained", True),
22 add_padding_token=params["tokenizer"].get("add_padding_token", False))
23 vocab_size = len(tokenizer) if params["vocab_size"] is None else params["vocab_size"]
24
25 # model
26 deepspeed.init_distributed(dist_backend='nccl')
27 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
28
29 def loss_function(x, y):
30 losses = torch.nn.functional.cross_entropy(x, y, reduction='none')
31 loss = losses.mean()
32 return loss
33
34 model = GPTNeoX_Pipe(
35 num_tokens=params["vocab_size"],
36 dim=params["hidden_dim"],
37 seq_len=params["seq_len"],
38 depth=params["n_layers"],
39 heads=params["n_heads"],
40 dim_head=params["dim_head"],
41 loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),
42 num_stages = params.get("pipeline_num_stages", 2)
43 )
44 model = AutoregressiveWrapper(model)
45
46 # optimizer
47 ds_model_params = prepare_optimizer_parameters(model)
48 optim = torch.optim.Adam(model.parameters(), lr=params["learning_rate"])
49
50 # prepare data
51 dset_params = params["dataset"]
52 assert dset_params is not None
53
54 if is_main(train_args):
55 prepare_data(dset_params["name"])
56 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
57 else:
58 torch.distributed.barrier()
59
60 # data loading
61 train_dataset = GPT2Dataset(glob_pattern=dset_params["train_path"],
62 seq_len=params["seq_len"],
63 train=True,
64 **dset_params)
65 train_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get("pin_memory", False))
66
67 eval_dataset = GPT2Dataset(glob_pattern=dset_params["eval_path"],
68 seq_len=params["seq_len"],
69 train=False,
70 **dset_params)
71
72 val_loader = DataLoader(eval_dataset, batch_size=params["eval_batch_size"])
73 val_loader = iter(val_loader)
74
75 # deepspeed loader
76 model_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,
77 model=model,
78 optimizer=optim,
79 model_parameters=ds_model_params,
80 training_data=train_dataset)
81
82
83 batches_to_train = 10000
84
85 pbar = trange(params["num_epochs"], mininterval=10., desc='Training Model', dynamic_ncols=True)
86 for _ in pbar:
87 for i in range(batches_to_train):
88
89 is_main = model_engine.local_rank == 0
90
91 loss = model_engine.train_batch()
92
93 pbar.set_description(f'Training Loss: {loss.item():.4f}')
94 pbar.update()
95
[end of train_pipeline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/train_pipeline.py b/train_pipeline.py
--- a/train_pipeline.py
+++ b/train_pipeline.py
@@ -1,16 +1,21 @@
+import argparse
+import json
import random
+from collections import defaultdict
+import os
import deepspeed
import torch
from torch.utils.data import DataLoader
from tqdm.auto import trange
-import torch.distributed as distributed
-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,
+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,
+ GPTNeoX_Pipe)
+from gpt_neox.datasets import GPT2Dataset
+from gpt_neox.utils import is_main
+import gpt_neox
-from gpt_neox.utils import get_args, get_params
-
-import GPUtil
+WORLD_SIZE = os.getenv('WORLD_SIZE')
# arguments
train_args = get_args()
| {"golden_diff": "diff --git a/train_pipeline.py b/train_pipeline.py\n--- a/train_pipeline.py\n+++ b/train_pipeline.py\n@@ -1,16 +1,21 @@\n+import argparse\n+import json\n import random\n+from collections import defaultdict\n+import os\n import deepspeed\n import torch\n from torch.utils.data import DataLoader\n from tqdm.auto import trange\n-import torch.distributed as distributed\n \n-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,\n+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,\n+ GPTNeoX_Pipe)\n+from gpt_neox.datasets import GPT2Dataset\n+from gpt_neox.utils import is_main\n+import gpt_neox\n \n-from gpt_neox.utils import get_args, get_params\n-\n-import GPUtil\n+WORLD_SIZE = os.getenv('WORLD_SIZE')\n \n # arguments\n train_args = get_args()\n", "issue": "Implement 1-Bit Adam\nIntegrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)\n", "before_files": [{"content": "import random\nimport deepspeed\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import trange\nimport torch.distributed as distributed\n\nfrom gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n\nfrom gpt_neox.utils import get_args, get_params\n\nimport GPUtil\n\n# arguments\ntrain_args = get_args()\nparams = get_params(train_args.model)\n\n# tokenizer\ntokenizer = get_tokenizer(tokenizer_type=params[\"tokenizer\"].get(\"type\", None),\n from_pretrained=params[\"tokenizer\"].get(\"from_pretrained\", True),\n add_padding_token=params[\"tokenizer\"].get(\"add_padding_token\", False))\nvocab_size = len(tokenizer) if params[\"vocab_size\"] is None else params[\"vocab_size\"]\n\n# model\ndeepspeed.init_distributed(dist_backend='nccl')\ntorch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\n\ndef loss_function(x, y):\n losses = torch.nn.functional.cross_entropy(x, y, reduction='none')\n loss = losses.mean()\n return loss\n \nmodel = GPTNeoX_Pipe(\n num_tokens=params[\"vocab_size\"],\n dim=params[\"hidden_dim\"],\n seq_len=params[\"seq_len\"],\n depth=params[\"n_layers\"],\n heads=params[\"n_heads\"],\n dim_head=params[\"dim_head\"],\n loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),\n num_stages = params.get(\"pipeline_num_stages\", 2)\n)\nmodel = AutoregressiveWrapper(model)\n\n# optimizer\nds_model_params = prepare_optimizer_parameters(model)\noptim = torch.optim.Adam(model.parameters(), lr=params[\"learning_rate\"])\n\n# prepare data\ndset_params = params[\"dataset\"]\nassert dset_params is not None\n\nif is_main(train_args):\n prepare_data(dset_params[\"name\"])\n torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\nelse:\n torch.distributed.barrier()\n \n# data loading\ntrain_dataset = GPT2Dataset(glob_pattern=dset_params[\"train_path\"],\n seq_len=params[\"seq_len\"],\n train=True,\n **dset_params)\ntrain_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get(\"pin_memory\", False))\n\neval_dataset = GPT2Dataset(glob_pattern=dset_params[\"eval_path\"],\n seq_len=params[\"seq_len\"],\n train=False,\n **dset_params)\n\nval_loader = DataLoader(eval_dataset, 
batch_size=params[\"eval_batch_size\"])\nval_loader = iter(val_loader)\n\n# deepspeed loader\nmodel_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,\n model=model,\n optimizer=optim,\n model_parameters=ds_model_params,\n training_data=train_dataset)\n\n\nbatches_to_train = 10000\n\npbar = trange(params[\"num_epochs\"], mininterval=10., desc='Training Model', dynamic_ncols=True)\nfor _ in pbar:\n for i in range(batches_to_train):\n\n is_main = model_engine.local_rank == 0\n\n loss = model_engine.train_batch()\n\n pbar.set_description(f'Training Loss: {loss.item():.4f}')\n pbar.update()\n", "path": "train_pipeline.py"}]} | 1,476 | 244 |
gh_patches_debug_33275 | rasdani/github-patches | git_diff | liberapay__liberapay.com-82 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch Jinja templates to line statements
Because they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. [Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).
Any objections?
</issue>
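For readers who have not used the feature: Jinja2's `line_statement_prefix` (and the companion `line_comment_prefix`) lets a template line that starts with the prefix stand in for a `{% ... %}` block tag. A small self-contained example, unrelated to Liberapay's own templates, using the settings that the patch later in this entry adds:

```python
from jinja2 import Environment

env = Environment(
    trim_blocks=True, lstrip_blocks=True,
    line_statement_prefix="%", line_comment_prefix="#",
)

template = env.from_string(
    "# this whole line is a comment\n"
    "% for name in names\n"
    "Hello, {{ name }}!\n"
    "% endfor\n"
)
print(template.render(names=["alice", "bob"]))
# Hello, alice!
# Hello, bob!
```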
<code>
[start of liberapay/utils/emails.py]
1 from __future__ import unicode_literals
2
3 from aspen.simplates.pagination import parse_specline, split_and_escape
4 from aspen_jinja2_renderer import SimplateLoader
5 from jinja2 import Environment
6
7
8 ( VERIFICATION_MISSING
9 , VERIFICATION_FAILED
10 , VERIFICATION_EXPIRED
11 , VERIFICATION_REDUNDANT
12 , VERIFICATION_STYMIED
13 , VERIFICATION_SUCCEEDED
14 ) = range(6)
15
16
17 jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
18 jinja_env_html = Environment(
19 trim_blocks=True, lstrip_blocks=True,
20 autoescape=True, extensions=['jinja2.ext.autoescape'],
21 )
22
23 def compile_email_spt(fpath):
24 r = {}
25 with open(fpath) as f:
26 pages = list(split_and_escape(f.read()))
27 for i, page in enumerate(pages, 1):
28 tmpl = b'\n' * page.offset + page.content
29 content_type, renderer = parse_specline(page.header)
30 key = 'subject' if i == 1 else content_type
31 env = jinja_env_html if content_type == 'text/html' else jinja_env
32 r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)
33 return r
34
[end of liberapay/utils/emails.py]
[start of liberapay/constants.py]
1 from __future__ import print_function, unicode_literals
2
3 from collections import OrderedDict
4 from datetime import date, timedelta
5 from decimal import Decimal
6 import re
7
8
9 ASCII_ALLOWED_IN_USERNAME = set("0123456789"
10 "abcdefghijklmnopqrstuvwxyz"
11 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
12 "-_")
13
14
15 BIRTHDAY = date(2015, 5, 22)
16
17 EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
18 EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
19
20 MAX_TIP = Decimal('100.00')
21 MIN_TIP = Decimal('0.01')
22
23 QUARANTINE = timedelta(weeks=4)
24
25 PASSWORD_MIN_SIZE = 8
26 PASSWORD_MAX_SIZE = 150
27
28 _ = lambda a: a
29 PRIVACY_FIELDS = OrderedDict([
30 ('hide_giving', _("Hide total giving from others.")),
31 ('hide_receiving', _("Hide total receiving from others.")),
32 ('hide_from_search', _("Hide myself from search results.")),
33 ])
34 del _
35 PRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())
36
37 SESSION = b'session'
38 SESSION_REFRESH = timedelta(hours=1)
39 SESSION_TIMEOUT = timedelta(hours=6)
40
41 USERNAME_MAX_SIZE = 32
42
[end of liberapay/constants.py]
[start of liberapay/renderers/jinja2_htmlescaped.py]
1 import aspen_jinja2_renderer as base
2
3 from markupsafe import escape as htmlescape
4
5
6 class HTMLRenderer(base.Renderer):
7 def render_content(self, context):
8
9 # Extend to inject an HTML-escaping function. Since autoescape is on,
10 # template authors shouldn't normally need to use this function, but
11 # having it in the simplate context makes it easier to implement i18n.
12
13 context['escape'] = context['state']['escape'] = htmlescape
14
15 return base.Renderer.render_content(self, context)
16
17
18 class Factory(base.Factory):
19
20 Renderer = HTMLRenderer
21
22 def compile_meta(self, configuration):
23 # Override to turn on autoescaping.
24 loader = base.FileSystemLoader(configuration.project_root)
25 return base.Environment( loader=loader
26 , autoescape=True
27 , extensions=['jinja2.ext.autoescape']
28 )
29
[end of liberapay/renderers/jinja2_htmlescaped.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/constants.py b/liberapay/constants.py
--- a/liberapay/constants.py
+++ b/liberapay/constants.py
@@ -17,6 +17,11 @@
EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
+JINJA_ENV_COMMON = dict(
+ trim_blocks=True, lstrip_blocks=True,
+ line_statement_prefix='%', line_comment_prefix='#',
+)
+
MAX_TIP = Decimal('100.00')
MIN_TIP = Decimal('0.01')
diff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py
--- a/liberapay/renderers/jinja2_htmlescaped.py
+++ b/liberapay/renderers/jinja2_htmlescaped.py
@@ -2,6 +2,8 @@
from markupsafe import escape as htmlescape
+from liberapay.constants import JINJA_ENV_COMMON
+
class HTMLRenderer(base.Renderer):
def render_content(self, context):
@@ -22,7 +24,8 @@
def compile_meta(self, configuration):
# Override to turn on autoescaping.
loader = base.FileSystemLoader(configuration.project_root)
- return base.Environment( loader=loader
- , autoescape=True
- , extensions=['jinja2.ext.autoescape']
- )
+ return base.Environment(
+ loader=loader,
+ autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
+ )
diff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py
--- a/liberapay/utils/emails.py
+++ b/liberapay/utils/emails.py
@@ -4,6 +4,8 @@
from aspen_jinja2_renderer import SimplateLoader
from jinja2 import Environment
+from liberapay.constants import JINJA_ENV_COMMON
+
( VERIFICATION_MISSING
, VERIFICATION_FAILED
@@ -14,10 +16,10 @@
) = range(6)
-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
+jinja_env = Environment(**JINJA_ENV_COMMON)
jinja_env_html = Environment(
- trim_blocks=True, lstrip_blocks=True,
autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
)
def compile_email_spt(fpath):
| {"golden_diff": "diff --git a/liberapay/constants.py b/liberapay/constants.py\n--- a/liberapay/constants.py\n+++ b/liberapay/constants.py\n@@ -17,6 +17,11 @@\n EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\n EMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n \n+JINJA_ENV_COMMON = dict(\n+ trim_blocks=True, lstrip_blocks=True,\n+ line_statement_prefix='%', line_comment_prefix='#',\n+)\n+\n MAX_TIP = Decimal('100.00')\n MIN_TIP = Decimal('0.01')\n \ndiff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py\n--- a/liberapay/renderers/jinja2_htmlescaped.py\n+++ b/liberapay/renderers/jinja2_htmlescaped.py\n@@ -2,6 +2,8 @@\n \n from markupsafe import escape as htmlescape\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n class HTMLRenderer(base.Renderer):\n def render_content(self, context):\n@@ -22,7 +24,8 @@\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n- return base.Environment( loader=loader\n- , autoescape=True\n- , extensions=['jinja2.ext.autoescape']\n- )\n+ return base.Environment(\n+ loader=loader,\n+ autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n+ )\ndiff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py\n--- a/liberapay/utils/emails.py\n+++ b/liberapay/utils/emails.py\n@@ -4,6 +4,8 @@\n from aspen_jinja2_renderer import SimplateLoader\n from jinja2 import Environment\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n ( VERIFICATION_MISSING\n , VERIFICATION_FAILED\n@@ -14,10 +16,10 @@\n ) = range(6)\n \n \n-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\n+jinja_env = Environment(**JINJA_ENV_COMMON)\n jinja_env_html = Environment(\n- trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n )\n \n def compile_email_spt(fpath):\n", "issue": "Switch Jinja templates to line statements\nBecause they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. 
[Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).\n\nAny objections?\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom aspen.simplates.pagination import parse_specline, split_and_escape\nfrom aspen_jinja2_renderer import SimplateLoader\nfrom jinja2 import Environment\n\n\n( VERIFICATION_MISSING\n, VERIFICATION_FAILED\n, VERIFICATION_EXPIRED\n, VERIFICATION_REDUNDANT\n, VERIFICATION_STYMIED\n, VERIFICATION_SUCCEEDED\n ) = range(6)\n\n\njinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\njinja_env_html = Environment(\n trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n)\n\ndef compile_email_spt(fpath):\n r = {}\n with open(fpath) as f:\n pages = list(split_and_escape(f.read()))\n for i, page in enumerate(pages, 1):\n tmpl = b'\\n' * page.offset + page.content\n content_type, renderer = parse_specline(page.header)\n key = 'subject' if i == 1 else content_type\n env = jinja_env_html if content_type == 'text/html' else jinja_env\n r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)\n return r\n", "path": "liberapay/utils/emails.py"}, {"content": "from __future__ import print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import date, timedelta\nfrom decimal import Decimal\nimport re\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\n\nBIRTHDAY = date(2015, 5, 22)\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n\nMAX_TIP = Decimal('100.00')\nMIN_TIP = Decimal('0.01')\n\nQUARANTINE = timedelta(weeks=4)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\n_ = lambda a: a\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results.\")),\n])\ndel _\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nUSERNAME_MAX_SIZE = 32\n", "path": "liberapay/constants.py"}, {"content": "import aspen_jinja2_renderer as base\n\nfrom markupsafe import escape as htmlescape\n\n\nclass HTMLRenderer(base.Renderer):\n def render_content(self, context):\n\n # Extend to inject an HTML-escaping function. Since autoescape is on,\n # template authors shouldn't normally need to use this function, but\n # having it in the simplate context makes it easier to implement i18n.\n\n context['escape'] = context['state']['escape'] = htmlescape\n\n return base.Renderer.render_content(self, context)\n\n\nclass Factory(base.Factory):\n\n Renderer = HTMLRenderer\n\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n return base.Environment( loader=loader\n , autoescape=True\n , extensions=['jinja2.ext.autoescape']\n )\n", "path": "liberapay/renderers/jinja2_htmlescaped.py"}]} | 1,563 | 568 |
gh_patches_debug_35591 | rasdani/github-patches | git_diff | azavea__raster-vision-692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid downloading all rasters
If you have many large scenes, the rasters will all be downloaded at once and you will run out of disk space. This is because the download occurs in the constructor of `RasterSources`. Instead, we should download rasters (https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L79) in the `_activate` method (https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L100) and delete them in the `_deactivate` method.
</issue>
<code>
[start of rastervision/command/command.py]
1 from abc import ABC, abstractmethod
2
3 from rastervision.rv_config import RVConfig
4
5
6 class Command(ABC):
7 @abstractmethod
8 def run(self, tmp_dir):
9 """Run the command."""
10 pass
11
12 def set_tmp_dir(self, tmp_dir):
13 self._tmp_dir = tmp_dir
14
15 def get_tmp_dir(self):
16 if hasattr(self, '_tmp_dir') and self._tmp_dir:
17 if isinstance(self._tmp_dir, str):
18 return self._tmp_dir
19 else:
20 return self._tmp_dir.name
21 else:
22 return RVConfig.get_tmp_dir().name
23
24
25 class NoOpCommand(Command):
26 """Defines a command that does nothing.
27 """
28
29 def run(self, tmp_dir):
30 pass
31
[end of rastervision/command/command.py]
[start of rastervision/data/raster_source/rasterio_source.py]
1 from abc import abstractmethod
2
3 import numpy as np
4 import rasterio
5 from rasterio.enums import (ColorInterp, MaskFlags)
6
7 from rastervision.data import (ActivateMixin, ActivationError)
8 from rastervision.data.raster_source import RasterSource
9 from rastervision.core.box import Box
10
11
12 def load_window(image_dataset, window=None, channels=None, is_masked=False):
13 """Load a window of an image from a TIFF file.
14
15 Args:
16 window: ((row_start, row_stop), (col_start, col_stop)) or
17 ((y_min, y_max), (x_min, x_max))
18 channels: An optional list of bands to read.
19 is_masked: If True, read a masked array from rasterio
20 """
21 if is_masked:
22 im = image_dataset.read(window=window, boundless=True, masked=True)
23 im = np.ma.filled(im, fill_value=0)
24 else:
25 im = image_dataset.read(window=window, boundless=True)
26
27 # Handle non-zero NODATA values by setting the data to 0.
28 for channel, nodata in enumerate(image_dataset.nodatavals):
29 if nodata is not None and nodata != 0:
30 im[channel, im[channel] == nodata] = 0
31
32 if channels:
33 im = im[channels, :]
34 im = np.transpose(im, axes=[1, 2, 0])
35 return im
36
37
38 class RasterioRasterSource(ActivateMixin, RasterSource):
39 def __init__(self, raster_transformers, temp_dir, channel_order=None):
40 self.temp_dir = temp_dir
41 self.imagery_path = self._download_data(temp_dir)
42
43 num_channels = None
44
45 # Activate in order to get information out of the raster
46 with self.activate():
47 colorinterp = self.image_dataset.colorinterp
48 self.channels = [
49 i for i, color_interp in enumerate(colorinterp)
50 if color_interp != ColorInterp.alpha
51 ]
52
53 mask_flags = self.image_dataset.mask_flag_enums
54 self.is_masked = any(
55 [m for m in mask_flags if m != MaskFlags.all_valid])
56
57 self.height = self.image_dataset.height
58 self.width = self.image_dataset.width
59 # Get 1x1 chip (after applying raster transformers) to test dtype
60 # and channel order if needed
61 test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))
62
63 raw_channels = list(range(0, test_chip.shape[2]))
64 self.channel_order = channel_order or raw_channels
65 num_channels = len(raw_channels)
66
67 # Transform the chip to get the final dtype
68 test_chip = test_chip[:, :, self.channel_order]
69 for transformer in raster_transformers:
70 test_chip = transformer.transform(test_chip, channel_order)
71
72 self.dtype = test_chip.dtype
73
74 self._set_crs_transformer()
75
76 super().__init__(channel_order, num_channels, raster_transformers)
77
78 @abstractmethod
79 def _download_data(self, tmp_dir):
80 """Download any data needed for this Raster Source.
81 Return a single local path representing the image or a VRT of the data."""
82 pass
83
84 def get_crs_transformer(self):
85 return self.crs_transformer
86
87 def get_extent(self):
88 return Box(0, 0, self.height, self.width)
89
90 def get_dtype(self):
91 """Return the numpy.dtype of this scene"""
92 return self.dtype
93
94 def _get_chip(self, window):
95 if self.image_dataset is None:
96 raise ActivationError('RasterSource must be activated before use')
97 return load_window(self.image_dataset, window.rasterio_format(),
98 self.channels)
99
100 def _activate(self):
101 self.image_dataset = rasterio.open(self.imagery_path)
102
103 def _deactivate(self):
104 self.image_dataset.close()
105 self.image_dataset = None
106
[end of rastervision/data/raster_source/rasterio_source.py]
[start of rastervision/data/raster_source/image_source.py]
1 from rastervision.data.raster_source.rasterio_source import (
2 RasterioRasterSource)
3 from rastervision.data.crs_transformer.identity_crs_transformer import (
4 IdentityCRSTransformer)
5 from rastervision.utils.files import download_if_needed
6
7
8 class ImageSource(RasterioRasterSource):
9 def __init__(self, uri, raster_transformers, temp_dir, channel_order=None):
10 self.uri = uri
11 super().__init__(raster_transformers, temp_dir, channel_order)
12
13 def _download_data(self, temp_dir):
14 return download_if_needed(self.uri, self.temp_dir)
15
16 def _set_crs_transformer(self):
17 self.crs_transformer = IdentityCRSTransformer()
18
[end of rastervision/data/raster_source/image_source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rastervision/command/command.py b/rastervision/command/command.py
--- a/rastervision/command/command.py
+++ b/rastervision/command/command.py
@@ -19,7 +19,9 @@
else:
return self._tmp_dir.name
else:
- return RVConfig.get_tmp_dir().name
+ tmp_dir = RVConfig.get_tmp_dir()
+ self.set_tmp_dir(tmp_dir)
+ return tmp_dir.name
class NoOpCommand(Command):
diff --git a/rastervision/data/raster_source/image_source.py b/rastervision/data/raster_source/image_source.py
--- a/rastervision/data/raster_source/image_source.py
+++ b/rastervision/data/raster_source/image_source.py
@@ -11,7 +11,7 @@
super().__init__(raster_transformers, temp_dir, channel_order)
def _download_data(self, temp_dir):
- return download_if_needed(self.uri, self.temp_dir)
+ return download_if_needed(self.uri, temp_dir)
def _set_crs_transformer(self):
self.crs_transformer = IdentityCRSTransformer()
diff --git a/rastervision/data/raster_source/rasterio_source.py b/rastervision/data/raster_source/rasterio_source.py
--- a/rastervision/data/raster_source/rasterio_source.py
+++ b/rastervision/data/raster_source/rasterio_source.py
@@ -1,4 +1,5 @@
from abc import abstractmethod
+import tempfile
import numpy as np
import rasterio
@@ -38,8 +39,8 @@
class RasterioRasterSource(ActivateMixin, RasterSource):
def __init__(self, raster_transformers, temp_dir, channel_order=None):
self.temp_dir = temp_dir
- self.imagery_path = self._download_data(temp_dir)
-
+ self.image_temp_dir = None
+ self.image_dataset = None
num_channels = None
# Activate in order to get information out of the raster
@@ -98,8 +99,14 @@
self.channels)
def _activate(self):
+ # Download images to temporary directory and delete it when done.
+ self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)
+ self.imagery_path = self._download_data(self.image_temp_dir.name)
self.image_dataset = rasterio.open(self.imagery_path)
+ self._set_crs_transformer()
def _deactivate(self):
self.image_dataset.close()
self.image_dataset = None
+ self.image_temp_dir.cleanup()
+ self.image_temp_dir = None
| {"golden_diff": "diff --git a/rastervision/command/command.py b/rastervision/command/command.py\n--- a/rastervision/command/command.py\n+++ b/rastervision/command/command.py\n@@ -19,7 +19,9 @@\n else:\n return self._tmp_dir.name\n else:\n- return RVConfig.get_tmp_dir().name\n+ tmp_dir = RVConfig.get_tmp_dir()\n+ self.set_tmp_dir(tmp_dir)\n+ return tmp_dir.name\n \n \n class NoOpCommand(Command):\ndiff --git a/rastervision/data/raster_source/image_source.py b/rastervision/data/raster_source/image_source.py\n--- a/rastervision/data/raster_source/image_source.py\n+++ b/rastervision/data/raster_source/image_source.py\n@@ -11,7 +11,7 @@\n super().__init__(raster_transformers, temp_dir, channel_order)\n \n def _download_data(self, temp_dir):\n- return download_if_needed(self.uri, self.temp_dir)\n+ return download_if_needed(self.uri, temp_dir)\n \n def _set_crs_transformer(self):\n self.crs_transformer = IdentityCRSTransformer()\ndiff --git a/rastervision/data/raster_source/rasterio_source.py b/rastervision/data/raster_source/rasterio_source.py\n--- a/rastervision/data/raster_source/rasterio_source.py\n+++ b/rastervision/data/raster_source/rasterio_source.py\n@@ -1,4 +1,5 @@\n from abc import abstractmethod\n+import tempfile\n \n import numpy as np\n import rasterio\n@@ -38,8 +39,8 @@\n class RasterioRasterSource(ActivateMixin, RasterSource):\n def __init__(self, raster_transformers, temp_dir, channel_order=None):\n self.temp_dir = temp_dir\n- self.imagery_path = self._download_data(temp_dir)\n-\n+ self.image_temp_dir = None\n+ self.image_dataset = None\n num_channels = None\n \n # Activate in order to get information out of the raster\n@@ -98,8 +99,14 @@\n self.channels)\n \n def _activate(self):\n+ # Download images to temporary directory and delete it when done.\n+ self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)\n+ self.imagery_path = self._download_data(self.image_temp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n+ self._set_crs_transformer()\n \n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n+ self.image_temp_dir.cleanup()\n+ self.image_temp_dir = None\n", "issue": "Avoid downloading all rasters\nIf you have many large scenes, the rasters will all be downloaded at once and you will run out of disk space. This is because the download occurs in the constructor of `RasterSources`. Instead, dwe should download rasters https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L79 in the _activate method https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L100 and delete them in the _deactivate method. 
\n", "before_files": [{"content": "from abc import ABC, abstractmethod\n\nfrom rastervision.rv_config import RVConfig\n\n\nclass Command(ABC):\n @abstractmethod\n def run(self, tmp_dir):\n \"\"\"Run the command.\"\"\"\n pass\n\n def set_tmp_dir(self, tmp_dir):\n self._tmp_dir = tmp_dir\n\n def get_tmp_dir(self):\n if hasattr(self, '_tmp_dir') and self._tmp_dir:\n if isinstance(self._tmp_dir, str):\n return self._tmp_dir\n else:\n return self._tmp_dir.name\n else:\n return RVConfig.get_tmp_dir().name\n\n\nclass NoOpCommand(Command):\n \"\"\"Defines a command that does nothing.\n \"\"\"\n\n def run(self, tmp_dir):\n pass\n", "path": "rastervision/command/command.py"}, {"content": "from abc import abstractmethod\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.data import (ActivateMixin, ActivationError)\nfrom rastervision.data.raster_source import RasterSource\nfrom rastervision.core.box import Box\n\n\ndef load_window(image_dataset, window=None, channels=None, is_masked=False):\n \"\"\"Load a window of an image from a TIFF file.\n\n Args:\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n channels: An optional list of bands to read.\n is_masked: If True, read a masked array from rasterio\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n if channels:\n im = im[channels, :]\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\nclass RasterioRasterSource(ActivateMixin, RasterSource):\n def __init__(self, raster_transformers, temp_dir, channel_order=None):\n self.temp_dir = temp_dir\n self.imagery_path = self._download_data(temp_dir)\n\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n colorinterp = self.image_dataset.colorinterp\n self.channels = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n\n mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n # Get 1x1 chip (after applying raster transformers) to test dtype\n # and channel order if needed\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n\n raw_channels = list(range(0, test_chip.shape[2]))\n self.channel_order = channel_order or raw_channels\n num_channels = len(raw_channels)\n\n # Transform the chip to get the final dtype\n test_chip = test_chip[:, :, self.channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n @abstractmethod\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n Return a single local path representing the image or a VRT of the data.\"\"\"\n pass\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n return Box(0, 0, self.height, self.width)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this 
scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window):\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n return load_window(self.image_dataset, window.rasterio_format(),\n self.channels)\n\n def _activate(self):\n self.image_dataset = rasterio.open(self.imagery_path)\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n", "path": "rastervision/data/raster_source/rasterio_source.py"}, {"content": "from rastervision.data.raster_source.rasterio_source import (\n RasterioRasterSource)\nfrom rastervision.data.crs_transformer.identity_crs_transformer import (\n IdentityCRSTransformer)\nfrom rastervision.utils.files import download_if_needed\n\n\nclass ImageSource(RasterioRasterSource):\n def __init__(self, uri, raster_transformers, temp_dir, channel_order=None):\n self.uri = uri\n super().__init__(raster_transformers, temp_dir, channel_order)\n\n def _download_data(self, temp_dir):\n return download_if_needed(self.uri, self.temp_dir)\n\n def _set_crs_transformer(self):\n self.crs_transformer = IdentityCRSTransformer()\n", "path": "rastervision/data/raster_source/image_source.py"}]} | 2,201 | 583 |
gh_patches_debug_2885 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Wrong version number string within docker 1.9.1
### Description
After a successful pull and deploy via docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1, the version string on the paperless-ngx Web-UI is still 1.9.0.

### Steps to reproduce
1. Pull the new version via docker, docker-compose, or Portainer from https://ghcr.io/paperless-ngx/paperless-ngx with tag 1.9.1
2. Access the Web-UI.
3. Login
4. Find the version string on the lower left side.
### Webserver logs
_No response_
### Paperless-ngx version
1.9.1
### Host OS
Alpine Linux x86-64
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
_No response_
### Other
_No response_
</issue>
<code>
[start of src/paperless/version.py]
1 from typing import Final
2 from typing import Tuple
3
4 __version__: Final[Tuple[int, int, int]] = (1, 9, 0)
5 # Version string like X.Y.Z
6 __full_version_str__: Final[str] = ".".join(map(str, __version__))
7 # Version string like X.Y
8 __major_minor_version_str__: Final[str] = ".".join(map(str, __version__[:-1]))
9
[end of src/paperless/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless/version.py b/src/paperless/version.py
--- a/src/paperless/version.py
+++ b/src/paperless/version.py
@@ -1,7 +1,7 @@
from typing import Final
from typing import Tuple
-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)
+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)
# Version string like X.Y.Z
__full_version_str__: Final[str] = ".".join(map(str, __version__))
# Version string like X.Y
| {"golden_diff": "diff --git a/src/paperless/version.py b/src/paperless/version.py\n--- a/src/paperless/version.py\n+++ b/src/paperless/version.py\n@@ -1,7 +1,7 @@\n from typing import Final\n from typing import Tuple\n \n-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)\n # Version string like X.Y.Z\n __full_version_str__: Final[str] = \".\".join(map(str, __version__))\n # Version string like X.Y\n", "issue": "[BUG] Wrong version number string within docker 1.9.1\n### Description\n\nAfter a successful pull and deploy via docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1 the version string on the paperless-ngx Web-UI is still 1.9.0.\r\n\r\n\r\n\n\n### Steps to reproduce\n\n1. Pull the new version via docker, docker-compose or portainer via https://ghcr.io/paperless-ngx/paperless-ngx and tag 1.9.1\r\n2. Access the Web-UI.\r\n3. Login\r\n4. Find the version string on the lower left side.\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.9.1\n\n### Host OS\n\nAlpine Linux x86-64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n", "path": "src/paperless/version.py"}]} | 924 | 135 |
gh_patches_debug_24719 | rasdani/github-patches | git_diff | google__timesketch-2186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
timesketch_cli_client.commands.search bug in timestamp handling
In [cli_client/python/timesketch_cli_client/commands/search.py#L169](https://github.com/google/timesketch/blob/f861eba06c5c8b248dee4610ac6b401a93714331/cli_client/python/timesketch_cli_client/commands/search.py#L169) the `time_range` and `time` args are assigned to methods of `timesketch_api_client.search.DateRangeChip` instead of being used as method arguments.
Example:
```py
range_chip.add_start_time = time_range[0]
range_chip.add_end_time = time_range[1]
```
Should be:
```py
range_chip.add_start_time(time_range[0])
range_chip.add_end_time(time_range[1])
```
</issue>
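The assignment form fails silently: it merely rebinds the attribute on the chip instance, so no exception is raised and the chip's dates are never set. A stripped-down stand-in for `DateRangeChip` (its real internals are more involved) makes the difference visible:

```python
class DateRangeChip:
    """Simplified stand-in for timesketch_api_client.search.DateRangeChip."""

    def __init__(self):
        self.start_time = None

    def add_start_time(self, value):
        self.start_time = value


buggy = DateRangeChip()
buggy.add_start_time = "2020-01-01T12:00"   # replaces the bound method, sets nothing
print(buggy.start_time)                      # -> None, and no error is raised

fixed = DateRangeChip()
fixed.add_start_time("2020-01-01T12:00")     # calls the method as intended
print(fixed.start_time)                      # -> 2020-01-01T12:00
```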
<code>
[start of cli_client/python/timesketch_cli_client/commands/search.py]
1 # Copyright 2021 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Commands for searching Timesketch data."""
15
16 import json
17 import sys
18
19 import click
20 from tabulate import tabulate
21
22 from timesketch_api_client import search
23
24
25 def format_output(search_obj, output_format, show_headers):
26 """Format search result output.
27
28 Args:
29 search_obj: API Search object.
30 output_format: The format to use.
31 show_headers: Boolean indicating if header row should be displayed.
32
33 Returns:
34 Search results in the requested output format.
35 """
36 dataframe = search_obj.to_pandas()
37
38 # Label is being set regardless of return_fields. Remove if it is not in
39 # the list of requested fields.
40 if "label" not in search_obj.return_fields:
41 dataframe = dataframe.drop(columns=["label"], errors="ignore")
42
43 # Remove internal OpenSeearch columns
44 dataframe = dataframe.drop(
45 columns=["__ts_timeline_id", "_id", "_index", "_source", "_type"],
46 errors="ignore",
47 )
48
49 result = None
50 if output_format == "text":
51 result = dataframe.to_string(index=False, header=show_headers)
52 elif output_format == "csv":
53 result = dataframe.to_csv(index=False, header=show_headers)
54 elif output_format == "tabular":
55 if show_headers:
56 result = tabulate(
57 dataframe, headers="keys", tablefmt="psql", showindex=False
58 )
59 else:
60 result = tabulate(dataframe, tablefmt="psql", showindex=False)
61
62 return result
63
64
65 def describe_query(search_obj):
66 """Print details of a search query nd filter."""
67 filter_pretty = json.dumps(search_obj.query_filter, indent=2)
68 click.echo(f"Query string: {search_obj.query_string}")
69 click.echo(f"Return fields: {search_obj.return_fields}")
70 click.echo(f"Filter: {filter_pretty}")
71
72
73 @click.command("search")
74 @click.option(
75 "--query", "-q", default="*", help="Search query in OpenSearch query string format"
76 )
77 @click.option(
78 "--time", "times", multiple=True, help="Datetime filter (e.g. 2020-01-01T12:00)"
79 )
80 @click.option(
81 "--time-range",
82 "time_ranges",
83 multiple=True,
84 nargs=2,
85 help="Datetime range filter (e.g: 2020-01-01 2020-02-01)",
86 )
87 @click.option("--label", "labels", multiple=True, help="Filter events with label")
88 @click.option(
89 "--header/--no-header",
90 default=True,
91 help="Toggle header information (default is to show)",
92 )
93 @click.option(
94 "--output-format", "output", help="Set output format (overrides global setting)"
95 )
96 @click.option(
97 "--return-fields", "return_fields", default="", help="What event fields to show"
98 )
99 @click.option(
100 "--order", default="asc", help="Order the output (asc/desc) based on the time field"
101 )
102 @click.option(
103 "--limit", type=int, default=40, help="Limit amount of events to show (default: 40)"
104 )
105 @click.option("--saved-search", type=int, help="Query and filter from saved search")
106 @click.option(
107 "--describe",
108 is_flag=True,
109 default=False,
110 help="Show the query and filter then exit",
111 )
112 @click.pass_context
113 # pylint: disable=too-many-arguments
114 def search_group(
115 ctx,
116 query,
117 times,
118 time_ranges,
119 labels,
120 header,
121 output,
122 return_fields,
123 order,
124 limit,
125 saved_search,
126 describe,
127 ):
128 """Search and explore."""
129 sketch = ctx.obj.sketch
130 output_format = ctx.obj.output_format
131 search_obj = search.Search(sketch=sketch)
132
133 if output:
134 output_format = output
135
136 new_line = True
137 if output_format == "csv":
138 new_line = False
139
140 # Construct query from saved search and return early.
141 if saved_search:
142 search_obj.from_saved(saved_search)
143 if describe:
144 describe_query(search_obj)
145 return
146 click.echo(format_output(search_obj, output_format, header), nl=new_line)
147 return
148
149 # Construct the query from flags.
150 # TODO (berggren): Add support for query DSL.
151 search_obj.query_string = query
152
153 if return_fields:
154 search_obj.return_fields = return_fields
155
156 if limit:
157 search_obj.max_entries = limit
158
159 if order == "asc":
160 search_obj.order_ascending()
161 elif order == "desc":
162 search_obj.order_descending()
163
164 # TODO: Add term chips.
165 if time_ranges:
166 for time_range in time_ranges:
167 try:
168 range_chip = search.DateRangeChip()
169 range_chip.add_start_time = time_range[0]
170 range_chip.add_end_time = time_range[1]
171 search_obj.add_chip(range_chip)
172 except ValueError:
173 click.echo("Error parsing date (make sure it is ISO formatted)")
174 sys.exit(1)
175
176 # TODO (berggren): This should support dates like 2021-02-12 and then
177 # convert to ISO format.
178 if times:
179 for time in times:
180 try:
181 range_chip = search.DateRangeChip()
182 range_chip.add_start_time = time
183 range_chip.add_end_time = time
184 search_obj.add_chip(range_chip)
185 except ValueError:
186 click.echo("Error parsing date (make sure it is ISO formatted)")
187 sys.exit(1)
188
189 if labels:
190 for label in labels:
191 label_chip = search.LabelChip()
192 if label == "star":
193 label_chip.use_star_label()
194 elif label == "comment":
195 label_chip.use_comment_label()
196 else:
197 label_chip.label = label
198 search_obj.add_chip(label_chip)
199
200 if describe:
201 describe_query(search_obj)
202 return
203
204 click.echo(format_output(search_obj, output_format, header), nl=new_line)
205
206
207 @click.group("saved-searches")
208 def saved_searches_group():
209 """Managed saved searches."""
210
211
212 @saved_searches_group.command("list")
213 @click.pass_context
214 def list_saved_searches(ctx):
215 """List saved searches in the sketch."""
216 sketch = ctx.obj.sketch
217 for saved_search in sketch.list_saved_searches():
218 click.echo(f"{saved_search.id} {saved_search.name}")
219
220
221 @saved_searches_group.command("describe")
222 @click.argument("search_id", type=int, required=False)
223 @click.pass_context
224 def describe_saved_search(ctx, search_id):
225 """Show details for saved search."""
226 sketch = ctx.obj.sketch
227 # TODO (berggren): Add support for saved search name.
228 saved_search = sketch.get_saved_search(search_id=search_id)
229 if not saved_search:
230 click.echo("No such saved search")
231 return
232 filter_pretty = json.dumps(saved_search.query_filter, indent=2)
233 click.echo(f"query_string: {saved_search.query_string}")
234 click.echo(f"query_filter: {filter_pretty}")
235
[end of cli_client/python/timesketch_cli_client/commands/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cli_client/python/timesketch_cli_client/commands/search.py b/cli_client/python/timesketch_cli_client/commands/search.py
--- a/cli_client/python/timesketch_cli_client/commands/search.py
+++ b/cli_client/python/timesketch_cli_client/commands/search.py
@@ -166,8 +166,8 @@
for time_range in time_ranges:
try:
range_chip = search.DateRangeChip()
- range_chip.add_start_time = time_range[0]
- range_chip.add_end_time = time_range[1]
+ range_chip.add_start_time(time_range[0])
+ range_chip.add_end_time(time_range[1])
search_obj.add_chip(range_chip)
except ValueError:
click.echo("Error parsing date (make sure it is ISO formatted)")
@@ -179,8 +179,8 @@
for time in times:
try:
range_chip = search.DateRangeChip()
- range_chip.add_start_time = time
- range_chip.add_end_time = time
+ range_chip.add_start_time(time)
+ range_chip.add_end_time(time)
search_obj.add_chip(range_chip)
except ValueError:
click.echo("Error parsing date (make sure it is ISO formatted)")
| {"golden_diff": "diff --git a/cli_client/python/timesketch_cli_client/commands/search.py b/cli_client/python/timesketch_cli_client/commands/search.py\n--- a/cli_client/python/timesketch_cli_client/commands/search.py\n+++ b/cli_client/python/timesketch_cli_client/commands/search.py\n@@ -166,8 +166,8 @@\n for time_range in time_ranges:\n try:\n range_chip = search.DateRangeChip()\n- range_chip.add_start_time = time_range[0]\n- range_chip.add_end_time = time_range[1]\n+ range_chip.add_start_time(time_range[0])\n+ range_chip.add_end_time(time_range[1])\n search_obj.add_chip(range_chip)\n except ValueError:\n click.echo(\"Error parsing date (make sure it is ISO formatted)\")\n@@ -179,8 +179,8 @@\n for time in times:\n try:\n range_chip = search.DateRangeChip()\n- range_chip.add_start_time = time\n- range_chip.add_end_time = time\n+ range_chip.add_start_time(time)\n+ range_chip.add_end_time(time)\n search_obj.add_chip(range_chip)\n except ValueError:\n click.echo(\"Error parsing date (make sure it is ISO formatted)\")\n", "issue": "timesketch_cli_client.commands.search bug in timestamp handling\nIn [cli_client/python/timesketch_cli_client/commands/search.py#L169](https://github.com/google/timesketch/blob/f861eba06c5c8b248dee4610ac6b401a93714331/cli_client/python/timesketch_cli_client/commands/search.py#L169) the `time_range` and `time` args are assigned to methods of `timesketch_api_client.search.DateRangeChip` instead of being used as method arguments.\r\n\r\nExample:\r\n```py\r\n range_chip.add_start_time = time_range[0]\r\n range_chip.add_end_time = time_range[1]\r\n```\r\n\r\nShould be:\r\n```py\r\n range_chip.add_start_time(time_range[0])\r\n range_chip.add_end_time(time_range[1])\r\n```\n", "before_files": [{"content": "# Copyright 2021 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Commands for searching Timesketch data.\"\"\"\n\nimport json\nimport sys\n\nimport click\nfrom tabulate import tabulate\n\nfrom timesketch_api_client import search\n\n\ndef format_output(search_obj, output_format, show_headers):\n \"\"\"Format search result output.\n\n Args:\n search_obj: API Search object.\n output_format: The format to use.\n show_headers: Boolean indicating if header row should be displayed.\n\n Returns:\n Search results in the requested output format.\n \"\"\"\n dataframe = search_obj.to_pandas()\n\n # Label is being set regardless of return_fields. 
Remove if it is not in\n # the list of requested fields.\n if \"label\" not in search_obj.return_fields:\n dataframe = dataframe.drop(columns=[\"label\"], errors=\"ignore\")\n\n # Remove internal OpenSeearch columns\n dataframe = dataframe.drop(\n columns=[\"__ts_timeline_id\", \"_id\", \"_index\", \"_source\", \"_type\"],\n errors=\"ignore\",\n )\n\n result = None\n if output_format == \"text\":\n result = dataframe.to_string(index=False, header=show_headers)\n elif output_format == \"csv\":\n result = dataframe.to_csv(index=False, header=show_headers)\n elif output_format == \"tabular\":\n if show_headers:\n result = tabulate(\n dataframe, headers=\"keys\", tablefmt=\"psql\", showindex=False\n )\n else:\n result = tabulate(dataframe, tablefmt=\"psql\", showindex=False)\n\n return result\n\n\ndef describe_query(search_obj):\n \"\"\"Print details of a search query nd filter.\"\"\"\n filter_pretty = json.dumps(search_obj.query_filter, indent=2)\n click.echo(f\"Query string: {search_obj.query_string}\")\n click.echo(f\"Return fields: {search_obj.return_fields}\")\n click.echo(f\"Filter: {filter_pretty}\")\n\n\[email protected](\"search\")\[email protected](\n \"--query\", \"-q\", default=\"*\", help=\"Search query in OpenSearch query string format\"\n)\[email protected](\n \"--time\", \"times\", multiple=True, help=\"Datetime filter (e.g. 2020-01-01T12:00)\"\n)\[email protected](\n \"--time-range\",\n \"time_ranges\",\n multiple=True,\n nargs=2,\n help=\"Datetime range filter (e.g: 2020-01-01 2020-02-01)\",\n)\[email protected](\"--label\", \"labels\", multiple=True, help=\"Filter events with label\")\[email protected](\n \"--header/--no-header\",\n default=True,\n help=\"Toggle header information (default is to show)\",\n)\[email protected](\n \"--output-format\", \"output\", help=\"Set output format (overrides global setting)\"\n)\[email protected](\n \"--return-fields\", \"return_fields\", default=\"\", help=\"What event fields to show\"\n)\[email protected](\n \"--order\", default=\"asc\", help=\"Order the output (asc/desc) based on the time field\"\n)\[email protected](\n \"--limit\", type=int, default=40, help=\"Limit amount of events to show (default: 40)\"\n)\[email protected](\"--saved-search\", type=int, help=\"Query and filter from saved search\")\[email protected](\n \"--describe\",\n is_flag=True,\n default=False,\n help=\"Show the query and filter then exit\",\n)\[email protected]_context\n# pylint: disable=too-many-arguments\ndef search_group(\n ctx,\n query,\n times,\n time_ranges,\n labels,\n header,\n output,\n return_fields,\n order,\n limit,\n saved_search,\n describe,\n):\n \"\"\"Search and explore.\"\"\"\n sketch = ctx.obj.sketch\n output_format = ctx.obj.output_format\n search_obj = search.Search(sketch=sketch)\n\n if output:\n output_format = output\n\n new_line = True\n if output_format == \"csv\":\n new_line = False\n\n # Construct query from saved search and return early.\n if saved_search:\n search_obj.from_saved(saved_search)\n if describe:\n describe_query(search_obj)\n return\n click.echo(format_output(search_obj, output_format, header), nl=new_line)\n return\n\n # Construct the query from flags.\n # TODO (berggren): Add support for query DSL.\n search_obj.query_string = query\n\n if return_fields:\n search_obj.return_fields = return_fields\n\n if limit:\n search_obj.max_entries = limit\n\n if order == \"asc\":\n search_obj.order_ascending()\n elif order == \"desc\":\n search_obj.order_descending()\n\n # TODO: Add term chips.\n if time_ranges:\n for time_range 
in time_ranges:\n try:\n range_chip = search.DateRangeChip()\n range_chip.add_start_time = time_range[0]\n range_chip.add_end_time = time_range[1]\n search_obj.add_chip(range_chip)\n except ValueError:\n click.echo(\"Error parsing date (make sure it is ISO formatted)\")\n sys.exit(1)\n\n # TODO (berggren): This should support dates like 2021-02-12 and then\n # convert to ISO format.\n if times:\n for time in times:\n try:\n range_chip = search.DateRangeChip()\n range_chip.add_start_time = time\n range_chip.add_end_time = time\n search_obj.add_chip(range_chip)\n except ValueError:\n click.echo(\"Error parsing date (make sure it is ISO formatted)\")\n sys.exit(1)\n\n if labels:\n for label in labels:\n label_chip = search.LabelChip()\n if label == \"star\":\n label_chip.use_star_label()\n elif label == \"comment\":\n label_chip.use_comment_label()\n else:\n label_chip.label = label\n search_obj.add_chip(label_chip)\n\n if describe:\n describe_query(search_obj)\n return\n\n click.echo(format_output(search_obj, output_format, header), nl=new_line)\n\n\[email protected](\"saved-searches\")\ndef saved_searches_group():\n \"\"\"Managed saved searches.\"\"\"\n\n\n@saved_searches_group.command(\"list\")\[email protected]_context\ndef list_saved_searches(ctx):\n \"\"\"List saved searches in the sketch.\"\"\"\n sketch = ctx.obj.sketch\n for saved_search in sketch.list_saved_searches():\n click.echo(f\"{saved_search.id} {saved_search.name}\")\n\n\n@saved_searches_group.command(\"describe\")\[email protected](\"search_id\", type=int, required=False)\[email protected]_context\ndef describe_saved_search(ctx, search_id):\n \"\"\"Show details for saved search.\"\"\"\n sketch = ctx.obj.sketch\n # TODO (berggren): Add support for saved search name.\n saved_search = sketch.get_saved_search(search_id=search_id)\n if not saved_search:\n click.echo(\"No such saved search\")\n return\n filter_pretty = json.dumps(saved_search.query_filter, indent=2)\n click.echo(f\"query_string: {saved_search.query_string}\")\n click.echo(f\"query_filter: {filter_pretty}\")\n", "path": "cli_client/python/timesketch_cli_client/commands/search.py"}]} | 3,005 | 272 |
gh_patches_debug_17439 | rasdani/github-patches | git_diff | Parsl__parsl-929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Properly handle func names for bash apps
At the moment bash apps always show up as `remote_side_bash_executor` in the monitoring DB because that is what is actually running. We should pass through the name of the app's function instead.
</issue>
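One way to do that (a sketch, not necessarily how Parsl resolves it) is to copy the decorated app's metadata onto the callable that actually gets submitted, e.g. with `functools.update_wrapper`, so whatever reads `__name__` for monitoring sees the app's name. The `index_genome` app below is made up for the example:

```python
from functools import update_wrapper


def remote_side_bash_executor(func, *args, **kwargs):
    # Stand-in for the real executor; monitoring records this callable's __name__.
    return func(*args, **kwargs)


def index_genome():
    # Hypothetical user-defined bash app body.
    return "echo 'indexing...'"


print(remote_side_bash_executor.__name__)   # -> remote_side_bash_executor

# Copy __name__, __doc__, etc. from the app onto the submitted callable.
# (update_wrapper mutates its first argument in place and returns it.)
submitted = update_wrapper(remote_side_bash_executor, index_genome)
print(submitted.__name__)                    # -> index_genome
```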
<code>
[start of parsl/app/bash.py]
1 import logging
2
3 from inspect import signature, Parameter
4 from parsl.app.errors import wrap_error
5 from parsl.app.futures import DataFuture
6 from parsl.app.app import AppBase
7 from parsl.dataflow.dflow import DataFlowKernelLoader
8
9 logger = logging.getLogger(__name__)
10
11
12 def remote_side_bash_executor(func, *args, **kwargs):
13 """Execute the bash app type function and return the command line string.
14
15 This string is reformatted with the *args, and **kwargs
16 from call time.
17 """
18 import os
19 import time
20 import subprocess
21 import logging
22 import parsl.app.errors as pe
23
24 logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)
25
26 # start_t = time.time()
27
28 func_name = func.__name__
29
30 partial_cmdline = None
31
32 # Try to run the func to compose the commandline
33 try:
34 # Execute the func to get the commandline
35 partial_cmdline = func(*args, **kwargs)
36 # Reformat the commandline with current args and kwargs
37 executable = partial_cmdline.format(*args, **kwargs)
38
39 except AttributeError as e:
40 if partial_cmdline is not None:
41 raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
42 else:
43 raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned none - with this exception: {}".format(func_name, e), None)
44
45 except IndexError as e:
46 raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
47 except Exception as e:
48 logging.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
49 raise e
50
51 logging.debug("Executable: %s", executable)
52
53 # Updating stdout, stderr if values passed at call time.
54
55 def open_std_fd(fdname):
56 # fdname is 'stdout' or 'stderr'
57 stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
58 if stdfspec is None:
59 return None
60 elif isinstance(stdfspec, str):
61 fname = stdfspec
62 mode = 'a+'
63 elif isinstance(stdfspec, tuple):
64 if len(stdfspec) != 2:
65 raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
66 fname, mode = stdfspec
67 else:
68 raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
69 try:
70 fd = open(fname, mode)
71 except Exception as e:
72 raise pe.BadStdStreamFile(fname, e)
73 return fd
74
75 std_out = open_std_fd('stdout')
76 std_err = open_std_fd('stderr')
77 timeout = kwargs.get('walltime')
78
79 returncode = None
80 try:
81 proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')
82 proc.wait(timeout=timeout)
83 returncode = proc.returncode
84
85 except subprocess.TimeoutExpired:
86 # print("Timeout")
87 raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
88
89 except Exception as e:
90 # print("Caught exception: ", e)
91 raise pe.AppException("[{}] App caught exception: {}".format(func_name, proc.returncode), e)
92
93 if returncode != 0:
94 raise pe.AppFailure("[{}] App failed with exit code: {}".format(func_name, proc.returncode), proc.returncode)
95
96 # TODO : Add support for globs here
97
98 missing = []
99 for outputfile in kwargs.get('outputs', []):
100 fpath = outputfile
101 if type(outputfile) != str:
102 fpath = outputfile.filepath
103
104 if not os.path.exists(fpath):
105 missing.extend([outputfile])
106
107 if missing:
108 raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
109
110 # exec_duration = time.time() - start_t
111 return returncode
112
113
114 class BashApp(AppBase):
115
116 def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
117 super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)
118 self.kwargs = {}
119
120 # We duplicate the extraction of parameter defaults
121 # to self.kwargs to ensure availability at point of
122 # command string format. Refer: #349
123 sig = signature(func)
124
125 for s in sig.parameters:
126 if sig.parameters[s].default != Parameter.empty:
127 self.kwargs[s] = sig.parameters[s].default
128
129 def __call__(self, *args, **kwargs):
130 """Handle the call to a Bash app.
131
132 Args:
133 - Arbitrary
134
135 Kwargs:
136 - Arbitrary
137
138 Returns:
139 If outputs=[...] was a kwarg then:
140 App_fut, [Data_Futures...]
141 else:
142 App_fut
143
144 """
145 # Update kwargs in the app definition with ones passed in at calltime
146 self.kwargs.update(kwargs)
147
148 if self.data_flow_kernel is None:
149 dfk = DataFlowKernelLoader.dfk()
150 else:
151 dfk = self.data_flow_kernel
152
153 app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,
154 executors=self.executors,
155 fn_hash=self.func_hash,
156 cache=self.cache,
157 **self.kwargs)
158
159 out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)
160 for o in kwargs.get('outputs', [])]
161 app_fut._outputs = out_futs
162
163 return app_fut
164
[end of parsl/app/bash.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/app/bash.py b/parsl/app/bash.py
--- a/parsl/app/bash.py
+++ b/parsl/app/bash.py
@@ -1,6 +1,7 @@
import logging
-
+from functools import update_wrapper
from inspect import signature, Parameter
+
from parsl.app.errors import wrap_error
from parsl.app.futures import DataFuture
from parsl.app.app import AppBase
@@ -150,7 +151,8 @@
else:
dfk = self.data_flow_kernel
- app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,
+ app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),
+ self.func, *args,
executors=self.executors,
fn_hash=self.func_hash,
cache=self.cache,
| {"golden_diff": "diff --git a/parsl/app/bash.py b/parsl/app/bash.py\n--- a/parsl/app/bash.py\n+++ b/parsl/app/bash.py\n@@ -1,6 +1,7 @@\n import logging\n-\n+from functools import update_wrapper\n from inspect import signature, Parameter\n+\n from parsl.app.errors import wrap_error\n from parsl.app.futures import DataFuture\n from parsl.app.app import AppBase\n@@ -150,7 +151,8 @@\n else:\n dfk = self.data_flow_kernel\n \n- app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,\n+ app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),\n+ self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n", "issue": "Properly handle func names for bash apps\nAt the moment bash apps are always `remote_side_bash_executor` in the monitoring DB because that is what is actually running. We should pass through the name of the app's function instead.\n", "before_files": [{"content": "import logging\n\nfrom inspect import signature, Parameter\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.futures import DataFuture\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\nlogger = logging.getLogger(__name__)\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n\n logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)\n\n # start_t = time.time()\n\n func_name = func.__name__\n\n partial_cmdline = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n partial_cmdline = func(*args, **kwargs)\n # Reformat the commandline with current args and kwargs\n executable = partial_cmdline.format(*args, **kwargs)\n\n except AttributeError as e:\n if partial_cmdline is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned none - with this exception: {}\".format(func_name, e), None)\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logging.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logging.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n elif isinstance(stdfspec, str):\n fname = stdfspec\n mode = 'a+'\n elif isinstance(stdfspec, tuple):\n if len(stdfspec) != 2:\n raise pe.BadStdStreamFile(\"std descriptor %s has incorrect tuple length %s\" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))\n fname, mode = stdfspec\n else:\n raise pe.BadStdStreamFile(\"std descriptor %s has unexpected type %s\" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))\n try:\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n returncode = None\n 
try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n # print(\"Timeout\")\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n # print(\"Caught exception: \", e)\n raise pe.AppException(\"[{}] App caught exception: {}\".format(func_name, proc.returncode), e)\n\n if returncode != 0:\n raise pe.AppFailure(\"[{}] App failed with exit code: {}\".format(func_name, proc.returncode), proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile\n if type(outputfile) != str:\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n # exec_duration = time.time() - start_t\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut\n\n \"\"\"\n # Update kwargs in the app definition with ones passed in at calltime\n self.kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n **self.kwargs)\n\n out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n for o in kwargs.get('outputs', [])]\n app_fut._outputs = out_futs\n\n return app_fut\n", "path": "parsl/app/bash.py"}]} | 2,273 | 193 |
gh_patches_debug_26664 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Total span limit
For *really* long-running background jobs, it'd be possible to have a tracked request with so many spans that it causes memory problems. Following #471, we should probably have a (very high) total cap on the number of captured spans.
</issue>
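A sketch of what such a cap could look like, independent of the `TrackedRequest` internals shown below; the limit value and the choice to simply stop recording (rather than, say, aggregating extra spans) are assumptions for illustration:

```python
import logging

logger = logging.getLogger(__name__)

MAX_COMPLETE_SPANS = 1500  # deliberately high; only pathological jobs should hit it


class SpanBudget:
    """Illustrative guard: ignore new spans once a request has recorded too many."""

    def __init__(self, limit=MAX_COMPLETE_SPANS):
        self.limit = limit
        self.recorded = 0
        self.limit_hit = False

    def allow_span(self):
        if self.recorded >= self.limit:
            if not self.limit_hit:
                # Warn once so it's visible that the trace is truncated, then stay quiet.
                logger.warning("Span limit reached; trace will be incomplete.")
                self.limit_hit = True
            return False
        self.recorded += 1
        return True
```

`TrackedRequest.start_span` could consult such a counter and mark any further spans as ignored instead of appending them.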
<code>
[start of src/scout_apm/core/tracked_request.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import logging
6 from uuid import uuid4
7
8 from scout_apm.core import backtrace, objtrace
9 from scout_apm.core.commands import BatchCommand
10 from scout_apm.core.n_plus_one_tracker import NPlusOneTracker
11 from scout_apm.core.samplers.memory import get_rss_in_mb
12 from scout_apm.core.samplers.thread import SamplersThread
13 from scout_apm.core.socket import CoreAgentSocketThread
14
15 logger = logging.getLogger(__name__)
16
17
18 class TrackedRequest(object):
19 """
20 This is a container which keeps track of all module instances for a single
21 request. For convenience they are made available as attributes based on
22 their keyname
23 """
24
25 __slots__ = (
26 "request_id",
27 "start_time",
28 "end_time",
29 "active_spans",
30 "complete_spans",
31 "tags",
32 "is_real_request",
33 "_memory_start",
34 "n_plus_one_tracker",
35 )
36
37 @classmethod
38 def instance(cls):
39 from scout_apm.core.context import context
40
41 return context.get_tracked_request()
42
43 def __init__(self):
44 self.request_id = "req-" + str(uuid4())
45 self.start_time = dt.datetime.utcnow()
46 self.end_time = None
47 self.active_spans = []
48 self.complete_spans = []
49 self.tags = {}
50 self.is_real_request = False
51 self._memory_start = get_rss_in_mb()
52 self.n_plus_one_tracker = NPlusOneTracker()
53 logger.debug("Starting request: %s", self.request_id)
54
55 def __repr__(self):
56 # Incomplete to avoid TMI
57 return "<TrackedRequest(request_id={}, tags={})>".format(
58 repr(self.request_id), repr(self.tags)
59 )
60
61 def tag(self, key, value):
62 if key in self.tags:
63 logger.debug(
64 "Overwriting previously set tag for request %s: %s",
65 self.request_id,
66 key,
67 )
68 self.tags[key] = value
69
70 def start_span(
71 self,
72 operation,
73 ignore=False,
74 ignore_children=False,
75 should_capture_backtrace=True,
76 ):
77 parent = self.current_span()
78 if parent is not None:
79 parent_id = parent.span_id
80 if parent.ignore_children:
81 ignore = True
82 ignore_children = True
83 else:
84 parent_id = None
85
86 new_span = Span(
87 request_id=self.request_id,
88 operation=operation,
89 ignore=ignore,
90 ignore_children=ignore_children,
91 parent=parent_id,
92 should_capture_backtrace=should_capture_backtrace,
93 )
94 self.active_spans.append(new_span)
95 return new_span
96
97 def stop_span(self):
98 try:
99 stopping_span = self.active_spans.pop()
100 except IndexError as exc:
101 logger.debug("Exception when stopping span", exc_info=exc)
102 else:
103 stopping_span.stop()
104 if not stopping_span.ignore:
105 stopping_span.annotate()
106 self.complete_spans.append(stopping_span)
107
108 if len(self.active_spans) == 0:
109 self.finish()
110
111 def current_span(self):
112 if self.active_spans:
113 return self.active_spans[-1]
114 else:
115 return None
116
117 # Request is done, release any info we have about it.
118 def finish(self):
119 logger.debug("Stopping request: %s", self.request_id)
120 if self.end_time is None:
121 self.end_time = dt.datetime.utcnow()
122 if self.is_real_request:
123 self.tag("mem_delta", self._get_mem_delta())
124 if not self.is_ignored():
125 batch_command = BatchCommand.from_tracked_request(self)
126 CoreAgentSocketThread.send(batch_command)
127 SamplersThread.ensure_started()
128
129 from scout_apm.core.context import context
130
131 context.clear_tracked_request(self)
132
133 def _get_mem_delta(self):
134 current_mem = get_rss_in_mb()
135 if current_mem > self._memory_start:
136 return current_mem - self._memory_start
137 return 0.0
138
139 # A request is ignored if the tag "ignore_transaction" is set to True
140 def is_ignored(self):
141 return self.tags.get("ignore_transaction", False)
142
143
144 class Span(object):
145 __slots__ = (
146 "span_id",
147 "start_time",
148 "end_time",
149 "request_id",
150 "operation",
151 "ignore",
152 "ignore_children",
153 "parent",
154 "tags",
155 "start_objtrace_counts",
156 "end_objtrace_counts",
157 "should_capture_backtrace",
158 )
159
160 def __init__(
161 self,
162 request_id=None,
163 operation=None,
164 ignore=False,
165 ignore_children=False,
166 parent=None,
167 should_capture_backtrace=True,
168 ):
169 self.span_id = "span-" + str(uuid4())
170 self.start_time = dt.datetime.utcnow()
171 self.end_time = None
172 self.request_id = request_id
173 self.operation = operation
174 self.ignore = ignore
175 self.ignore_children = ignore_children
176 self.parent = parent
177 self.tags = {}
178 self.start_objtrace_counts = objtrace.get_counts()
179 self.end_objtrace_counts = (0, 0, 0, 0)
180 self.should_capture_backtrace = should_capture_backtrace
181
182 def __repr__(self):
183 # Incomplete to avoid TMI
184 return "<Span(span_id={}, operation={}, ignore={}, tags={})>".format(
185 repr(self.span_id), repr(self.operation), repr(self.ignore), repr(self.tags)
186 )
187
188 def stop(self):
189 self.end_time = dt.datetime.utcnow()
190 self.end_objtrace_counts = objtrace.get_counts()
191
192 def tag(self, key, value):
193 if key in self.tags:
194 logger.debug(
195 "Overwriting previously set tag for span %s: %s", self.span_id, key
196 )
197 self.tags[key] = value
198
199 # In seconds
200 def duration(self):
201 if self.end_time is not None:
202 return (self.end_time - self.start_time).total_seconds()
203 else:
204 # Current, running duration
205 return (dt.datetime.utcnow() - self.start_time).total_seconds()
206
207 # Add any interesting annotations to the span. Assumes that we are in the
208 # process of stopping this span.
209 def annotate(self):
210 self.add_allocation_tags()
211 if not self.should_capture_backtrace:
212 return
213 slow_threshold = 0.5
214 if self.duration() > slow_threshold:
215 self.capture_backtrace()
216
217 def add_allocation_tags(self):
218 if not objtrace.is_extension:
219 return
220
221 start_allocs = (
222 self.start_objtrace_counts[0]
223 + self.start_objtrace_counts[1]
224 + self.start_objtrace_counts[2]
225 )
226 end_allocs = (
227 self.end_objtrace_counts[0]
228 + self.end_objtrace_counts[1]
229 + self.end_objtrace_counts[2]
230 )
231
232 # If even one of the counters rolled over, we're pretty much
233 # guaranteed to have end_allocs be less than start_allocs.
234 # This should rarely happen. Max Unsigned Long Long is a big number
235 if end_allocs - start_allocs < 0:
236 logger.debug(
237 "End allocation count smaller than start allocation "
238 "count for span %s: start = %d, end = %d",
239 self.span_id,
240 start_allocs,
241 end_allocs,
242 )
243 return
244
245 self.tag("allocations", end_allocs - start_allocs)
246 self.tag("start_allocations", start_allocs)
247 self.tag("stop_allocations", end_allocs)
248
249 def capture_backtrace(self):
250 self.tag("stack", backtrace.capture())
251
[end of src/scout_apm/core/tracked_request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/core/tracked_request.py b/src/scout_apm/core/tracked_request.py
--- a/src/scout_apm/core/tracked_request.py
+++ b/src/scout_apm/core/tracked_request.py
@@ -32,8 +32,12 @@
"is_real_request",
"_memory_start",
"n_plus_one_tracker",
+ "hit_max",
)
+ # Stop adding new spans at this point, to avoid exhausting memory
+ MAX_COMPLETE_SPANS = 1500
+
@classmethod
def instance(cls):
from scout_apm.core.context import context
@@ -50,6 +54,7 @@
self.is_real_request = False
self._memory_start = get_rss_in_mb()
self.n_plus_one_tracker = NPlusOneTracker()
+ self.hit_max = False
logger.debug("Starting request: %s", self.request_id)
def __repr__(self):
@@ -83,6 +88,15 @@
else:
parent_id = None
+ if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS:
+ if not self.hit_max:
+ logger.warning(
+ "Hit the maximum number of spans, this trace will be incomplete."
+ )
+ self.hit_max = True
+ ignore = True
+ ignore_children = True
+
new_span = Span(
request_id=self.request_id,
operation=operation,
| {"golden_diff": "diff --git a/src/scout_apm/core/tracked_request.py b/src/scout_apm/core/tracked_request.py\n--- a/src/scout_apm/core/tracked_request.py\n+++ b/src/scout_apm/core/tracked_request.py\n@@ -32,8 +32,12 @@\n \"is_real_request\",\n \"_memory_start\",\n \"n_plus_one_tracker\",\n+ \"hit_max\",\n )\n \n+ # Stop adding new spans at this point, to avoid exhausting memory\n+ MAX_COMPLETE_SPANS = 1500\n+\n @classmethod\n def instance(cls):\n from scout_apm.core.context import context\n@@ -50,6 +54,7 @@\n self.is_real_request = False\n self._memory_start = get_rss_in_mb()\n self.n_plus_one_tracker = NPlusOneTracker()\n+ self.hit_max = False\n logger.debug(\"Starting request: %s\", self.request_id)\n \n def __repr__(self):\n@@ -83,6 +88,15 @@\n else:\n parent_id = None\n \n+ if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS:\n+ if not self.hit_max:\n+ logger.warning(\n+ \"Hit the maximum number of spans, this trace will be incomplete.\"\n+ )\n+ self.hit_max = True\n+ ignore = True\n+ ignore_children = True\n+\n new_span = Span(\n request_id=self.request_id,\n operation=operation,\n", "issue": "Total span limit\nFor *really* long running background jobs it'd be possible to have a tracked request with so many spans it causes memory problems. Following #471 we should probably have a (very high) total cap on the number of captured spans.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\nfrom uuid import uuid4\n\nfrom scout_apm.core import backtrace, objtrace\nfrom scout_apm.core.commands import BatchCommand\nfrom scout_apm.core.n_plus_one_tracker import NPlusOneTracker\nfrom scout_apm.core.samplers.memory import get_rss_in_mb\nfrom scout_apm.core.samplers.thread import SamplersThread\nfrom scout_apm.core.socket import CoreAgentSocketThread\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrackedRequest(object):\n \"\"\"\n This is a container which keeps track of all module instances for a single\n request. 
For convenience they are made available as attributes based on\n their keyname\n \"\"\"\n\n __slots__ = (\n \"request_id\",\n \"start_time\",\n \"end_time\",\n \"active_spans\",\n \"complete_spans\",\n \"tags\",\n \"is_real_request\",\n \"_memory_start\",\n \"n_plus_one_tracker\",\n )\n\n @classmethod\n def instance(cls):\n from scout_apm.core.context import context\n\n return context.get_tracked_request()\n\n def __init__(self):\n self.request_id = \"req-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.active_spans = []\n self.complete_spans = []\n self.tags = {}\n self.is_real_request = False\n self._memory_start = get_rss_in_mb()\n self.n_plus_one_tracker = NPlusOneTracker()\n logger.debug(\"Starting request: %s\", self.request_id)\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<TrackedRequest(request_id={}, tags={})>\".format(\n repr(self.request_id), repr(self.tags)\n )\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for request %s: %s\",\n self.request_id,\n key,\n )\n self.tags[key] = value\n\n def start_span(\n self,\n operation,\n ignore=False,\n ignore_children=False,\n should_capture_backtrace=True,\n ):\n parent = self.current_span()\n if parent is not None:\n parent_id = parent.span_id\n if parent.ignore_children:\n ignore = True\n ignore_children = True\n else:\n parent_id = None\n\n new_span = Span(\n request_id=self.request_id,\n operation=operation,\n ignore=ignore,\n ignore_children=ignore_children,\n parent=parent_id,\n should_capture_backtrace=should_capture_backtrace,\n )\n self.active_spans.append(new_span)\n return new_span\n\n def stop_span(self):\n try:\n stopping_span = self.active_spans.pop()\n except IndexError as exc:\n logger.debug(\"Exception when stopping span\", exc_info=exc)\n else:\n stopping_span.stop()\n if not stopping_span.ignore:\n stopping_span.annotate()\n self.complete_spans.append(stopping_span)\n\n if len(self.active_spans) == 0:\n self.finish()\n\n def current_span(self):\n if self.active_spans:\n return self.active_spans[-1]\n else:\n return None\n\n # Request is done, release any info we have about it.\n def finish(self):\n logger.debug(\"Stopping request: %s\", self.request_id)\n if self.end_time is None:\n self.end_time = dt.datetime.utcnow()\n if self.is_real_request:\n self.tag(\"mem_delta\", self._get_mem_delta())\n if not self.is_ignored():\n batch_command = BatchCommand.from_tracked_request(self)\n CoreAgentSocketThread.send(batch_command)\n SamplersThread.ensure_started()\n\n from scout_apm.core.context import context\n\n context.clear_tracked_request(self)\n\n def _get_mem_delta(self):\n current_mem = get_rss_in_mb()\n if current_mem > self._memory_start:\n return current_mem - self._memory_start\n return 0.0\n\n # A request is ignored if the tag \"ignore_transaction\" is set to True\n def is_ignored(self):\n return self.tags.get(\"ignore_transaction\", False)\n\n\nclass Span(object):\n __slots__ = (\n \"span_id\",\n \"start_time\",\n \"end_time\",\n \"request_id\",\n \"operation\",\n \"ignore\",\n \"ignore_children\",\n \"parent\",\n \"tags\",\n \"start_objtrace_counts\",\n \"end_objtrace_counts\",\n \"should_capture_backtrace\",\n )\n\n def __init__(\n self,\n request_id=None,\n operation=None,\n ignore=False,\n ignore_children=False,\n parent=None,\n should_capture_backtrace=True,\n ):\n self.span_id = \"span-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.request_id = 
request_id\n self.operation = operation\n self.ignore = ignore\n self.ignore_children = ignore_children\n self.parent = parent\n self.tags = {}\n self.start_objtrace_counts = objtrace.get_counts()\n self.end_objtrace_counts = (0, 0, 0, 0)\n self.should_capture_backtrace = should_capture_backtrace\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<Span(span_id={}, operation={}, ignore={}, tags={})>\".format(\n repr(self.span_id), repr(self.operation), repr(self.ignore), repr(self.tags)\n )\n\n def stop(self):\n self.end_time = dt.datetime.utcnow()\n self.end_objtrace_counts = objtrace.get_counts()\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for span %s: %s\", self.span_id, key\n )\n self.tags[key] = value\n\n # In seconds\n def duration(self):\n if self.end_time is not None:\n return (self.end_time - self.start_time).total_seconds()\n else:\n # Current, running duration\n return (dt.datetime.utcnow() - self.start_time).total_seconds()\n\n # Add any interesting annotations to the span. Assumes that we are in the\n # process of stopping this span.\n def annotate(self):\n self.add_allocation_tags()\n if not self.should_capture_backtrace:\n return\n slow_threshold = 0.5\n if self.duration() > slow_threshold:\n self.capture_backtrace()\n\n def add_allocation_tags(self):\n if not objtrace.is_extension:\n return\n\n start_allocs = (\n self.start_objtrace_counts[0]\n + self.start_objtrace_counts[1]\n + self.start_objtrace_counts[2]\n )\n end_allocs = (\n self.end_objtrace_counts[0]\n + self.end_objtrace_counts[1]\n + self.end_objtrace_counts[2]\n )\n\n # If even one of the counters rolled over, we're pretty much\n # guaranteed to have end_allocs be less than start_allocs.\n # This should rarely happen. Max Unsigned Long Long is a big number\n if end_allocs - start_allocs < 0:\n logger.debug(\n \"End allocation count smaller than start allocation \"\n \"count for span %s: start = %d, end = %d\",\n self.span_id,\n start_allocs,\n end_allocs,\n )\n return\n\n self.tag(\"allocations\", end_allocs - start_allocs)\n self.tag(\"start_allocations\", start_allocs)\n self.tag(\"stop_allocations\", end_allocs)\n\n def capture_backtrace(self):\n self.tag(\"stack\", backtrace.capture())\n", "path": "src/scout_apm/core/tracked_request.py"}]} | 2,926 | 325 |
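
As a side note on the record above: the accepted patch bounds memory by capping how many completed spans a tracked request will record. Below is a minimal, framework-free sketch of that guard pattern — the class and names are simplified stand-ins for illustration, not the actual scout_apm API.

```python
# Simplified sketch of the span-cap guard introduced by the patch above (hypothetical names).
MAX_COMPLETE_SPANS = 1500  # stop recording new spans past this point to avoid exhausting memory


class MiniTrackedRequest:
    def __init__(self):
        self.complete_spans = []
        self.hit_max = False

    def start_span(self, operation):
        ignore = False
        if len(self.complete_spans) >= MAX_COMPLETE_SPANS:
            if not self.hit_max:
                print("Hit the maximum number of spans, this trace will be incomplete.")
                self.hit_max = True
            ignore = True  # the span is still created, but flagged so it is never recorded
        return {"operation": operation, "ignore": ignore}

    def stop_span(self, span):
        if not span["ignore"]:
            self.complete_spans.append(span)
```
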
gh_patches_debug_34542 | rasdani/github-patches | git_diff | numpy__numpy-13976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAINT: remove numpy/tools/test-installed-numpy.py ?
Looking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used while `test-installed-numpy.py` still has discussion of nose / python 2.4, and Matti's proposed change there adds `-n` option which does something different in `runtests.py`.
`runtests.py -n` will test installed NumPy instead of rebuilding, so seems like redundancy / maintenance burden we don't need moving forward
</issue>
<code>
[start of tools/test-installed-numpy.py]
1 #!/usr/bin/env python
2 from __future__ import division, absolute_import, print_function
3
4 # A simple script to test the installed version of numpy by calling
5 # 'numpy.test()'. Key features:
6 # -- convenient command-line syntax
7 # -- sets exit status appropriately, useful for automated test environments
8
9 # It would be better to set this up as a module in the numpy namespace, so
10 # that it could be run as:
11 # python -m numpy.run_tests <args>
12 # But, python2.4's -m switch only works with top-level modules, not modules
13 # that are inside packages. So, once we drop 2.4 support, maybe...
14
15 import sys, os
16 # In case we are run from the source directory, we don't want to import numpy
17 # from there, we want to import the installed version:
18 sys.path.pop(0)
19
20 from optparse import OptionParser
21 parser = OptionParser("usage: %prog [options] -- [nosetests options]")
22 parser.add_option("-v", "--verbose",
23 action="count", dest="verbose", default=1,
24 help="increase verbosity")
25 parser.add_option("--doctests",
26 action="store_true", dest="doctests", default=False,
27 help="Run doctests in module")
28 parser.add_option("--coverage",
29 action="store_true", dest="coverage", default=False,
30 help="report coverage of NumPy code (requires 'pytest-cov' module")
31 parser.add_option("-m", "--mode",
32 action="store", dest="mode", default="fast",
33 help="'fast', 'full', or something that could be "
34 "passed to pytest [default: %default]")
35 parser.add_option("-n", "--durations",
36 dest="durations", default=-1,
37 help="show time to run slowest N tests [default: -1]")
38 (options, args) = parser.parse_args()
39
40 import numpy
41
42 # Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
43 # The same flags check is also used in the tests to switch behavior.
44 if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
45 if not numpy.ones((10, 1), order='C').flags.f_contiguous:
46 print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
47 sys.exit(1)
48 elif numpy.ones((10, 1), order='C').flags.f_contiguous:
49 print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
50 sys.exit(1)
51
52 if options.coverage:
53 # Produce code coverage XML report for codecov.io
54 args += ["--cov-report=xml"]
55
56 result = numpy.test(options.mode,
57 verbose=options.verbose,
58 extra_argv=args,
59 doctests=options.doctests,
60 durations=int(options.durations),
61 coverage=options.coverage)
62
63 if result:
64 sys.exit(0)
65 else:
66 sys.exit(1)
67
[end of tools/test-installed-numpy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
deleted file mode 100755
--- a/tools/test-installed-numpy.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-parser.add_option("-n", "--durations",
- dest="durations", default=-1,
- help="show time to run slowest N tests [default: -1]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- durations=int(options.durations),
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
| {"golden_diff": "diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py\ndeleted file mode 100755\n--- a/tools/test-installed-numpy.py\n+++ /dev/null\n@@ -1,66 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-# A simple script to test the installed version of numpy by calling\n-# 'numpy.test()'. Key features:\n-# -- convenient command-line syntax\n-# -- sets exit status appropriately, useful for automated test environments\n-\n-# It would be better to set this up as a module in the numpy namespace, so\n-# that it could be run as:\n-# python -m numpy.run_tests <args>\n-# But, python2.4's -m switch only works with top-level modules, not modules\n-# that are inside packages. So, once we drop 2.4 support, maybe...\n-\n-import sys, os\n-# In case we are run from the source directory, we don't want to import numpy\n-# from there, we want to import the installed version:\n-sys.path.pop(0)\n-\n-from optparse import OptionParser\n-parser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\n-parser.add_option(\"-v\", \"--verbose\",\n- action=\"count\", dest=\"verbose\", default=1,\n- help=\"increase verbosity\")\n-parser.add_option(\"--doctests\",\n- action=\"store_true\", dest=\"doctests\", default=False,\n- help=\"Run doctests in module\")\n-parser.add_option(\"--coverage\",\n- action=\"store_true\", dest=\"coverage\", default=False,\n- help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\n-parser.add_option(\"-m\", \"--mode\",\n- action=\"store\", dest=\"mode\", default=\"fast\",\n- help=\"'fast', 'full', or something that could be \"\n- \"passed to pytest [default: %default]\")\n-parser.add_option(\"-n\", \"--durations\",\n- dest=\"durations\", default=-1,\n- help=\"show time to run slowest N tests [default: -1]\")\n-(options, args) = parser.parse_args()\n-\n-import numpy\n-\n-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n-# The same flags check is also used in the tests to switch behavior.\n-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n- if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n- sys.exit(1)\n-elif numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n- sys.exit(1)\n-\n-if options.coverage:\n- # Produce code coverage XML report for codecov.io\n- args += [\"--cov-report=xml\"]\n-\n-result = numpy.test(options.mode,\n- verbose=options.verbose,\n- extra_argv=args,\n- doctests=options.doctests,\n- durations=int(options.durations),\n- coverage=options.coverage)\n-\n-if result:\n- sys.exit(0)\n-else:\n- sys.exit(1)\n", "issue": "MAINT: remove numpy/tools/test-installed-numpy.py ?\nLooking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. 
Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used while `test-installed-numpy.py` still has discussion of nose / python 2.4, and Matti's proposed change there adds `-n` option which does something different in `runtests.py`.\r\n\r\n`runtests.py -n` will test installed NumPy instead of rebuilding, so seems like redundancy / maintenance burden we don't need moving forward\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\n# A simple script to test the installed version of numpy by calling\n# 'numpy.test()'. Key features:\n# -- convenient command-line syntax\n# -- sets exit status appropriately, useful for automated test environments\n\n# It would be better to set this up as a module in the numpy namespace, so\n# that it could be run as:\n# python -m numpy.run_tests <args>\n# But, python2.4's -m switch only works with top-level modules, not modules\n# that are inside packages. So, once we drop 2.4 support, maybe...\n\nimport sys, os\n# In case we are run from the source directory, we don't want to import numpy\n# from there, we want to import the installed version:\nsys.path.pop(0)\n\nfrom optparse import OptionParser\nparser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\nparser.add_option(\"-v\", \"--verbose\",\n action=\"count\", dest=\"verbose\", default=1,\n help=\"increase verbosity\")\nparser.add_option(\"--doctests\",\n action=\"store_true\", dest=\"doctests\", default=False,\n help=\"Run doctests in module\")\nparser.add_option(\"--coverage\",\n action=\"store_true\", dest=\"coverage\", default=False,\n help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\nparser.add_option(\"-m\", \"--mode\",\n action=\"store\", dest=\"mode\", default=\"fast\",\n help=\"'fast', 'full', or something that could be \"\n \"passed to pytest [default: %default]\")\nparser.add_option(\"-n\", \"--durations\",\n dest=\"durations\", default=-1,\n help=\"show time to run slowest N tests [default: -1]\")\n(options, args) = parser.parse_args()\n\nimport numpy\n\n# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n# The same flags check is also used in the tests to switch behavior.\nif (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n sys.exit(1)\nelif numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n sys.exit(1)\n\nif options.coverage:\n # Produce code coverage XML report for codecov.io\n args += [\"--cov-report=xml\"]\n\nresult = numpy.test(options.mode,\n verbose=options.verbose,\n extra_argv=args,\n doctests=options.doctests,\n durations=int(options.durations),\n coverage=options.coverage)\n\nif result:\n sys.exit(0)\nelse:\n sys.exit(1)\n", "path": "tools/test-installed-numpy.py"}]} | 1,456 | 739 |
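
For context on the record above: the script being removed is essentially a thin wrapper that imports the *installed* NumPy (not the source checkout) and runs its test suite, turning the result into an exit code. A stripped-down sketch of that core flow, assuming the same era of NumPy in which `numpy.test()` returns `True` on success:

```python
# Sketch of what tools/test-installed-numpy.py boils down to (options omitted).
import sys

sys.path.pop(0)          # avoid importing numpy from the source directory
import numpy

ok = numpy.test("fast", verbose=1)   # returns True when the suite passes in this numpy era
sys.exit(0 if ok else 1)
```
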
gh_patches_debug_35722 | rasdani/github-patches | git_diff | mdn__kuma-7782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[regression] Multi-locale search is not searching in all locales
**Summary**
http://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de
actually searches on `locale == ['de']` and
http://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale== ['fr']`.
Originally from here:
https://github.com/mdn/yari/pull/1473#pullrequestreview-584750752
</issue>
<code>
[start of kuma/api/v1/search/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.utils.datastructures import MultiValueDict
4
5
6 class SearchForm(forms.Form):
7 q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
8 locale = forms.MultipleChoiceField(
9 required=False,
10 # The `settings.LANGUAGES` looks like this:
11 # [('en-US', 'English (US)'), ...]
12 # But all locales are stored in lowercase in Elasticsearch, so
13 # force everything to lowercase.
14 choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
15 )
16
17 SORT_CHOICES = ("best", "relevance", "popularity")
18 sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])
19
20 ARCHIVE_CHOICES = ("exclude", "include", "only")
21 archive = forms.ChoiceField(
22 required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]
23 )
24
25 size = forms.IntegerField(required=True, min_value=1, max_value=100)
26 page = forms.IntegerField(required=True, min_value=1, max_value=10)
27
28 def __init__(self, data, **kwargs):
29 initial = kwargs.get("initial", {})
30 # This makes it possible to supply `initial={some dict}` to the form
31 # and have its values become part of the default. Normally, in Django,
32 # the `SomeForm(data, initial={...})` is just used to prepopulate the
33 # HTML generated form widgets.
34 # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
35 data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
36
37 # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
38 # we can't edit it there. So instead, we mutate it here in the `data`
39 if "locale" in data:
40 # Always force it to lowercase, because that's what the ChoiceField
41 # is configured to. And the searches should always be in lower case.
42 # Remember, Django forms will allow this to be a single string
43 # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
44 if isinstance(data["locale"], str):
45 data["locale"] = data["locale"].lower()
46 else:
47 data["locale"] = [x.lower() for x in data["locale"]]
48
49 # If, for keys we have an initial value for, it was passed an empty string,
50 # then swap it for the initial value.
51 # For example `?q=searching&page=` you probably meant to omit it
52 # but "allowing" it to be an empty string makes it convenient for the client.
53 for key, values in data.items():
54 if key in initial and values == "":
55 data[key] = initial[key]
56
57 super().__init__(data, **kwargs)
58
[end of kuma/api/v1/search/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py
--- a/kuma/api/v1/search/forms.py
+++ b/kuma/api/v1/search/forms.py
@@ -3,15 +3,26 @@
from django.utils.datastructures import MultiValueDict
+class MultipleChoiceFieldICase(forms.MultipleChoiceField):
+ """Just like forms.MultipleChoiceField but everything's case insentive.
+
+ For simplicity, this field assumes that each choice is a tuple where
+ the first element is always a string.
+ """
+
+ def valid_value(self, value):
+ return str(value).lower() in [x[0].lower() for x in self.choices]
+
+
class SearchForm(forms.Form):
q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
- locale = forms.MultipleChoiceField(
+ locale = MultipleChoiceFieldICase(
required=False,
# The `settings.LANGUAGES` looks like this:
# [('en-US', 'English (US)'), ...]
# But all locales are stored in lowercase in Elasticsearch, so
# force everything to lowercase.
- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
+ choices=[(code, name) for code, name in settings.LANGUAGES],
)
SORT_CHOICES = ("best", "relevance", "popularity")
@@ -34,18 +45,6 @@
# See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
- # we can't edit it there. So instead, we mutate it here in the `data`
- if "locale" in data:
- # Always force it to lowercase, because that's what the ChoiceField
- # is configured to. And the searches should always be in lower case.
- # Remember, Django forms will allow this to be a single string
- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
- if isinstance(data["locale"], str):
- data["locale"] = data["locale"].lower()
- else:
- data["locale"] = [x.lower() for x in data["locale"]]
-
# If, for keys we have an initial value for, it was passed an empty string,
# then swap it for the initial value.
# For example `?q=searching&page=` you probably meant to omit it
| {"golden_diff": "diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py\n--- a/kuma/api/v1/search/forms.py\n+++ b/kuma/api/v1/search/forms.py\n@@ -3,15 +3,26 @@\n from django.utils.datastructures import MultiValueDict\n \n \n+class MultipleChoiceFieldICase(forms.MultipleChoiceField):\n+ \"\"\"Just like forms.MultipleChoiceField but everything's case insentive.\n+\n+ For simplicity, this field assumes that each choice is a tuple where\n+ the first element is always a string.\n+ \"\"\"\n+\n+ def valid_value(self, value):\n+ return str(value).lower() in [x[0].lower() for x in self.choices]\n+\n+\n class SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n- locale = forms.MultipleChoiceField(\n+ locale = MultipleChoiceFieldICase(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n+ choices=[(code, name) for code, name in settings.LANGUAGES],\n )\n \n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n@@ -34,18 +45,6 @@\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n \n- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n- # we can't edit it there. So instead, we mutate it here in the `data`\n- if \"locale\" in data:\n- # Always force it to lowercase, because that's what the ChoiceField\n- # is configured to. And the searches should always be in lower case.\n- # Remember, Django forms will allow this to be a single string\n- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n- if isinstance(data[\"locale\"], str):\n- data[\"locale\"] = data[\"locale\"].lower()\n- else:\n- data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n-\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n", "issue": "[regression] Multi-locale search is not searching in all locales\n**Summary**\r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de\r\nactually searches on `locale == ['de']` and \r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale== ['fr']`.\r\n\r\nOriginally from here:\r\nhttps://github.com/mdn/yari/pull/1473#pullrequestreview-584750752\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n locale = forms.MultipleChoiceField(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n )\n\n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])\n\n ARCHIVE_CHOICES = (\"exclude\", \"include\", \"only\")\n archive = forms.ChoiceField(\n required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]\n )\n\n size = forms.IntegerField(required=True, min_value=1, max_value=100)\n page = 
forms.IntegerField(required=True, min_value=1, max_value=10)\n\n def __init__(self, data, **kwargs):\n initial = kwargs.get(\"initial\", {})\n # This makes it possible to supply `initial={some dict}` to the form\n # and have its values become part of the default. Normally, in Django,\n # the `SomeForm(data, initial={...})` is just used to prepopulate the\n # HTML generated form widgets.\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n\n # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n # we can't edit it there. So instead, we mutate it here in the `data`\n if \"locale\" in data:\n # Always force it to lowercase, because that's what the ChoiceField\n # is configured to. And the searches should always be in lower case.\n # Remember, Django forms will allow this to be a single string\n # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n if isinstance(data[\"locale\"], str):\n data[\"locale\"] = data[\"locale\"].lower()\n else:\n data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n # but \"allowing\" it to be an empty string makes it convenient for the client.\n for key, values in data.items():\n if key in initial and values == \"\":\n data[key] = initial[key]\n\n super().__init__(data, **kwargs)\n", "path": "kuma/api/v1/search/forms.py"}]} | 1,430 | 603 |
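
The fix in the record above hinges on validating each submitted locale case-insensitively instead of lower-casing the request data up front. A framework-free sketch of that membership check, using a hypothetical subset of the locale choices:

```python
# Plain-Python sketch of the case-insensitive validation idea behind MultipleChoiceFieldICase.
CHOICES = [("en-US", "English (US)"), ("fr", "French"), ("de", "German")]  # example subset


def valid_value(value, choices=CHOICES):
    # Compare everything in lowercase, so "Fr", "fr" and "FR" are all accepted.
    return str(value).lower() in [code.lower() for code, _name in choices]


assert valid_value("Fr") and valid_value("DE") and valid_value("en-us")
assert not valid_value("sv")
```
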
gh_patches_debug_25727 | rasdani/github-patches | git_diff | buildbot__buildbot-185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix bug: GitPoller is never activated on nine
a851083 is not a part of this PR.
I merge upstream/nine in my branch and this commit was added to this PR.
Fix bug: GitPoller is never activated on nine
a851083 is not a part of this PR.
I merge upstream/nine in my branch and this commit was added to this PR.
</issue>
<code>
[start of master/buildbot/steps/trigger.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from buildbot.process.buildstep import LoggingBuildStep, SUCCESS, FAILURE, EXCEPTION
17 from buildbot.process.properties import Properties
18 from buildbot.schedulers.triggerable import Triggerable
19 from twisted.python import log
20 from twisted.internet import defer
21
22 class Trigger(LoggingBuildStep):
23 """I trigger a scheduler.Triggerable, to use one or more Builders as if
24 they were a single buildstep (like a subroutine call).
25 """
26 name = "trigger"
27
28 renderables = [ 'set_properties', 'schedulerNames', 'sourceStamp' ]
29
30 flunkOnFailure = True
31
32 def __init__(self, schedulerNames=[], sourceStamp=None, updateSourceStamp=None, alwaysUseLatest=False,
33 waitForFinish=False, set_properties={}, copy_properties=[], **kwargs):
34 """
35 Trigger the given schedulers when this step is executed.
36
37 @param schedulerNames: A list of scheduler names that should be
38 triggered. Schedulers can be specified using
39 WithProperties, if desired.
40
41 @param sourceStamp: A dict containing the source stamp to use for the
42 build. Keys must include branch, revision, repository and
43 project. In addition, patch_body, patch_level, and
44 patch_subdir can be specified. Only one of
45 sourceStamp, updateSourceStamp and alwaysUseLatest
46 can be specified. Any of these can be specified using
47 WithProperties, if desired.
48
49 @param updateSourceStamp: If True (the default), I will try to give
50 the schedulers an absolute SourceStamp for
51 their builds, so that a HEAD build will use
52 the same revision even if more changes have
53 occurred since my build's update step was
54 run. If False, I will use the original
55 SourceStamp unmodified.
56
57 @param alwaysUseLatest: If False (the default), I will give the
58 SourceStamp of the current build to the
59 schedulers (as controled by updateSourceStamp).
60 If True, I will give the schedulers an empty
61 SourceStamp, corresponding to the latest
62 revision.
63
64 @param waitForFinish: If False (the default), this step will finish
65 as soon as I've started the triggered
66 schedulers. If True, I will wait until all of
67 the triggered schedulers have finished their
68 builds.
69
70 @param set_properties: A dictionary of properties to set for any
71 builds resulting from this trigger. These
72 properties will override properties set in the
73 Triggered scheduler's constructor.
74
75 @param copy_properties: a list of property names to copy verbatim
76 into any builds resulting from this trigger.
77
78 """
79 assert schedulerNames, "You must specify a scheduler to trigger"
80 if sourceStamp and updateSourceStamp:
81 raise ValueError("You can't specify both sourceStamp and updateSourceStamp")
82 if sourceStamp and alwaysUseLatest:
83 raise ValueError("You can't specify both sourceStamp and alwaysUseLatest")
84 if alwaysUseLatest and updateSourceStamp:
85 raise ValueError("You can't specify both alwaysUseLatest and updateSourceStamp")
86 self.schedulerNames = schedulerNames
87 self.sourceStamp = sourceStamp
88 self.updateSourceStamp = updateSourceStamp or not (alwaysUseLatest or sourceStamp)
89 self.alwaysUseLatest = alwaysUseLatest
90 self.waitForFinish = waitForFinish
91 self.set_properties = set_properties
92 self.copy_properties = copy_properties
93 self.running = False
94 self.ended = False
95 LoggingBuildStep.__init__(self, **kwargs)
96 self.addFactoryArguments(schedulerNames=schedulerNames,
97 sourceStamp=sourceStamp,
98 updateSourceStamp=updateSourceStamp,
99 alwaysUseLatest=alwaysUseLatest,
100 waitForFinish=waitForFinish,
101 set_properties=set_properties,
102 copy_properties=copy_properties)
103
104 def interrupt(self, reason):
105 if self.running:
106 self.step_status.setText(["interrupted"])
107 return self.end(EXCEPTION)
108
109 def end(self, result):
110 if not self.ended:
111 self.ended = True
112 return self.finished(result)
113
114 def start(self):
115 properties = self.build.getProperties()
116
117 # make a new properties object from a dict rendered by the old
118 # properties object
119 props_to_set = Properties()
120 props_to_set.update(self.set_properties, "Trigger")
121 for p in self.copy_properties:
122 if p not in properties:
123 continue
124 props_to_set.setProperty(p, properties[p],
125 "%s (in triggering build)" % properties.getPropertySource(p))
126
127 self.running = True
128
129 # (is there an easier way to find the BuildMaster?)
130 all_schedulers = self.build.builder.botmaster.parent.allSchedulers()
131 all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])
132 unknown_schedulers = []
133 triggered_schedulers = []
134
135 # TODO: don't fire any schedulers if we discover an unknown one
136 for scheduler in self.schedulerNames:
137 scheduler = scheduler
138 if all_schedulers.has_key(scheduler):
139 sch = all_schedulers[scheduler]
140 if isinstance(sch, Triggerable):
141 triggered_schedulers.append(scheduler)
142 else:
143 unknown_schedulers.append(scheduler)
144 else:
145 unknown_schedulers.append(scheduler)
146
147 if unknown_schedulers:
148 self.step_status.setText(['no scheduler:'] + unknown_schedulers)
149 return self.end(FAILURE)
150
151 master = self.build.builder.botmaster.parent # seriously?!
152 if self.sourceStamp:
153 d = master.db.sourcestamps.addSourceStamp(**self.sourceStamp)
154 elif self.alwaysUseLatest:
155 d = defer.succeed(None)
156 else:
157 ss = self.build.getSourceStamp()
158 if self.updateSourceStamp:
159 got = properties.getProperty('got_revision')
160 if got:
161 ss = ss.getAbsoluteSourceStamp(got)
162 d = ss.getSourceStampId(master)
163 def start_builds(ssid):
164 dl = []
165 for scheduler in triggered_schedulers:
166 sch = all_schedulers[scheduler]
167 dl.append(sch.trigger(ssid, set_props=props_to_set))
168 self.step_status.setText(['triggered'] + triggered_schedulers)
169
170 d = defer.DeferredList(dl, consumeErrors=1)
171 if self.waitForFinish:
172 return d
173 else:
174 # do something to handle errors
175 d.addErrback(log.err,
176 '(ignored) while invoking Triggerable schedulers:')
177 self.end(SUCCESS)
178 return None
179 d.addCallback(start_builds)
180
181 def cb(rclist):
182 rc = SUCCESS # (this rc is not the same variable as that above)
183 for was_cb, results in rclist:
184 # TODO: make this algo more configurable
185 if not was_cb:
186 rc = EXCEPTION
187 log.err(results)
188 break
189 if results == FAILURE:
190 rc = FAILURE
191 return self.end(rc)
192 def eb(why):
193 return self.end(FAILURE)
194
195 if self.waitForFinish:
196 d.addCallbacks(cb, eb)
197
198 d.addErrback(log.err, '(ignored) while triggering builds:')
199
[end of master/buildbot/steps/trigger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/steps/trigger.py b/master/buildbot/steps/trigger.py
--- a/master/buildbot/steps/trigger.py
+++ b/master/buildbot/steps/trigger.py
@@ -77,15 +77,18 @@
"""
assert schedulerNames, "You must specify a scheduler to trigger"
- if sourceStamp and updateSourceStamp:
+ if sourceStamp and (updateSourceStamp is not None):
raise ValueError("You can't specify both sourceStamp and updateSourceStamp")
if sourceStamp and alwaysUseLatest:
raise ValueError("You can't specify both sourceStamp and alwaysUseLatest")
- if alwaysUseLatest and updateSourceStamp:
+ if alwaysUseLatest and (updateSourceStamp is not None):
raise ValueError("You can't specify both alwaysUseLatest and updateSourceStamp")
self.schedulerNames = schedulerNames
self.sourceStamp = sourceStamp
- self.updateSourceStamp = updateSourceStamp or not (alwaysUseLatest or sourceStamp)
+ if updateSourceStamp is not None:
+ self.updateSourceStamp = updateSourceStamp
+ else:
+ self.updateSourceStamp = not (alwaysUseLatest or sourceStamp)
self.alwaysUseLatest = alwaysUseLatest
self.waitForFinish = waitForFinish
self.set_properties = set_properties
| {"golden_diff": "diff --git a/master/buildbot/steps/trigger.py b/master/buildbot/steps/trigger.py\n--- a/master/buildbot/steps/trigger.py\n+++ b/master/buildbot/steps/trigger.py\n@@ -77,15 +77,18 @@\n \n \"\"\"\n assert schedulerNames, \"You must specify a scheduler to trigger\"\n- if sourceStamp and updateSourceStamp:\n+ if sourceStamp and (updateSourceStamp is not None):\n raise ValueError(\"You can't specify both sourceStamp and updateSourceStamp\")\n if sourceStamp and alwaysUseLatest:\n raise ValueError(\"You can't specify both sourceStamp and alwaysUseLatest\")\n- if alwaysUseLatest and updateSourceStamp:\n+ if alwaysUseLatest and (updateSourceStamp is not None):\n raise ValueError(\"You can't specify both alwaysUseLatest and updateSourceStamp\")\n self.schedulerNames = schedulerNames\n self.sourceStamp = sourceStamp\n- self.updateSourceStamp = updateSourceStamp or not (alwaysUseLatest or sourceStamp)\n+ if updateSourceStamp is not None:\n+ self.updateSourceStamp = updateSourceStamp\n+ else:\n+ self.updateSourceStamp = not (alwaysUseLatest or sourceStamp)\n self.alwaysUseLatest = alwaysUseLatest\n self.waitForFinish = waitForFinish\n self.set_properties = set_properties\n", "issue": "Fix bug: GitPoller is never activated on nine\n a851083 is not a part of this PR. \nI merge upstream/nine in my branch and this commit was added to this PR.\n\nFix bug: GitPoller is never activated on nine\n a851083 is not a part of this PR. \nI merge upstream/nine in my branch and this commit was added to this PR.\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom buildbot.process.buildstep import LoggingBuildStep, SUCCESS, FAILURE, EXCEPTION\nfrom buildbot.process.properties import Properties\nfrom buildbot.schedulers.triggerable import Triggerable\nfrom twisted.python import log\nfrom twisted.internet import defer\n\nclass Trigger(LoggingBuildStep):\n \"\"\"I trigger a scheduler.Triggerable, to use one or more Builders as if\n they were a single buildstep (like a subroutine call).\n \"\"\"\n name = \"trigger\"\n\n renderables = [ 'set_properties', 'schedulerNames', 'sourceStamp' ]\n\n flunkOnFailure = True\n\n def __init__(self, schedulerNames=[], sourceStamp=None, updateSourceStamp=None, alwaysUseLatest=False,\n waitForFinish=False, set_properties={}, copy_properties=[], **kwargs):\n \"\"\"\n Trigger the given schedulers when this step is executed.\n\n @param schedulerNames: A list of scheduler names that should be\n triggered. Schedulers can be specified using\n WithProperties, if desired.\n\n @param sourceStamp: A dict containing the source stamp to use for the\n build. Keys must include branch, revision, repository and\n project. In addition, patch_body, patch_level, and\n patch_subdir can be specified. Only one of\n sourceStamp, updateSourceStamp and alwaysUseLatest\n can be specified. 
Any of these can be specified using\n WithProperties, if desired.\n\n @param updateSourceStamp: If True (the default), I will try to give\n the schedulers an absolute SourceStamp for\n their builds, so that a HEAD build will use\n the same revision even if more changes have\n occurred since my build's update step was\n run. If False, I will use the original\n SourceStamp unmodified.\n\n @param alwaysUseLatest: If False (the default), I will give the\n SourceStamp of the current build to the\n schedulers (as controled by updateSourceStamp).\n If True, I will give the schedulers an empty\n SourceStamp, corresponding to the latest\n revision.\n\n @param waitForFinish: If False (the default), this step will finish\n as soon as I've started the triggered\n schedulers. If True, I will wait until all of\n the triggered schedulers have finished their\n builds.\n\n @param set_properties: A dictionary of properties to set for any\n builds resulting from this trigger. These\n properties will override properties set in the\n Triggered scheduler's constructor.\n\n @param copy_properties: a list of property names to copy verbatim\n into any builds resulting from this trigger.\n\n \"\"\"\n assert schedulerNames, \"You must specify a scheduler to trigger\"\n if sourceStamp and updateSourceStamp:\n raise ValueError(\"You can't specify both sourceStamp and updateSourceStamp\")\n if sourceStamp and alwaysUseLatest:\n raise ValueError(\"You can't specify both sourceStamp and alwaysUseLatest\")\n if alwaysUseLatest and updateSourceStamp:\n raise ValueError(\"You can't specify both alwaysUseLatest and updateSourceStamp\")\n self.schedulerNames = schedulerNames\n self.sourceStamp = sourceStamp\n self.updateSourceStamp = updateSourceStamp or not (alwaysUseLatest or sourceStamp)\n self.alwaysUseLatest = alwaysUseLatest\n self.waitForFinish = waitForFinish\n self.set_properties = set_properties\n self.copy_properties = copy_properties\n self.running = False\n self.ended = False\n LoggingBuildStep.__init__(self, **kwargs)\n self.addFactoryArguments(schedulerNames=schedulerNames,\n sourceStamp=sourceStamp,\n updateSourceStamp=updateSourceStamp,\n alwaysUseLatest=alwaysUseLatest,\n waitForFinish=waitForFinish,\n set_properties=set_properties,\n copy_properties=copy_properties)\n\n def interrupt(self, reason):\n if self.running:\n self.step_status.setText([\"interrupted\"])\n return self.end(EXCEPTION)\n\n def end(self, result):\n if not self.ended:\n self.ended = True\n return self.finished(result)\n\n def start(self):\n properties = self.build.getProperties()\n\n # make a new properties object from a dict rendered by the old \n # properties object\n props_to_set = Properties()\n props_to_set.update(self.set_properties, \"Trigger\")\n for p in self.copy_properties:\n if p not in properties:\n continue\n props_to_set.setProperty(p, properties[p],\n \"%s (in triggering build)\" % properties.getPropertySource(p))\n\n self.running = True\n\n # (is there an easier way to find the BuildMaster?)\n all_schedulers = self.build.builder.botmaster.parent.allSchedulers()\n all_schedulers = dict([(sch.name, sch) for sch in all_schedulers])\n unknown_schedulers = []\n triggered_schedulers = []\n\n # TODO: don't fire any schedulers if we discover an unknown one\n for scheduler in self.schedulerNames:\n scheduler = scheduler\n if all_schedulers.has_key(scheduler):\n sch = all_schedulers[scheduler]\n if isinstance(sch, Triggerable):\n triggered_schedulers.append(scheduler)\n else:\n unknown_schedulers.append(scheduler)\n else:\n 
unknown_schedulers.append(scheduler)\n\n if unknown_schedulers:\n self.step_status.setText(['no scheduler:'] + unknown_schedulers)\n return self.end(FAILURE)\n\n master = self.build.builder.botmaster.parent # seriously?!\n if self.sourceStamp:\n d = master.db.sourcestamps.addSourceStamp(**self.sourceStamp)\n elif self.alwaysUseLatest:\n d = defer.succeed(None)\n else:\n ss = self.build.getSourceStamp()\n if self.updateSourceStamp:\n got = properties.getProperty('got_revision')\n if got:\n ss = ss.getAbsoluteSourceStamp(got)\n d = ss.getSourceStampId(master)\n def start_builds(ssid):\n dl = []\n for scheduler in triggered_schedulers:\n sch = all_schedulers[scheduler]\n dl.append(sch.trigger(ssid, set_props=props_to_set))\n self.step_status.setText(['triggered'] + triggered_schedulers)\n\n d = defer.DeferredList(dl, consumeErrors=1)\n if self.waitForFinish:\n return d\n else:\n # do something to handle errors\n d.addErrback(log.err,\n '(ignored) while invoking Triggerable schedulers:')\n self.end(SUCCESS)\n return None\n d.addCallback(start_builds)\n\n def cb(rclist):\n rc = SUCCESS # (this rc is not the same variable as that above)\n for was_cb, results in rclist:\n # TODO: make this algo more configurable\n if not was_cb:\n rc = EXCEPTION\n log.err(results)\n break\n if results == FAILURE:\n rc = FAILURE\n return self.end(rc)\n def eb(why):\n return self.end(FAILURE)\n\n if self.waitForFinish:\n d.addCallbacks(cb, eb)\n\n d.addErrback(log.err, '(ignored) while triggering builds:')\n", "path": "master/buildbot/steps/trigger.py"}]} | 2,798 | 287 |
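
The essence of the patch above is the "`None` means not specified" default idiom, so that an explicit `updateSourceStamp=False` is no longer silently overridden by the computed default. A small standalone sketch of that defaulting logic (helper name is hypothetical):

```python
# Standalone sketch of the defaulting logic from the patch above (names simplified).
def resolve_update_source_stamp(update_source_stamp=None,
                                always_use_latest=False,
                                source_stamp=None):
    if update_source_stamp is not None:
        # An explicit True/False from the caller always wins.
        return update_source_stamp
    # Otherwise default to True unless another source-stamp option was chosen.
    return not (always_use_latest or source_stamp)


assert resolve_update_source_stamp(False) is False             # explicit False is respected now
assert resolve_update_source_stamp() is True                   # unset -> defaults to True
assert resolve_update_source_stamp(always_use_latest=True) is False
```
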
gh_patches_debug_24202 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-57 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix project description on PyPI
PyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.
</issue>
<code>
[start of setup.py]
1 from distutils.core import setup
2
3 setup(
4 name='heat',
5 packages=['heat'],
6 version='0.0.1',
7 description='A framework for high performance data analytics and machine learning.',
8 author='Helmholtz Association',
9 author_email='[email protected]',
10 url='https://github.com/helmholtz-analytics/heat',
11 # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
12 keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
13 classifiers=[],
14 install_requires=[
15 'numpy>=1.13.0',
16 # 'torch>=0.4.0'
17 ],
18 extras_require={
19 'hdf5': ['h5py>=2.8.0']
20 }
21 )
22
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,19 +1,28 @@
from distutils.core import setup
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
setup(
name='heat',
packages=['heat'],
version='0.0.1',
description='A framework for high performance data analytics and machine learning.',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
author='Helmholtz Association',
author_email='[email protected]',
url='https://github.com/helmholtz-analytics/heat',
- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
- classifiers=[],
+ classifiers=[
+ 'Development Status :: 2 - Pre-Alpha',
+ 'Programming Language :: Python :: 3.5',
+ 'License :: OSI Approved :: MIT License',
+ 'Intended Audience :: Science/Research',
+ 'Topic :: Scientific/Engineering'
+ ],
install_requires=[
'numpy>=1.13.0',
- # 'torch>=0.4.0'
],
extras_require={
'hdf5': ['h5py>=2.8.0']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,19 +1,28 @@\n from distutils.core import setup\n \n+with open(\"README.md\", \"r\") as fh:\n+ long_description = fh.read()\n+\n setup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n- classifiers=[],\n+ classifiers=[\n+ 'Development Status :: 2 - Pre-Alpha',\n+ 'Programming Language :: Python :: 3.5',\n+ 'License :: OSI Approved :: MIT License',\n+ 'Intended Audience :: Science/Research',\n+ 'Topic :: Scientific/Engineering'\n+ ],\n install_requires=[\n 'numpy>=1.13.0',\n- # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n", "issue": "Fix project description on PyPI\nPyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n classifiers=[],\n install_requires=[\n 'numpy>=1.13.0',\n # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n }\n)\n", "path": "setup.py"}]} | 791 | 323 |
gh_patches_debug_13848 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1014 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
opentelemetry-instrumentation-urllib
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """
17 This library allows tracing HTTP requests made by the
18 `urllib https://docs.python.org/3/library/urllib.html>`_ library.
19
20 Usage
21 -----
22
23 .. code-block:: python
24
25 from urllib import request
26 from opentelemetry.instrumentation.urllib import URLLibInstrumentor
27
28 # You can optionally pass a custom TracerProvider to
29 # URLLibInstrumentor().instrument()
30
31 URLLibInstrumentor().instrument()
32 req = request.Request('https://postman-echo.com/post', method="POST")
33 r = request.urlopen(req)
34
35 Configuration
36 -------------
37
38 Request/Response hooks
39 **********************
40
41 The urllib instrumentation supports extending tracing behavior with the help of
42 request and response hooks. These are functions that are called back by the instrumentation
43 right after a Span is created for a request and right before the span is finished processing a response respectively.
44 The hooks can be configured as follows:
45
46 ..code:: python
47
48 # `request_obj` is an instance of urllib.request.Request
49 def request_hook(span, request_obj):
50 pass
51
52 # `request_obj` is an instance of urllib.request.Request
53 # `response` is an instance of http.client.HTTPResponse
54 def response_hook(span, request_obj, response)
55 pass
56
57 URLLibInstrumentor.instrument(
58 request_hook=request_hook, response_hook=response_hook)
59 )
60
61 API
62 ---
63 """
64
65 import functools
66 import types
67 import typing
68
69 # from urllib import response
70 from http import client
71 from typing import Collection
72 from urllib.request import ( # pylint: disable=no-name-in-module,import-error
73 OpenerDirector,
74 Request,
75 )
76
77 from opentelemetry import context
78 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
79 from opentelemetry.instrumentation.urllib.package import _instruments
80 from opentelemetry.instrumentation.urllib.version import __version__
81 from opentelemetry.instrumentation.utils import (
82 _SUPPRESS_INSTRUMENTATION_KEY,
83 http_status_to_status_code,
84 )
85 from opentelemetry.propagate import inject
86 from opentelemetry.semconv.trace import SpanAttributes
87 from opentelemetry.trace import Span, SpanKind, get_tracer
88 from opentelemetry.trace.status import Status
89 from opentelemetry.util.http import remove_url_credentials
90
91 # A key to a context variable to avoid creating duplicate spans when instrumenting
92 # both, Session.request and Session.send, since Session.request calls into Session.send
93 _SUPPRESS_HTTP_INSTRUMENTATION_KEY = context.create_key(
94 "suppress_http_instrumentation"
95 )
96
97 _RequestHookT = typing.Optional[typing.Callable[[Span, Request], None]]
98 _ResponseHookT = typing.Optional[
99 typing.Callable[[Span, Request, client.HTTPResponse], None]
100 ]
101
102
103 class URLLibInstrumentor(BaseInstrumentor):
104 """An instrumentor for urllib
105 See `BaseInstrumentor`
106 """
107
108 def instrumentation_dependencies(self) -> Collection[str]:
109 return _instruments
110
111 def _instrument(self, **kwargs):
112 """Instruments urllib module
113
114 Args:
115 **kwargs: Optional arguments
116 ``tracer_provider``: a TracerProvider, defaults to global
117 ``request_hook``: An optional callback invoked that is invoked right after a span is created.
118 ``response_hook``: An optional callback which is invoked right before the span is finished processing a response
119 """
120 tracer_provider = kwargs.get("tracer_provider")
121 tracer = get_tracer(__name__, __version__, tracer_provider)
122 _instrument(
123 tracer,
124 request_hook=kwargs.get("request_hook"),
125 response_hook=kwargs.get("response_hook"),
126 )
127
128 def _uninstrument(self, **kwargs):
129 _uninstrument()
130
131 def uninstrument_opener(
132 self, opener: OpenerDirector
133 ): # pylint: disable=no-self-use
134 """uninstrument_opener a specific instance of urllib.request.OpenerDirector"""
135 _uninstrument_from(opener, restore_as_bound_func=True)
136
137
138 def _instrument(
139 tracer,
140 request_hook: _RequestHookT = None,
141 response_hook: _ResponseHookT = None,
142 ):
143 """Enables tracing of all requests calls that go through
144 :code:`urllib.Client._make_request`"""
145
146 opener_open = OpenerDirector.open
147
148 @functools.wraps(opener_open)
149 def instrumented_open(opener, fullurl, data=None, timeout=None):
150
151 if isinstance(fullurl, str):
152 request_ = Request(fullurl, data)
153 else:
154 request_ = fullurl
155
156 def get_or_create_headers():
157 return getattr(request_, "headers", {})
158
159 def call_wrapped():
160 return opener_open(opener, request_, data=data, timeout=timeout)
161
162 return _instrumented_open_call(
163 opener, request_, call_wrapped, get_or_create_headers
164 )
165
166 def _instrumented_open_call(
167 _, request, call_wrapped, get_or_create_headers
168 ): # pylint: disable=too-many-locals
169 if context.get_value(
170 _SUPPRESS_INSTRUMENTATION_KEY
171 ) or context.get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY):
172 return call_wrapped()
173
174 method = request.get_method().upper()
175 url = request.full_url
176
177 span_name = f"HTTP {method}".strip()
178
179 url = remove_url_credentials(url)
180
181 labels = {
182 SpanAttributes.HTTP_METHOD: method,
183 SpanAttributes.HTTP_URL: url,
184 }
185
186 with tracer.start_as_current_span(
187 span_name, kind=SpanKind.CLIENT
188 ) as span:
189 exception = None
190 if callable(request_hook):
191 request_hook(span, request)
192 if span.is_recording():
193 span.set_attribute(SpanAttributes.HTTP_METHOD, method)
194 span.set_attribute(SpanAttributes.HTTP_URL, url)
195
196 headers = get_or_create_headers()
197 inject(headers)
198
199 token = context.attach(
200 context.set_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY, True)
201 )
202 try:
203 result = call_wrapped() # *** PROCEED
204 except Exception as exc: # pylint: disable=W0703
205 exception = exc
206 result = getattr(exc, "file", None)
207 finally:
208 context.detach(token)
209
210 if result is not None:
211
212 code_ = result.getcode()
213 labels[SpanAttributes.HTTP_STATUS_CODE] = str(code_)
214
215 if span.is_recording():
216 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, code_)
217 span.set_status(Status(http_status_to_status_code(code_)))
218
219 ver_ = str(getattr(result, "version", ""))
220 if ver_:
221 labels[
222 SpanAttributes.HTTP_FLAVOR
223 ] = f"{ver_[:1]}.{ver_[:-1]}"
224
225 if callable(response_hook):
226 response_hook(span, request, result)
227
228 if exception is not None:
229 raise exception.with_traceback(exception.__traceback__)
230
231 return result
232
233 instrumented_open.opentelemetry_instrumentation_urllib_applied = True
234 OpenerDirector.open = instrumented_open
235
236
237 def _uninstrument():
238 """Disables instrumentation of :code:`urllib` through this module.
239
240 Note that this only works if no other module also patches urllib."""
241 _uninstrument_from(OpenerDirector)
242
243
244 def _uninstrument_from(instr_root, restore_as_bound_func=False):
245
246 instr_func_name = "open"
247 instr_func = getattr(instr_root, instr_func_name)
248 if not getattr(
249 instr_func,
250 "opentelemetry_instrumentation_urllib_applied",
251 False,
252 ):
253 return
254
255 original = instr_func.__wrapped__ # pylint:disable=no-member
256 if restore_as_bound_func:
257 original = types.MethodType(original, instr_root)
258 setattr(instr_root, instr_func_name, original)
259
[end of instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py
@@ -184,14 +184,11 @@
}
with tracer.start_as_current_span(
- span_name, kind=SpanKind.CLIENT
+ span_name, kind=SpanKind.CLIENT, attributes=labels
) as span:
exception = None
if callable(request_hook):
request_hook(span, request)
- if span.is_recording():
- span.set_attribute(SpanAttributes.HTTP_METHOD, method)
- span.set_attribute(SpanAttributes.HTTP_URL, url)
headers = get_or_create_headers()
inject(headers)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py\n@@ -184,14 +184,11 @@\n }\n \n with tracer.start_as_current_span(\n- span_name, kind=SpanKind.CLIENT\n+ span_name, kind=SpanKind.CLIENT, attributes=labels\n ) as span:\n exception = None\n if callable(request_hook):\n request_hook(span, request)\n- if span.is_recording():\n- span.set_attribute(SpanAttributes.HTTP_METHOD, method)\n- span.set_attribute(SpanAttributes.HTTP_URL, url)\n \n headers = get_or_create_headers()\n inject(headers)\n", "issue": "opentelemetry-instrumentation-urllib\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nThis library allows tracing HTTP requests made by the\n`urllib https://docs.python.org/3/library/urllib.html>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from urllib import request\n from opentelemetry.instrumentation.urllib import URLLibInstrumentor\n\n # You can optionally pass a custom TracerProvider to\n # URLLibInstrumentor().instrument()\n\n URLLibInstrumentor().instrument()\n req = request.Request('https://postman-echo.com/post', method=\"POST\")\n r = request.urlopen(req)\n\nConfiguration\n-------------\n\nRequest/Response hooks\n**********************\n\nThe urllib instrumentation supports extending tracing behavior with the help of\nrequest and response hooks. 
These are functions that are called back by the instrumentation\nright after a Span is created for a request and right before the span is finished processing a response respectively.\nThe hooks can be configured as follows:\n\n..code:: python\n\n # `request_obj` is an instance of urllib.request.Request\n def request_hook(span, request_obj):\n pass\n\n # `request_obj` is an instance of urllib.request.Request\n # `response` is an instance of http.client.HTTPResponse\n def response_hook(span, request_obj, response)\n pass\n\n URLLibInstrumentor.instrument(\n request_hook=request_hook, response_hook=response_hook)\n )\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport types\nimport typing\n\n# from urllib import response\nfrom http import client\nfrom typing import Collection\nfrom urllib.request import ( # pylint: disable=no-name-in-module,import-error\n OpenerDirector,\n Request,\n)\n\nfrom opentelemetry import context\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.urllib.package import _instruments\nfrom opentelemetry.instrumentation.urllib.version import __version__\nfrom opentelemetry.instrumentation.utils import (\n _SUPPRESS_INSTRUMENTATION_KEY,\n http_status_to_status_code,\n)\nfrom opentelemetry.propagate import inject\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import Span, SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status\nfrom opentelemetry.util.http import remove_url_credentials\n\n# A key to a context variable to avoid creating duplicate spans when instrumenting\n# both, Session.request and Session.send, since Session.request calls into Session.send\n_SUPPRESS_HTTP_INSTRUMENTATION_KEY = context.create_key(\n \"suppress_http_instrumentation\"\n)\n\n_RequestHookT = typing.Optional[typing.Callable[[Span, Request], None]]\n_ResponseHookT = typing.Optional[\n typing.Callable[[Span, Request, client.HTTPResponse], None]\n]\n\n\nclass URLLibInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for urllib\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments urllib module\n\n Args:\n **kwargs: Optional arguments\n ``tracer_provider``: a TracerProvider, defaults to global\n ``request_hook``: An optional callback invoked that is invoked right after a span is created.\n ``response_hook``: An optional callback which is invoked right before the span is finished processing a response\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n _instrument(\n tracer,\n request_hook=kwargs.get(\"request_hook\"),\n response_hook=kwargs.get(\"response_hook\"),\n )\n\n def _uninstrument(self, **kwargs):\n _uninstrument()\n\n def uninstrument_opener(\n self, opener: OpenerDirector\n ): # pylint: disable=no-self-use\n \"\"\"uninstrument_opener a specific instance of urllib.request.OpenerDirector\"\"\"\n _uninstrument_from(opener, restore_as_bound_func=True)\n\n\ndef _instrument(\n tracer,\n request_hook: _RequestHookT = None,\n response_hook: _ResponseHookT = None,\n):\n \"\"\"Enables tracing of all requests calls that go through\n :code:`urllib.Client._make_request`\"\"\"\n\n opener_open = OpenerDirector.open\n\n @functools.wraps(opener_open)\n def instrumented_open(opener, fullurl, data=None, timeout=None):\n\n if isinstance(fullurl, str):\n request_ = Request(fullurl, data)\n else:\n request_ = 
fullurl\n\n def get_or_create_headers():\n return getattr(request_, \"headers\", {})\n\n def call_wrapped():\n return opener_open(opener, request_, data=data, timeout=timeout)\n\n return _instrumented_open_call(\n opener, request_, call_wrapped, get_or_create_headers\n )\n\n def _instrumented_open_call(\n _, request, call_wrapped, get_or_create_headers\n ): # pylint: disable=too-many-locals\n if context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ) or context.get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY):\n return call_wrapped()\n\n method = request.get_method().upper()\n url = request.full_url\n\n span_name = f\"HTTP {method}\".strip()\n\n url = remove_url_credentials(url)\n\n labels = {\n SpanAttributes.HTTP_METHOD: method,\n SpanAttributes.HTTP_URL: url,\n }\n\n with tracer.start_as_current_span(\n span_name, kind=SpanKind.CLIENT\n ) as span:\n exception = None\n if callable(request_hook):\n request_hook(span, request)\n if span.is_recording():\n span.set_attribute(SpanAttributes.HTTP_METHOD, method)\n span.set_attribute(SpanAttributes.HTTP_URL, url)\n\n headers = get_or_create_headers()\n inject(headers)\n\n token = context.attach(\n context.set_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY, True)\n )\n try:\n result = call_wrapped() # *** PROCEED\n except Exception as exc: # pylint: disable=W0703\n exception = exc\n result = getattr(exc, \"file\", None)\n finally:\n context.detach(token)\n\n if result is not None:\n\n code_ = result.getcode()\n labels[SpanAttributes.HTTP_STATUS_CODE] = str(code_)\n\n if span.is_recording():\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, code_)\n span.set_status(Status(http_status_to_status_code(code_)))\n\n ver_ = str(getattr(result, \"version\", \"\"))\n if ver_:\n labels[\n SpanAttributes.HTTP_FLAVOR\n ] = f\"{ver_[:1]}.{ver_[:-1]}\"\n\n if callable(response_hook):\n response_hook(span, request, result)\n\n if exception is not None:\n raise exception.with_traceback(exception.__traceback__)\n\n return result\n\n instrumented_open.opentelemetry_instrumentation_urllib_applied = True\n OpenerDirector.open = instrumented_open\n\n\ndef _uninstrument():\n \"\"\"Disables instrumentation of :code:`urllib` through this module.\n\n Note that this only works if no other module also patches urllib.\"\"\"\n _uninstrument_from(OpenerDirector)\n\n\ndef _uninstrument_from(instr_root, restore_as_bound_func=False):\n\n instr_func_name = \"open\"\n instr_func = getattr(instr_root, instr_func_name)\n if not getattr(\n instr_func,\n \"opentelemetry_instrumentation_urllib_applied\",\n False,\n ):\n return\n\n original = instr_func.__wrapped__ # pylint:disable=no-member\n if restore_as_bound_func:\n original = types.MethodType(original, instr_root)\n setattr(instr_root, instr_func_name, original)\n", "path": "instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py"}]} | 3,055 | 239 |
gh_patches_debug_30137 | rasdani/github-patches | git_diff | goauthentik__authentik-7325 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sending recovery email fails -- `Object of type MIMEImage is not JSON serializable`
**Describe the bug**
Fails both from the admin UI using the `Send Recovery Link` button or via the `Forgot password` self-service recovery flow.
**To Reproduce**
1. Attempt to send a recovery email
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
From the admin UI:

Attempting to do it using recovery flow:

**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 287, in get
stage_response = self.current_stage_view.dispatch(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py", line 143, in dispatch
return handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/email/stage.py", line 142, in get
self.send_email()
File "/authentik/stages/email/stage.py", line 121, in send_email
send_mails(current_stage, message)
File "/authentik/stages/email/tasks.py", line 26, in send_mails
promise = lazy_group()
^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py", line 1568, in __call__
return self.apply_async(partial_args, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py", line 1598, in apply_async
results = list(self._apply_tasks(tasks, producer, app, p,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py", line 1775, in _apply_tasks
sig.apply_async(producer=producer, add_to_parent=False,
File "/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py", line 400, in apply_async
return _apply(args, kwargs, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/celery/app/task.py", line 594, in apply_async
return app.send_task(
^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/celery/app/base.py", line 798, in send_task
amqp.send_task_message(P, name, message, **options)
File "/ak-root/venv/lib/python3.11/site-packages/celery/app/amqp.py", line 517, in send_task_message
ret = producer.publish(
^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/kombu/messaging.py", line 175, in publish
body, content_type, content_encoding = self._prepare(
^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/kombu/messaging.py", line 268, in _prepare
body) = dumps(body, serializer=serializer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py", line 219, in dumps
with _reraise_errors(EncodeError):
File "/usr/local/lib/python3.11/contextlib.py", line 155, in __exit__
self.gen.throw(typ, value, traceback)
File "/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py", line 45, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "/ak-root/venv/lib/python3.11/site-packages/kombu/exceptions.py", line 34, in reraise
raise value.with_traceback(tb)
File "/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py", line 41, in _reraise_errors
yield
File "/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py", line 220, in dumps
payload = encoder(data)
^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/kombu/utils/json.py", line 57, in dumps
return _dumps(s, cls=cls, **dict(default_kwargs, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/json/__init__.py", line 238, in dumps
**kw).encode(obj)
^^^^^^^^^^^
File "/usr/local/lib/python3.11/json/encoder.py", line 200, in encode
chunks = self.iterencode(o, _one_shot=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/json/encoder.py", line 258, in iterencode
return _iterencode(o, 0)
^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/kombu/utils/json.py", line 45, in default
return super().default(o)
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/json/encoder.py", line 180, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
kombu.exceptions.EncodeError: Object of type MIMEImage is not JSON serializable
```
**Version and Deployment (please complete the following information):**
- authentik version: 2023.10.1
- Deployment: docker-compose
</issue>
<code>
[start of authentik/stages/email/utils.py]
1 """email utils"""
2 from email.mime.image import MIMEImage
3 from functools import lru_cache
4 from pathlib import Path
5
6 from django.core.mail import EmailMultiAlternatives
7 from django.template.loader import render_to_string
8 from django.utils import translation
9
10
11 @lru_cache()
12 def logo_data():
13 """Get logo as MIME Image for emails"""
14 path = Path("web/icons/icon_left_brand.png")
15 if not path.exists():
16 path = Path("web/dist/assets/icons/icon_left_brand.png")
17 with open(path, "rb") as _logo_file:
18 logo = MIMEImage(_logo_file.read())
19 logo.add_header("Content-ID", "logo.png")
20 return logo
21
22
23 class TemplateEmailMessage(EmailMultiAlternatives):
24 """Wrapper around EmailMultiAlternatives with integrated template rendering"""
25
26 def __init__(self, template_name=None, template_context=None, language="", **kwargs):
27 with translation.override(language):
28 html_content = render_to_string(template_name, template_context)
29 super().__init__(**kwargs)
30 self.content_subtype = "html"
31 self.mixed_subtype = "related"
32 self.attach(logo_data())
33 self.attach_alternative(html_content, "text/html")
34
[end of authentik/stages/email/utils.py]
[start of authentik/stages/email/tasks.py]
1 """email stage tasks"""
2 from email.utils import make_msgid
3 from smtplib import SMTPException
4 from typing import Any, Optional
5
6 from celery import group
7 from django.core.mail import EmailMultiAlternatives
8 from django.core.mail.utils import DNS_NAME
9 from django.utils.text import slugify
10 from structlog.stdlib import get_logger
11
12 from authentik.events.models import Event, EventAction
13 from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
14 from authentik.root.celery import CELERY_APP
15 from authentik.stages.email.models import EmailStage
16
17 LOGGER = get_logger()
18
19
20 def send_mails(stage: EmailStage, *messages: list[EmailMultiAlternatives]):
21 """Wrapper to convert EmailMessage to dict and send it from worker"""
22 tasks = []
23 for message in messages:
24 tasks.append(send_mail.s(message.__dict__, stage.pk))
25 lazy_group = group(*tasks)
26 promise = lazy_group()
27 return promise
28
29
30 def get_email_body(email: EmailMultiAlternatives) -> str:
31 """Get the email's body. Will return HTML alt if set, otherwise plain text body"""
32 for alt_content, alt_type in email.alternatives:
33 if alt_type == "text/html":
34 return alt_content
35 return email.body
36
37
38 @CELERY_APP.task(
39 bind=True,
40 autoretry_for=(
41 SMTPException,
42 ConnectionError,
43 OSError,
44 ),
45 retry_backoff=True,
46 base=MonitoredTask,
47 )
48 def send_mail(self: MonitoredTask, message: dict[Any, Any], email_stage_pk: Optional[int] = None):
49 """Send Email for Email Stage. Retries are scheduled automatically."""
50 self.save_on_success = False
51 message_id = make_msgid(domain=DNS_NAME)
52 self.set_uid(slugify(message_id.replace(".", "_").replace("@", "_")))
53 try:
54 if not email_stage_pk:
55 stage: EmailStage = EmailStage(use_global_settings=True)
56 else:
57 stages = EmailStage.objects.filter(pk=email_stage_pk)
58 if not stages.exists():
59 self.set_status(
60 TaskResult(
61 TaskResultStatus.WARNING,
62 messages=["Email stage does not exist anymore. Discarding message."],
63 )
64 )
65 return
66 stage: EmailStage = stages.first()
67 try:
68 backend = stage.backend
69 except ValueError as exc:
70 LOGGER.warning("failed to get email backend", exc=exc)
71 self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
72 return
73 backend.open()
74 # Since django's EmailMessage objects are not JSON serialisable,
75 # we need to rebuild them from a dict
76 message_object = EmailMultiAlternatives()
77 for key, value in message.items():
78 setattr(message_object, key, value)
79 if not stage.use_global_settings:
80 message_object.from_email = stage.from_address
81 # Because we use the Message-ID as UID for the task, manually assign it
82 message_object.extra_headers["Message-ID"] = message_id
83
84 LOGGER.debug("Sending mail", to=message_object.to)
85 backend.send_messages([message_object])
86 Event.new(
87 EventAction.EMAIL_SENT,
88 message=f"Email to {', '.join(message_object.to)} sent",
89 subject=message_object.subject,
90 body=get_email_body(message_object),
91 from_email=message_object.from_email,
92 to_email=message_object.to,
93 ).save()
94 self.set_status(
95 TaskResult(
96 TaskResultStatus.SUCCESSFUL,
97 messages=["Successfully sent Mail."],
98 )
99 )
100 except (SMTPException, ConnectionError, OSError) as exc:
101 LOGGER.debug("Error sending email, retrying...", exc=exc)
102 self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
103 raise exc
104
[end of authentik/stages/email/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/stages/email/tasks.py b/authentik/stages/email/tasks.py
--- a/authentik/stages/email/tasks.py
+++ b/authentik/stages/email/tasks.py
@@ -13,6 +13,7 @@
from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
from authentik.root.celery import CELERY_APP
from authentik.stages.email.models import EmailStage
+from authentik.stages.email.utils import logo_data
LOGGER = get_logger()
@@ -81,6 +82,10 @@
# Because we use the Message-ID as UID for the task, manually assign it
message_object.extra_headers["Message-ID"] = message_id
+ # Add the logo (we can't add it in the previous message since MIMEImage
+ # can't be converted to json)
+ message_object.attach(logo_data())
+
LOGGER.debug("Sending mail", to=message_object.to)
backend.send_messages([message_object])
Event.new(
diff --git a/authentik/stages/email/utils.py b/authentik/stages/email/utils.py
--- a/authentik/stages/email/utils.py
+++ b/authentik/stages/email/utils.py
@@ -9,7 +9,7 @@
@lru_cache()
-def logo_data():
+def logo_data() -> MIMEImage:
"""Get logo as MIME Image for emails"""
path = Path("web/icons/icon_left_brand.png")
if not path.exists():
@@ -29,5 +29,4 @@
super().__init__(**kwargs)
self.content_subtype = "html"
self.mixed_subtype = "related"
- self.attach(logo_data())
self.attach_alternative(html_content, "text/html")
| {"golden_diff": "diff --git a/authentik/stages/email/tasks.py b/authentik/stages/email/tasks.py\n--- a/authentik/stages/email/tasks.py\n+++ b/authentik/stages/email/tasks.py\n@@ -13,6 +13,7 @@\n from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\n from authentik.root.celery import CELERY_APP\n from authentik.stages.email.models import EmailStage\n+from authentik.stages.email.utils import logo_data\n \n LOGGER = get_logger()\n \n@@ -81,6 +82,10 @@\n # Because we use the Message-ID as UID for the task, manually assign it\n message_object.extra_headers[\"Message-ID\"] = message_id\n \n+ # Add the logo (we can't add it in the previous message since MIMEImage\n+ # can't be converted to json)\n+ message_object.attach(logo_data())\n+\n LOGGER.debug(\"Sending mail\", to=message_object.to)\n backend.send_messages([message_object])\n Event.new(\ndiff --git a/authentik/stages/email/utils.py b/authentik/stages/email/utils.py\n--- a/authentik/stages/email/utils.py\n+++ b/authentik/stages/email/utils.py\n@@ -9,7 +9,7 @@\n \n \n @lru_cache()\n-def logo_data():\n+def logo_data() -> MIMEImage:\n \"\"\"Get logo as MIME Image for emails\"\"\"\n path = Path(\"web/icons/icon_left_brand.png\")\n if not path.exists():\n@@ -29,5 +29,4 @@\n super().__init__(**kwargs)\n self.content_subtype = \"html\"\n self.mixed_subtype = \"related\"\n- self.attach(logo_data())\n self.attach_alternative(html_content, \"text/html\")\n", "issue": "sending recovery email fails -- `Object of type MIMEImage is not JSON serializable`\n**Describe the bug**\r\nFails both from the admin UI using the `Send Recovery Link` button or via the `Forgot password` self-service recovery flow.\r\n\r\n**To Reproduce**\r\n\r\n1. Attempt to send a recovery email\r\n4. See error\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nFrom the admin UI:\r\n\r\nAttempting to do it using recovery flow:\r\n\r\n\r\n**Logs**\r\n\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 287, in get\r\n stage_response = self.current_stage_view.dispatch(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py\", line 143, in dispatch\r\n return handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/email/stage.py\", line 142, in get\r\n self.send_email()\r\n File \"/authentik/stages/email/stage.py\", line 121, in send_email\r\n send_mails(current_stage, message)\r\n File \"/authentik/stages/email/tasks.py\", line 26, in send_mails\r\n promise = lazy_group()\r\n ^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py\", line 1568, in __call__\r\n return self.apply_async(partial_args, **options)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py\", line 1598, in apply_async\r\n results = list(self._apply_tasks(tasks, producer, app, p,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py\", line 1775, in _apply_tasks\r\n sig.apply_async(producer=producer, add_to_parent=False,\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/canvas.py\", line 400, in apply_async\r\n return _apply(args, kwargs, **options)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
\"/ak-root/venv/lib/python3.11/site-packages/celery/app/task.py\", line 594, in apply_async\r\n return app.send_task(\r\n ^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/app/base.py\", line 798, in send_task\r\n amqp.send_task_message(P, name, message, **options)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/celery/app/amqp.py\", line 517, in send_task_message\r\n ret = producer.publish(\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/messaging.py\", line 175, in publish\r\n body, content_type, content_encoding = self._prepare(\r\n ^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/messaging.py\", line 268, in _prepare\r\n body) = dumps(body, serializer=serializer)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py\", line 219, in dumps\r\n with _reraise_errors(EncodeError):\r\n File \"/usr/local/lib/python3.11/contextlib.py\", line 155, in __exit__\r\n self.gen.throw(typ, value, traceback)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py\", line 45, in _reraise_errors\r\n reraise(wrapper, wrapper(exc), sys.exc_info()[2])\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/exceptions.py\", line 34, in reraise\r\n raise value.with_traceback(tb)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py\", line 41, in _reraise_errors\r\n yield\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/serialization.py\", line 220, in dumps\r\n payload = encoder(data)\r\n ^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/utils/json.py\", line 57, in dumps\r\n return _dumps(s, cls=cls, **dict(default_kwargs, **kwargs))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n ^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/json/encoder.py\", line 200, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/json/encoder.py\", line 258, in iterencode\r\n return _iterencode(o, 0)\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/kombu/utils/json.py\", line 45, in default\r\n return super().default(o)\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/json/encoder.py\", line 180, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nkombu.exceptions.EncodeError: Object of type MIMEImage is not JSON serializable\r\n```\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.1\r\n- Deployment: docker-compose\r\n\n", "before_files": [{"content": "\"\"\"email utils\"\"\"\nfrom email.mime.image import MIMEImage\nfrom functools import lru_cache\nfrom pathlib import Path\n\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils import translation\n\n\n@lru_cache()\ndef logo_data():\n \"\"\"Get logo as MIME Image for emails\"\"\"\n path = Path(\"web/icons/icon_left_brand.png\")\n if not path.exists():\n path = Path(\"web/dist/assets/icons/icon_left_brand.png\")\n with open(path, \"rb\") as _logo_file:\n logo = MIMEImage(_logo_file.read())\n logo.add_header(\"Content-ID\", \"logo.png\")\n return logo\n\n\nclass TemplateEmailMessage(EmailMultiAlternatives):\n \"\"\"Wrapper around EmailMultiAlternatives with 
integrated template rendering\"\"\"\n\n def __init__(self, template_name=None, template_context=None, language=\"\", **kwargs):\n with translation.override(language):\n html_content = render_to_string(template_name, template_context)\n super().__init__(**kwargs)\n self.content_subtype = \"html\"\n self.mixed_subtype = \"related\"\n self.attach(logo_data())\n self.attach_alternative(html_content, \"text/html\")\n", "path": "authentik/stages/email/utils.py"}, {"content": "\"\"\"email stage tasks\"\"\"\nfrom email.utils import make_msgid\nfrom smtplib import SMTPException\nfrom typing import Any, Optional\n\nfrom celery import group\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.mail.utils import DNS_NAME\nfrom django.utils.text import slugify\nfrom structlog.stdlib import get_logger\n\nfrom authentik.events.models import Event, EventAction\nfrom authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\nfrom authentik.root.celery import CELERY_APP\nfrom authentik.stages.email.models import EmailStage\n\nLOGGER = get_logger()\n\n\ndef send_mails(stage: EmailStage, *messages: list[EmailMultiAlternatives]):\n \"\"\"Wrapper to convert EmailMessage to dict and send it from worker\"\"\"\n tasks = []\n for message in messages:\n tasks.append(send_mail.s(message.__dict__, stage.pk))\n lazy_group = group(*tasks)\n promise = lazy_group()\n return promise\n\n\ndef get_email_body(email: EmailMultiAlternatives) -> str:\n \"\"\"Get the email's body. Will return HTML alt if set, otherwise plain text body\"\"\"\n for alt_content, alt_type in email.alternatives:\n if alt_type == \"text/html\":\n return alt_content\n return email.body\n\n\n@CELERY_APP.task(\n bind=True,\n autoretry_for=(\n SMTPException,\n ConnectionError,\n OSError,\n ),\n retry_backoff=True,\n base=MonitoredTask,\n)\ndef send_mail(self: MonitoredTask, message: dict[Any, Any], email_stage_pk: Optional[int] = None):\n \"\"\"Send Email for Email Stage. Retries are scheduled automatically.\"\"\"\n self.save_on_success = False\n message_id = make_msgid(domain=DNS_NAME)\n self.set_uid(slugify(message_id.replace(\".\", \"_\").replace(\"@\", \"_\")))\n try:\n if not email_stage_pk:\n stage: EmailStage = EmailStage(use_global_settings=True)\n else:\n stages = EmailStage.objects.filter(pk=email_stage_pk)\n if not stages.exists():\n self.set_status(\n TaskResult(\n TaskResultStatus.WARNING,\n messages=[\"Email stage does not exist anymore. 
Discarding message.\"],\n )\n )\n return\n stage: EmailStage = stages.first()\n try:\n backend = stage.backend\n except ValueError as exc:\n LOGGER.warning(\"failed to get email backend\", exc=exc)\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n return\n backend.open()\n # Since django's EmailMessage objects are not JSON serialisable,\n # we need to rebuild them from a dict\n message_object = EmailMultiAlternatives()\n for key, value in message.items():\n setattr(message_object, key, value)\n if not stage.use_global_settings:\n message_object.from_email = stage.from_address\n # Because we use the Message-ID as UID for the task, manually assign it\n message_object.extra_headers[\"Message-ID\"] = message_id\n\n LOGGER.debug(\"Sending mail\", to=message_object.to)\n backend.send_messages([message_object])\n Event.new(\n EventAction.EMAIL_SENT,\n message=f\"Email to {', '.join(message_object.to)} sent\",\n subject=message_object.subject,\n body=get_email_body(message_object),\n from_email=message_object.from_email,\n to_email=message_object.to,\n ).save()\n self.set_status(\n TaskResult(\n TaskResultStatus.SUCCESSFUL,\n messages=[\"Successfully sent Mail.\"],\n )\n )\n except (SMTPException, ConnectionError, OSError) as exc:\n LOGGER.debug(\"Error sending email, retrying...\", exc=exc)\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n raise exc\n", "path": "authentik/stages/email/tasks.py"}]} | 3,389 | 386 |
gh_patches_debug_15862 | rasdani/github-patches | git_diff | optuna__optuna-2265 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`study.trials_dataframe` call fails for multi-objective studies
When using the new multi-objective study framework in v2.4.0 (`optuna.create_study(..., directions=[...])`), calling the `trials_dataframe` message raises an exception caused by some incorrect column spec type comparison in the _dataframe framework. It appears the column aggregation for contains a bug when used with multiple objectives, which adds an extra `values` Tuple to the `values` Set during the initial mapping of `'value' -> 'values'` for a multi-objective study.
## Expected behavior
Accessing the `trials_dataframe` method of a study works the same for both single and multi-objective studies.
## Environment
- Optuna version: 2.3.0 -> 2.4.0
- Python version: 3.8
- OS: Ubuntu 20.04.1, Linux Kernel v5.4.0-56
## Error messages, stack traces, or logs
```
Traceback (most recent call last):
File "optuna_vis.py", line 12, in <module>
study_df = study.trials_dataframe()
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/study.py", line 499, in trials_dataframe
return _trials_dataframe(self, attrs, multi_index)
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py", line 80, in _trials_dataframe
columns: List[Tuple[str, str]] = sum(
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py", line 81, in <genexpr>
(sorted(column_agg[k]) for k in attrs if k in column_agg), []
TypeError: '<' not supported between instances of 'str' and 'int'
```
By inspecting the `attr` field in the `_trials_dataframe` method (line 23, _dataframe.py), reveals an extra `values` item:
```
{'number': {('number', '')}, 'values': {('values', 1), ('values', ''), ('values', 0), ('values', 3), ('values', 2)}, 'datetime_start': {('datetime_start', '')}, 'datetime_complete': {('datetime_complete', '')}, 'duration': {('duration', '')}, 'params': {(<param_tuples>), ...}, 'user_attrs': {('user_attrs', 'total_x'), ('user_attrs', 'total_y'), ('user_attrs', '_time_'), ('user_attrs', 'value_total')}, 'system_attrs': {('system_attrs', 'nsga2:generation')}, 'state': {('state', '')}}
```
For context, I have defined 4 objectives in this study. After the first trial, there appears this `('values', '')` anomaly which causes the above exception.
## Steps to reproduce
1. Create a MPO study with 4 objectives.
2. Start your study and after at least 2 trials, try to call the `study.trials_dataframe()` method.
3. The above exception will be raised.
</issue>
<code>
[start of optuna/_dataframe.py]
1 import collections
2 from typing import Any
3 from typing import DefaultDict
4 from typing import Dict
5 from typing import List
6 from typing import Set
7 from typing import Tuple
8
9 import optuna
10 from optuna._imports import try_import
11 from optuna.trial._state import TrialState
12
13
14 with try_import() as _imports:
15 # `Study.trials_dataframe` is disabled if pandas is not available.
16 import pandas as pd
17
18 # Required for type annotation in `Study.trials_dataframe`.
19 if not _imports.is_successful():
20 pd = object # type: ignore # NOQA
21
22
23 def _trials_dataframe(
24 study: "optuna.Study", attrs: Tuple[str, ...], multi_index: bool
25 ) -> "pd.DataFrame":
26 _imports.check()
27
28 trials = study.get_trials(deepcopy=False)
29
30 # If no trials, return an empty dataframe.
31 if not len(trials):
32 return pd.DataFrame()
33
34 if "value" in attrs and study._is_multi_objective():
35 attrs = tuple("values" if attr == "value" else attr for attr in attrs)
36
37 attrs_to_df_columns: Dict[str, str] = collections.OrderedDict()
38 for attr in attrs:
39 if attr.startswith("_"):
40 # Python conventional underscores are omitted in the dataframe.
41 df_column = attr[1:]
42 else:
43 df_column = attr
44 attrs_to_df_columns[attr] = df_column
45
46 # column_agg is an aggregator of column names.
47 # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.
48 # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').
49 column_agg: DefaultDict[str, Set] = collections.defaultdict(set)
50 non_nested_attr = ""
51
52 def _create_record_and_aggregate_column(
53 trial: "optuna.trial.FrozenTrial",
54 ) -> Dict[Tuple[str, str], Any]:
55
56 record = {}
57 for attr, df_column in attrs_to_df_columns.items():
58 value = getattr(trial, attr)
59 if isinstance(value, TrialState):
60 # Convert TrialState to str and remove the common prefix.
61 value = str(value).split(".")[-1]
62 if isinstance(value, dict):
63 for nested_attr, nested_value in value.items():
64 record[(df_column, nested_attr)] = nested_value
65 column_agg[attr].add((df_column, nested_attr))
66 elif isinstance(value, list):
67 # Expand trial.values.
68 for nested_attr, nested_value in enumerate(value):
69 record[(df_column, nested_attr)] = nested_value
70 column_agg[attr].add((df_column, nested_attr))
71 else:
72 record[(df_column, non_nested_attr)] = value
73 column_agg[attr].add((df_column, non_nested_attr))
74 return record
75
76 records = list([_create_record_and_aggregate_column(trial) for trial in trials])
77
78 columns: List[Tuple[str, str]] = sum(
79 (sorted(column_agg[k]) for k in attrs if k in column_agg), []
80 )
81
82 df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))
83
84 if not multi_index:
85 # Flatten the `MultiIndex` columns where names are concatenated with underscores.
86 # Filtering is required to omit non-nested columns avoiding unwanted trailing
87 # underscores.
88 df.columns = ["_".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns]
89
90 return df
91
[end of optuna/_dataframe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/_dataframe.py b/optuna/_dataframe.py
--- a/optuna/_dataframe.py
+++ b/optuna/_dataframe.py
@@ -68,6 +68,12 @@
for nested_attr, nested_value in enumerate(value):
record[(df_column, nested_attr)] = nested_value
column_agg[attr].add((df_column, nested_attr))
+ elif attr == "values":
+ # trial.values should be None when the trial's state is FAIL or PRUNED.
+ assert value is None
+ for nested_attr in range(len(study.directions)):
+ record[(df_column, nested_attr)] = None
+ column_agg[attr].add((df_column, nested_attr))
else:
record[(df_column, non_nested_attr)] = value
column_agg[attr].add((df_column, non_nested_attr))
| {"golden_diff": "diff --git a/optuna/_dataframe.py b/optuna/_dataframe.py\n--- a/optuna/_dataframe.py\n+++ b/optuna/_dataframe.py\n@@ -68,6 +68,12 @@\n for nested_attr, nested_value in enumerate(value):\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n+ elif attr == \"values\":\n+ # trial.values should be None when the trial's state is FAIL or PRUNED.\n+ assert value is None\n+ for nested_attr in range(len(study.directions)):\n+ record[(df_column, nested_attr)] = None\n+ column_agg[attr].add((df_column, nested_attr))\n else:\n record[(df_column, non_nested_attr)] = value\n column_agg[attr].add((df_column, non_nested_attr))\n", "issue": "`study.trials_dataframe` call fails for multi-objective studies\nWhen using the new multi-objective study framework in v2.4.0 (`optuna.create_study(..., directions=[...])`), calling the `trials_dataframe` message raises an exception caused by some incorrect column spec type comparison in the _dataframe framework. It appears the column aggregation for contains a bug when used with multiple objectives, which adds an extra `values` Tuple to the `values` Set during the initial mapping of `'value' -> 'values'` for a multi-objective study.\r\n\r\n## Expected behavior\r\n\r\nAccessing the `trials_dataframe` method of a study works the same for both single and multi-objective studies.\r\n\r\n## Environment\r\n\r\n- Optuna version: 2.3.0 -> 2.4.0\r\n- Python version: 3.8\r\n- OS: Ubuntu 20.04.1, Linux Kernel v5.4.0-56\r\n\r\n## Error messages, stack traces, or logs\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"optuna_vis.py\", line 12, in <module>\r\n study_df = study.trials_dataframe()\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/study.py\", line 499, in trials_dataframe\r\n return _trials_dataframe(self, attrs, multi_index)\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py\", line 80, in _trials_dataframe\r\n columns: List[Tuple[str, str]] = sum(\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py\", line 81, in <genexpr>\r\n (sorted(column_agg[k]) for k in attrs if k in column_agg), []\r\nTypeError: '<' not supported between instances of 'str' and 'int'\r\n```\r\n\r\nBy inspecting the `attr` field in the `_trials_dataframe` method (line 23, _dataframe.py), reveals an extra `values` item:\r\n\r\n```\r\n{'number': {('number', '')}, 'values': {('values', 1), ('values', ''), ('values', 0), ('values', 3), ('values', 2)}, 'datetime_start': {('datetime_start', '')}, 'datetime_complete': {('datetime_complete', '')}, 'duration': {('duration', '')}, 'params': {(<param_tuples>), ...}, 'user_attrs': {('user_attrs', 'total_x'), ('user_attrs', 'total_y'), ('user_attrs', '_time_'), ('user_attrs', 'value_total')}, 'system_attrs': {('system_attrs', 'nsga2:generation')}, 'state': {('state', '')}}\r\n```\r\n\r\nFor context, I have defined 4 objectives in this study. After the first trial, there appears this `('values', '')` anomaly which causes the above exception.\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a MPO study with 4 objectives.\r\n2. Start your study and after at least 2 trials, try to call the `study.trials_dataframe()` method.\r\n3. 
The above exception will be raised.\r\n\n", "before_files": [{"content": "import collections\nfrom typing import Any\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import List\nfrom typing import Set\nfrom typing import Tuple\n\nimport optuna\nfrom optuna._imports import try_import\nfrom optuna.trial._state import TrialState\n\n\nwith try_import() as _imports:\n # `Study.trials_dataframe` is disabled if pandas is not available.\n import pandas as pd\n\n# Required for type annotation in `Study.trials_dataframe`.\nif not _imports.is_successful():\n pd = object # type: ignore # NOQA\n\n\ndef _trials_dataframe(\n study: \"optuna.Study\", attrs: Tuple[str, ...], multi_index: bool\n) -> \"pd.DataFrame\":\n _imports.check()\n\n trials = study.get_trials(deepcopy=False)\n\n # If no trials, return an empty dataframe.\n if not len(trials):\n return pd.DataFrame()\n\n if \"value\" in attrs and study._is_multi_objective():\n attrs = tuple(\"values\" if attr == \"value\" else attr for attr in attrs)\n\n attrs_to_df_columns: Dict[str, str] = collections.OrderedDict()\n for attr in attrs:\n if attr.startswith(\"_\"):\n # Python conventional underscores are omitted in the dataframe.\n df_column = attr[1:]\n else:\n df_column = attr\n attrs_to_df_columns[attr] = df_column\n\n # column_agg is an aggregator of column names.\n # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.\n # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').\n column_agg: DefaultDict[str, Set] = collections.defaultdict(set)\n non_nested_attr = \"\"\n\n def _create_record_and_aggregate_column(\n trial: \"optuna.trial.FrozenTrial\",\n ) -> Dict[Tuple[str, str], Any]:\n\n record = {}\n for attr, df_column in attrs_to_df_columns.items():\n value = getattr(trial, attr)\n if isinstance(value, TrialState):\n # Convert TrialState to str and remove the common prefix.\n value = str(value).split(\".\")[-1]\n if isinstance(value, dict):\n for nested_attr, nested_value in value.items():\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n elif isinstance(value, list):\n # Expand trial.values.\n for nested_attr, nested_value in enumerate(value):\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n else:\n record[(df_column, non_nested_attr)] = value\n column_agg[attr].add((df_column, non_nested_attr))\n return record\n\n records = list([_create_record_and_aggregate_column(trial) for trial in trials])\n\n columns: List[Tuple[str, str]] = sum(\n (sorted(column_agg[k]) for k in attrs if k in column_agg), []\n )\n\n df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))\n\n if not multi_index:\n # Flatten the `MultiIndex` columns where names are concatenated with underscores.\n # Filtering is required to omit non-nested columns avoiding unwanted trailing\n # underscores.\n df.columns = [\"_\".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns]\n\n return df\n", "path": "optuna/_dataframe.py"}]} | 2,148 | 191 |
gh_patches_debug_2254 | rasdani/github-patches | git_diff | getsentry__sentry-1340 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
do we really need 'redis>=2.7.0,<2.9.0' ?
Hi
Recently I was trying to use sentry with [django-redis](https://github.com/niwibe/django-redis) as a cache backend and this can't be (easily) done with current versions of both django-redis and sentry since django-redis has a restriction for [redis>=2.10.0](https://github.com/niwibe/django-redis/blob/fcfd73d85d4fc3350d9cdacdb08546a5f4c9a66d/setup.py#L21).
A simple installation shows that sentry works fine with `redis==2.10.3`, but I guess this would need more thorough testing.
I briefly checked redis-py changelog and it seems the only possible backwards incompatible change is [this](https://github.com/andymccurdy/redis-py/blob/54e1040b576afb4155bf839483428c5edac14df0/CHANGES#L9-L15). Also I noticed that current version of sentry has a builtin redis cache backend, but it doesn't seem to cover other potential apps installed within the project.
I also posted similar issue on niwibe/django-redis#113.
</issue>
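For anyone reproducing the conflict locally, it can help to confirm which redis-py actually ends up importable before blaming either pin. The sketch below is illustrative only and not part of Sentry; it assumes nothing beyond redis-py's standard `__version__` string:

```python
# Illustrative probe: report whether the importable redis-py already includes
# the 2.10 API changes that django-redis depends on.
import redis

major, minor = (int(part) for part in redis.__version__.split(".")[:2])
if (major, minor) >= (2, 10):
    print("redis-py %s includes the 2.10 client API changes" % redis.__version__)
else:
    print("redis-py %s predates the 2.10 client API changes" % redis.__version__)
```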
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """
3 Sentry
4 ======
5
6 Sentry is a realtime event logging and aggregation platform. It specializes
7 in monitoring errors and extracting all the information needed to do a proper
8 post-mortem without any of the hassle of the standard user feedback loop.
9
10 Sentry is a Server
11 ------------------
12
13 The Sentry package, at its core, is just a simple server and web UI. It will
14 handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
15 and all of the logic behind storage and aggregation.
16
17 That said, Sentry is not limited to Python. The primary implementation is in
18 Python, but it contains a full API for sending events from any language, in
19 any application.
20
21 :copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
22 :license: BSD, see LICENSE for more details.
23 """
24
25 from setuptools import setup, find_packages
26 from setuptools.command.test import test as TestCommand
27 import sys
28
29
30 # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
31 # in multiprocessing/util.py _exit_function when running `python
32 # setup.py test` (see
33 # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
34 for m in ('multiprocessing', 'billiard'):
35 try:
36 __import__(m)
37 except ImportError:
38 pass
39
40 setup_requires = []
41
42 if 'test' in sys.argv:
43 setup_requires.append('pytest')
44
45 dev_requires = [
46 'flake8>=2.0,<2.1',
47 ]
48
49 tests_require = [
50 'casscache',
51 'cqlsh',
52 'elasticsearch',
53 'exam>=0.5.1',
54 'eventlet',
55 'httpretty',
56 'pytest',
57 'pytest-cov>=1.4',
58 'pytest-django',
59 'pytest-timeout',
60 'python-coveralls',
61 'mock>=0.8.0',
62 'riak',
63 'unittest2',
64 ]
65
66
67 install_requires = [
68 'BeautifulSoup>=3.2.1,<3.3.0',
69 'celery>=3.0.15,<3.1.0',
70 'cssutils>=0.9.9,<0.10.0',
71 'Django>=1.5.8,<1.6',
72 'django-bitfield>=1.7.0,<1.8.0',
73 'django-celery>=3.0.11,<3.1.0',
74 'django-crispy-forms>=1.2.3,<1.3.0',
75 'django-paging>=0.2.5,<0.3.0',
76 'django-picklefield>=0.3.0,<0.4.0',
77 'django-recaptcha>=1.0.0,<1.1.0',
78 'django-social-auth>=0.7.28,<0.8.0',
79 'django-static-compiler>=0.3.0,<0.4.0',
80 'django-statsd-mozilla>=0.3.8.0,<0.3.9.0',
81 'django-sudo>=1.1.0,<1.2.0',
82 'django-templatetag-sugar>=0.1.0',
83 'djangorestframework>=2.3.8,<2.4.0',
84 'email-reply-parser>=0.2.0,<0.3.0',
85 'enum34>=0.9.18,<0.10.0',
86 'gunicorn>=0.17.2,<0.18.0',
87 'ipaddr>=2.1.11,<2.2.0',
88 'logan>=0.5.8.2,<0.6.0',
89 'nydus>=0.10.7,<0.11.0',
90 'progressbar>=2.2,<2.4',
91 'Pygments>=1.6.0,<1.7.0',
92 'python-dateutil>=1.5.0,<2.0.0',
93 'python-memcached>=1.53,<2.0.0',
94 'raven>=5.0.0',
95 'redis>=2.7.0,<2.9.0',
96 'simplejson>=3.1.0,<3.4.0',
97 'six>=1.6.0,<2.0.0',
98 'setproctitle>=1.1.7,<1.2.0',
99 'South==1.0.1',
100 'toronado>=0.0.4,<0.1.0',
101 'ua-parser>=0.3.5',
102 'urllib3>=1.7.1,<1.8.0',
103 ]
104
105 postgres_requires = [
106 'psycopg2>=2.5.0,<2.6.0',
107 ]
108
109 postgres_pypy_requires = [
110 'psycopg2cffi',
111 ]
112
113 mysql_requires = [
114 'MySQL-python>=1.2.0,<1.3.0',
115 ]
116
117
118 class PyTest(TestCommand):
119 def finalize_options(self):
120 TestCommand.finalize_options(self)
121 self.test_args = ['tests']
122 self.test_suite = True
123
124 def run_tests(self):
125 # import here, cause outside the eggs aren't loaded
126 import pytest
127 errno = pytest.main(self.test_args)
128 sys.exit(errno)
129
130
131 setup(
132 name='sentry',
133 version='7.0.0-DEV',
134 author='David Cramer',
135 author_email='[email protected]',
136 url='https://www.getsentry.com',
137 description='A realtime logging and aggregation server.',
138 long_description=open('README.rst').read(),
139 package_dir={'': 'src'},
140 packages=find_packages('src'),
141 zip_safe=False,
142 install_requires=install_requires,
143 extras_require={
144 'tests': tests_require,
145 'dev': dev_requires,
146 'postgres': install_requires + postgres_requires,
147 'postgres_pypy': install_requires + postgres_pypy_requires,
148 'mysql': install_requires + mysql_requires,
149 },
150 tests_require=tests_require,
151 cmdclass={'test': PyTest},
152 license='BSD',
153 include_package_data=True,
154 entry_points={
155 'console_scripts': [
156 'sentry = sentry.utils.runner:main',
157 ],
158 },
159 classifiers=[
160 'Framework :: Django',
161 'Intended Audience :: Developers',
162 'Intended Audience :: System Administrators',
163 'Operating System :: OS Independent',
164 'Topic :: Software Development'
165 ],
166 )
167
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -92,7 +92,7 @@
'python-dateutil>=1.5.0,<2.0.0',
'python-memcached>=1.53,<2.0.0',
'raven>=5.0.0',
- 'redis>=2.7.0,<2.9.0',
+ 'redis>=2.7.0,<2.11.0',
'simplejson>=3.1.0,<3.4.0',
'six>=1.6.0,<2.0.0',
'setproctitle>=1.1.7,<1.2.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -92,7 +92,7 @@\n 'python-dateutil>=1.5.0,<2.0.0',\n 'python-memcached>=1.53,<2.0.0',\n 'raven>=5.0.0',\n- 'redis>=2.7.0,<2.9.0',\n+ 'redis>=2.7.0,<2.11.0',\n 'simplejson>=3.1.0,<3.4.0',\n 'six>=1.6.0,<2.0.0',\n 'setproctitle>=1.1.7,<1.2.0',\n", "issue": "do we really need 'redis>=2.7.0,<2.9.0' ?\nHi\n\nRecently I was trying to use sentry with [django-redis](https://github.com/niwibe/django-redis) as a cache backend and this can't be (easily) done with current versions of both django-redis and sentry since django-redis has a restriction for [redis>=2.10.0](https://github.com/niwibe/django-redis/blob/fcfd73d85d4fc3350d9cdacdb08546a5f4c9a66d/setup.py#L21). \nA simple installation shows that sentry works fine with `redis==2.10.3` but I guess this would need a more thorough tests. \n\nI briefly checked redis-py changelog and it seems the only possible backwards incompatible change is [this](https://github.com/andymccurdy/redis-py/blob/54e1040b576afb4155bf839483428c5edac14df0/CHANGES#L9-L15). Also I noticed that current version of sentry has a builtin redis cache backend, but it doesn't seem to cover other potential apps installed within the project. \n\nI also posted similar issue on niwibe/django-redis#113.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. 
The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport sys\n\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\nfor m in ('multiprocessing', 'billiard'):\n try:\n __import__(m)\n except ImportError:\n pass\n\nsetup_requires = []\n\nif 'test' in sys.argv:\n setup_requires.append('pytest')\n\ndev_requires = [\n 'flake8>=2.0,<2.1',\n]\n\ntests_require = [\n 'casscache',\n 'cqlsh',\n 'elasticsearch',\n 'exam>=0.5.1',\n 'eventlet',\n 'httpretty',\n 'pytest',\n 'pytest-cov>=1.4',\n 'pytest-django',\n 'pytest-timeout',\n 'python-coveralls',\n 'mock>=0.8.0',\n 'riak',\n 'unittest2',\n]\n\n\ninstall_requires = [\n 'BeautifulSoup>=3.2.1,<3.3.0',\n 'celery>=3.0.15,<3.1.0',\n 'cssutils>=0.9.9,<0.10.0',\n 'Django>=1.5.8,<1.6',\n 'django-bitfield>=1.7.0,<1.8.0',\n 'django-celery>=3.0.11,<3.1.0',\n 'django-crispy-forms>=1.2.3,<1.3.0',\n 'django-paging>=0.2.5,<0.3.0',\n 'django-picklefield>=0.3.0,<0.4.0',\n 'django-recaptcha>=1.0.0,<1.1.0',\n 'django-social-auth>=0.7.28,<0.8.0',\n 'django-static-compiler>=0.3.0,<0.4.0',\n 'django-statsd-mozilla>=0.3.8.0,<0.3.9.0',\n 'django-sudo>=1.1.0,<1.2.0',\n 'django-templatetag-sugar>=0.1.0',\n 'djangorestframework>=2.3.8,<2.4.0',\n 'email-reply-parser>=0.2.0,<0.3.0',\n 'enum34>=0.9.18,<0.10.0',\n 'gunicorn>=0.17.2,<0.18.0',\n 'ipaddr>=2.1.11,<2.2.0',\n 'logan>=0.5.8.2,<0.6.0',\n 'nydus>=0.10.7,<0.11.0',\n 'progressbar>=2.2,<2.4',\n 'Pygments>=1.6.0,<1.7.0',\n 'python-dateutil>=1.5.0,<2.0.0',\n 'python-memcached>=1.53,<2.0.0',\n 'raven>=5.0.0',\n 'redis>=2.7.0,<2.9.0',\n 'simplejson>=3.1.0,<3.4.0',\n 'six>=1.6.0,<2.0.0',\n 'setproctitle>=1.1.7,<1.2.0',\n 'South==1.0.1',\n 'toronado>=0.0.4,<0.1.0',\n 'ua-parser>=0.3.5',\n 'urllib3>=1.7.1,<1.8.0',\n]\n\npostgres_requires = [\n 'psycopg2>=2.5.0,<2.6.0',\n]\n\npostgres_pypy_requires = [\n 'psycopg2cffi',\n]\n\nmysql_requires = [\n 'MySQL-python>=1.2.0,<1.3.0',\n]\n\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nsetup(\n name='sentry',\n version='7.0.0-DEV',\n author='David Cramer',\n author_email='[email protected]',\n url='https://www.getsentry.com',\n description='A realtime logging and aggregation server.',\n long_description=open('README.rst').read(),\n package_dir={'': 'src'},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n 'tests': tests_require,\n 'dev': dev_requires,\n 'postgres': install_requires + postgres_requires,\n 'postgres_pypy': install_requires + postgres_pypy_requires,\n 'mysql': install_requires + mysql_requires,\n },\n tests_require=tests_require,\n cmdclass={'test': PyTest},\n license='BSD',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'sentry = sentry.utils.runner:main',\n ],\n },\n classifiers=[\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 
'Intended Audience :: System Administrators',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development'\n ],\n)\n", "path": "setup.py"}]} | 2,684 | 166 |
gh_patches_debug_35064 | rasdani/github-patches | git_diff | openai__gym-1653 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Taxi environment doesn't match paper

The map implemented in Taxi-v2 differs slightly from the one in the original paper (shown above). Namely, the paper's example has a wall that prevents transitioning from (1,1) to (1,2), but the implemented gym environment doesn't. Any reason for that?
</issue>
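For readers unfamiliar with the ASCII map format, the standalone sketch below (not part of gym) mirrors the boundary check used in `TaxiEnv.__init__` in the file quoted further down: the character between two cells on the same row is `:` when the move is allowed and `|` when a wall blocks it. It shows why the released map permits the (1,1) to (1,2) transition that the paper's map forbids:

```python
# Walls in the Taxi MAP are encoded between cells: ":" allows the move, "|" blocks it.
import numpy as np

MAP = [
    "+---------+",
    "|R: | : :G|",
    "| : : : : |",  # grid row 1 in the released Taxi-v2: no wall after column 1
    "| : : : : |",
    "| | : | : |",
    "|Y| : |B: |",
    "+---------+",
]
desc = np.asarray(MAP, dtype="c")

def can_move_east(row, col):
    # Same index arithmetic as TaxiEnv: the separator between (row, col) and
    # (row, col + 1) lives at desc[1 + row, 2 * col + 2].
    return desc[1 + row, 2 * col + 2] == b":"

print(can_move_east(1, 1))  # True for the released map; the paper's map would give False
```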
<code>
[start of gym/envs/toy_text/taxi.py]
1 import sys
2 from contextlib import closing
3 from six import StringIO
4 from gym import utils
5 from gym.envs.toy_text import discrete
6 import numpy as np
7
8 MAP = [
9 "+---------+",
10 "|R: | : :G|",
11 "| : : : : |",
12 "| : : : : |",
13 "| | : | : |",
14 "|Y| : |B: |",
15 "+---------+",
16 ]
17
18
19 class TaxiEnv(discrete.DiscreteEnv):
20 """
21 The Taxi Problem
22 from "Hierarchical Reinforcement Learning with the MAXQ Value Function Decomposition"
23 by Tom Dietterich
24
25 Description:
26 There are four designated locations in the grid world indicated by R(ed), B(lue), G(reen), and Y(ellow). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drive to the passenger's location, pick up the passenger, drive to the passenger's destination (another one of the four specified locations), and then drop off the passenger. Once the passenger is dropped off, the episode ends.
27
28 Observations:
29 There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is the taxi), and 4 destination locations.
30
31 Actions:
32 There are 6 discrete deterministic actions:
33 - 0: move south
34 - 1: move north
35 - 2: move east
36 - 3: move west
37 - 4: pickup passenger
38 - 5: dropoff passenger
39
40 Rewards:
41 There is a reward of -1 for each action and an additional reward of +20 for delievering the passenger. There is a reward of -10 for executing actions "pickup" and "dropoff" illegally.
42
43
44 Rendering:
45 - blue: passenger
46 - magenta: destination
47 - yellow: empty taxi
48 - green: full taxi
49 - other letters (R, G, B and Y): locations for passengers and destinations
50
51 actions:
52 - 0: south
53 - 1: north
54 - 2: east
55 - 3: west
56 - 4: pickup
57 - 5: dropoff
58
59 state space is represented by:
60 (taxi_row, taxi_col, passenger_location, destination)
61 """
62 metadata = {'render.modes': ['human', 'ansi']}
63
64 def __init__(self):
65 self.desc = np.asarray(MAP, dtype='c')
66
67 self.locs = locs = [(0,0), (0,4), (4,0), (4,3)]
68
69 num_states = 500
70 num_rows = 5
71 num_columns = 5
72 max_row = num_rows - 1
73 max_col = num_columns - 1
74 initial_state_distrib = np.zeros(num_states)
75 num_actions = 6
76 P = {state: {action: []
77 for action in range(num_actions)} for state in range(num_states)}
78 for row in range(num_rows):
79 for col in range(num_columns):
80 for pass_idx in range(len(locs) + 1): # +1 for being inside taxi
81 for dest_idx in range(len(locs)):
82 state = self.encode(row, col, pass_idx, dest_idx)
83 if pass_idx < 4 and pass_idx != dest_idx:
84 initial_state_distrib[state] += 1
85 for action in range(num_actions):
86 # defaults
87 new_row, new_col, new_pass_idx = row, col, pass_idx
88 reward = -1 # default reward when there is no pickup/dropoff
89 done = False
90 taxi_loc = (row, col)
91
92 if action == 0:
93 new_row = min(row + 1, max_row)
94 elif action == 1:
95 new_row = max(row - 1, 0)
96 if action == 2 and self.desc[1 + row, 2 * col + 2] == b":":
97 new_col = min(col + 1, max_col)
98 elif action == 3 and self.desc[1 + row, 2 * col] == b":":
99 new_col = max(col - 1, 0)
100 elif action == 4: # pickup
101 if (pass_idx < 4 and taxi_loc == locs[pass_idx]):
102 new_pass_idx = 4
103 else: # passenger not at location
104 reward = -10
105 elif action == 5: # dropoff
106 if (taxi_loc == locs[dest_idx]) and pass_idx == 4:
107 new_pass_idx = dest_idx
108 done = True
109 reward = 20
110 elif (taxi_loc in locs) and pass_idx == 4:
111 new_pass_idx = locs.index(taxi_loc)
112 else: # dropoff at wrong location
113 reward = -10
114 new_state = self.encode(
115 new_row, new_col, new_pass_idx, dest_idx)
116 P[state][action].append(
117 (1.0, new_state, reward, done))
118 initial_state_distrib /= initial_state_distrib.sum()
119 discrete.DiscreteEnv.__init__(
120 self, num_states, num_actions, P, initial_state_distrib)
121
122 def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):
123 # (5) 5, 5, 4
124 i = taxi_row
125 i *= 5
126 i += taxi_col
127 i *= 5
128 i += pass_loc
129 i *= 4
130 i += dest_idx
131 return i
132
133 def decode(self, i):
134 out = []
135 out.append(i % 4)
136 i = i // 4
137 out.append(i % 5)
138 i = i // 5
139 out.append(i % 5)
140 i = i // 5
141 out.append(i)
142 assert 0 <= i < 5
143 return reversed(out)
144
145 def render(self, mode='human'):
146 outfile = StringIO() if mode == 'ansi' else sys.stdout
147
148 out = self.desc.copy().tolist()
149 out = [[c.decode('utf-8') for c in line] for line in out]
150 taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)
151
152 def ul(x): return "_" if x == " " else x
153 if pass_idx < 4:
154 out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
155 out[1 + taxi_row][2 * taxi_col + 1], 'yellow', highlight=True)
156 pi, pj = self.locs[pass_idx]
157 out[1 + pi][2 * pj + 1] = utils.colorize(out[1 + pi][2 * pj + 1], 'blue', bold=True)
158 else: # passenger in taxi
159 out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
160 ul(out[1 + taxi_row][2 * taxi_col + 1]), 'green', highlight=True)
161
162 di, dj = self.locs[dest_idx]
163 out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], 'magenta')
164 outfile.write("\n".join(["".join(row) for row in out]) + "\n")
165 if self.lastaction is not None:
166 outfile.write(" ({})\n".format(["South", "North", "East", "West", "Pickup", "Dropoff"][self.lastaction]))
167 else: outfile.write("\n")
168
169 # No need to return anything for human
170 if mode != 'human':
171 with closing(outfile):
172 return outfile.getvalue()
173
[end of gym/envs/toy_text/taxi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/envs/toy_text/taxi.py b/gym/envs/toy_text/taxi.py
--- a/gym/envs/toy_text/taxi.py
+++ b/gym/envs/toy_text/taxi.py
@@ -8,7 +8,7 @@
MAP = [
"+---------+",
"|R: | : :G|",
- "| : : : : |",
+ "| : | : : |",
"| : : : : |",
"| | : | : |",
"|Y| : |B: |",
@@ -23,12 +23,25 @@
by Tom Dietterich
Description:
- There are four designated locations in the grid world indicated by R(ed), B(lue), G(reen), and Y(ellow). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drive to the passenger's location, pick up the passenger, drive to the passenger's destination (another one of the four specified locations), and then drop off the passenger. Once the passenger is dropped off, the episode ends.
+ There are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drives to the passenger's location, picks up the passenger, drives to the passenger's destination (another one of the four specified locations), and then drops off the passenger. Once the passenger is dropped off, the episode ends.
Observations:
- There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is the taxi), and 4 destination locations.
+ There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is in the taxi), and 4 destination locations.
+
+ Passenger locations:
+ - 0: R(ed)
+ - 1: G(reen)
+ - 2: Y(ellow)
+ - 3: B(lue)
+ - 4: in taxi
- Actions:
+ Destinations:
+ - 0: R(ed)
+ - 1: G(reen)
+ - 2: Y(ellow)
+ - 3: B(lue)
+
+ Actions:
There are 6 discrete deterministic actions:
- 0: move south
- 1: move north
@@ -46,15 +59,8 @@
- magenta: destination
- yellow: empty taxi
- green: full taxi
- - other letters (R, G, B and Y): locations for passengers and destinations
-
- actions:
- - 0: south
- - 1: north
- - 2: east
- - 3: west
- - 4: pickup
- - 5: dropoff
+ - other letters (R, G, Y and B): locations for passengers and destinations
+
state space is represented by:
(taxi_row, taxi_col, passenger_location, destination)
| {"golden_diff": "diff --git a/gym/envs/toy_text/taxi.py b/gym/envs/toy_text/taxi.py\n--- a/gym/envs/toy_text/taxi.py\n+++ b/gym/envs/toy_text/taxi.py\n@@ -8,7 +8,7 @@\n MAP = [\n \"+---------+\",\n \"|R: | : :G|\",\n- \"| : : : : |\",\n+ \"| : | : : |\",\n \"| : : : : |\",\n \"| | : | : |\",\n \"|Y| : |B: |\",\n@@ -23,12 +23,25 @@\n by Tom Dietterich\n \n Description:\n- There are four designated locations in the grid world indicated by R(ed), B(lue), G(reen), and Y(ellow). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drive to the passenger's location, pick up the passenger, drive to the passenger's destination (another one of the four specified locations), and then drop off the passenger. Once the passenger is dropped off, the episode ends.\n+ There are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drives to the passenger's location, picks up the passenger, drives to the passenger's destination (another one of the four specified locations), and then drops off the passenger. Once the passenger is dropped off, the episode ends.\n \n Observations: \n- There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is the taxi), and 4 destination locations. \n+ There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is in the taxi), and 4 destination locations. \n+ \n+ Passenger locations:\n+ - 0: R(ed)\n+ - 1: G(reen)\n+ - 2: Y(ellow)\n+ - 3: B(lue)\n+ - 4: in taxi\n \n- Actions: \n+ Destinations:\n+ - 0: R(ed)\n+ - 1: G(reen)\n+ - 2: Y(ellow)\n+ - 3: B(lue)\n+ \n+ Actions:\n There are 6 discrete deterministic actions:\n - 0: move south\n - 1: move north\n@@ -46,15 +59,8 @@\n - magenta: destination\n - yellow: empty taxi\n - green: full taxi\n- - other letters (R, G, B and Y): locations for passengers and destinations\n-\n- actions:\n- - 0: south\n- - 1: north\n- - 2: east\n- - 3: west\n- - 4: pickup\n- - 5: dropoff\n+ - other letters (R, G, Y and B): locations for passengers and destinations\n+ \n \n state space is represented by:\n (taxi_row, taxi_col, passenger_location, destination)\n", "issue": "Taxi environment doesn't match paper\n\r\n\r\nThe map implemented in Taxi-v2 differs slightly from the one in the original paper (shown above). Namely, the paper example has a wall that prevents transitioning from (1,1) to (1,2), but the gym environment implemented doesn't. Any reason for that?\n", "before_files": [{"content": "import sys\nfrom contextlib import closing\nfrom six import StringIO\nfrom gym import utils\nfrom gym.envs.toy_text import discrete\nimport numpy as np\n\nMAP = [\n \"+---------+\",\n \"|R: | : :G|\",\n \"| : : : : |\",\n \"| : : : : |\",\n \"| | : | : |\",\n \"|Y| : |B: |\",\n \"+---------+\",\n]\n\n\nclass TaxiEnv(discrete.DiscreteEnv):\n \"\"\"\n The Taxi Problem\n from \"Hierarchical Reinforcement Learning with the MAXQ Value Function Decomposition\"\n by Tom Dietterich\n\n Description:\n There are four designated locations in the grid world indicated by R(ed), B(lue), G(reen), and Y(ellow). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. 
The taxi drive to the passenger's location, pick up the passenger, drive to the passenger's destination (another one of the four specified locations), and then drop off the passenger. Once the passenger is dropped off, the episode ends.\n\n Observations: \n There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is the taxi), and 4 destination locations. \n \n Actions: \n There are 6 discrete deterministic actions:\n - 0: move south\n - 1: move north\n - 2: move east \n - 3: move west \n - 4: pickup passenger\n - 5: dropoff passenger\n \n Rewards: \n There is a reward of -1 for each action and an additional reward of +20 for delievering the passenger. There is a reward of -10 for executing actions \"pickup\" and \"dropoff\" illegally.\n \n\n Rendering:\n - blue: passenger\n - magenta: destination\n - yellow: empty taxi\n - green: full taxi\n - other letters (R, G, B and Y): locations for passengers and destinations\n\n actions:\n - 0: south\n - 1: north\n - 2: east\n - 3: west\n - 4: pickup\n - 5: dropoff\n\n state space is represented by:\n (taxi_row, taxi_col, passenger_location, destination)\n \"\"\"\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self):\n self.desc = np.asarray(MAP, dtype='c')\n\n self.locs = locs = [(0,0), (0,4), (4,0), (4,3)]\n\n num_states = 500\n num_rows = 5\n num_columns = 5\n max_row = num_rows - 1\n max_col = num_columns - 1\n initial_state_distrib = np.zeros(num_states)\n num_actions = 6\n P = {state: {action: []\n for action in range(num_actions)} for state in range(num_states)}\n for row in range(num_rows):\n for col in range(num_columns):\n for pass_idx in range(len(locs) + 1): # +1 for being inside taxi\n for dest_idx in range(len(locs)):\n state = self.encode(row, col, pass_idx, dest_idx)\n if pass_idx < 4 and pass_idx != dest_idx:\n initial_state_distrib[state] += 1\n for action in range(num_actions):\n # defaults\n new_row, new_col, new_pass_idx = row, col, pass_idx\n reward = -1 # default reward when there is no pickup/dropoff\n done = False\n taxi_loc = (row, col)\n\n if action == 0:\n new_row = min(row + 1, max_row)\n elif action == 1:\n new_row = max(row - 1, 0)\n if action == 2 and self.desc[1 + row, 2 * col + 2] == b\":\":\n new_col = min(col + 1, max_col)\n elif action == 3 and self.desc[1 + row, 2 * col] == b\":\":\n new_col = max(col - 1, 0)\n elif action == 4: # pickup\n if (pass_idx < 4 and taxi_loc == locs[pass_idx]):\n new_pass_idx = 4\n else: # passenger not at location\n reward = -10\n elif action == 5: # dropoff\n if (taxi_loc == locs[dest_idx]) and pass_idx == 4:\n new_pass_idx = dest_idx\n done = True\n reward = 20\n elif (taxi_loc in locs) and pass_idx == 4:\n new_pass_idx = locs.index(taxi_loc)\n else: # dropoff at wrong location\n reward = -10\n new_state = self.encode(\n new_row, new_col, new_pass_idx, dest_idx)\n P[state][action].append(\n (1.0, new_state, reward, done))\n initial_state_distrib /= initial_state_distrib.sum()\n discrete.DiscreteEnv.__init__(\n self, num_states, num_actions, P, initial_state_distrib)\n\n def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):\n # (5) 5, 5, 4\n i = taxi_row\n i *= 5\n i += taxi_col\n i *= 5\n i += pass_loc\n i *= 4\n i += dest_idx\n return i\n\n def decode(self, i):\n out = []\n out.append(i % 4)\n i = i // 4\n out.append(i % 5)\n i = i // 5\n out.append(i % 5)\n i = i // 5\n out.append(i)\n assert 0 <= i < 5\n return reversed(out)\n\n def render(self, mode='human'):\n outfile = 
StringIO() if mode == 'ansi' else sys.stdout\n\n out = self.desc.copy().tolist()\n out = [[c.decode('utf-8') for c in line] for line in out]\n taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)\n\n def ul(x): return \"_\" if x == \" \" else x\n if pass_idx < 4:\n out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(\n out[1 + taxi_row][2 * taxi_col + 1], 'yellow', highlight=True)\n pi, pj = self.locs[pass_idx]\n out[1 + pi][2 * pj + 1] = utils.colorize(out[1 + pi][2 * pj + 1], 'blue', bold=True)\n else: # passenger in taxi\n out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(\n ul(out[1 + taxi_row][2 * taxi_col + 1]), 'green', highlight=True)\n\n di, dj = self.locs[dest_idx]\n out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], 'magenta')\n outfile.write(\"\\n\".join([\"\".join(row) for row in out]) + \"\\n\")\n if self.lastaction is not None:\n outfile.write(\" ({})\\n\".format([\"South\", \"North\", \"East\", \"West\", \"Pickup\", \"Dropoff\"][self.lastaction]))\n else: outfile.write(\"\\n\")\n\n # No need to return anything for human\n if mode != 'human':\n with closing(outfile):\n return outfile.getvalue()\n", "path": "gym/envs/toy_text/taxi.py"}]} | 2,802 | 730 |
gh_patches_debug_17222 | rasdani/github-patches | git_diff | openshift__openshift-ansible-5099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error: unknown command "drain" for "oc" in « Drain Node for Kubelet upgrade » action
The « [Drain Node for Kubelet upgrade](https://github.com/openshift/openshift-ansible/blob/release-3.6/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml#L27) » action executes `oadm drain ...`, but this command doesn't exist:
```
# oadm drain
Error: unknown command "drain" for "oc"
Run 'oc --help' for usage.
```
with this version:
```
# oc version
oc v3.6.0+c4dd4cf
kubernetes v1.6.1+5115d708d7
features: Basic-Auth GSSAPI Kerberos SPNEGO
```
To fix it, I need to add `adm` like this `oadm adm drain ...`
```
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index c93a5d8..a21fb7f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
```
</issue>
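Purely as an illustration (the playbook itself is Ansible YAML, and nothing below comes from the repository), one way to express the same compatibility concern in a standalone helper is to probe whether the installed client accepts `adm drain` and only fall back to the legacy `oadm drain` spelling otherwise:

```python
# Hypothetical helper, not part of openshift-ansible: pick the drain invocation
# that the locally installed client actually supports.
import subprocess

def drain_argv(client_binary="oc"):
    try:
        rc = subprocess.call(
            [client_binary, "adm", "drain", "--help"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except OSError:
        return ["oadm", "drain"]  # client binary missing; assume the legacy layout
    return [client_binary, "adm", "drain"] if rc == 0 else ["oadm", "drain"]

print(" ".join(drain_argv()))
```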
<code>
[start of roles/openshift_cli/library/openshift_container_binary_sync.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # pylint: disable=missing-docstring,invalid-name
4
5 import random
6 import tempfile
7 import shutil
8 import os.path
9
10 # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
11 from ansible.module_utils.basic import * # noqa: F403
12
13
14 DOCUMENTATION = '''
15 ---
16 module: openshift_container_binary_sync
17 short_description: Copies OpenShift binaries out of the given image tag to host system.
18 '''
19
20
21 class BinarySyncError(Exception):
22 def __init__(self, msg):
23 super(BinarySyncError, self).__init__(msg)
24 self.msg = msg
25
26
27 # pylint: disable=too-few-public-methods
28 class BinarySyncer(object):
29 """
30 Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
31 a container onto the host system.
32 """
33
34 def __init__(self, module, image, tag):
35 self.module = module
36 self.changed = False
37 self.output = []
38 self.bin_dir = '/usr/local/bin'
39 self.image = image
40 self.tag = tag
41 self.temp_dir = None # TBD
42
43 def sync(self):
44 container_name = "openshift-cli-%s" % random.randint(1, 100000)
45 rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
46 container_name, '%s:%s' % (self.image, self.tag)])
47 if rc:
48 raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
49 (stdout, stderr))
50 self.output.append(stdout)
51 try:
52 self.temp_dir = tempfile.mkdtemp()
53 self.output.append("Using temp dir: %s" % self.temp_dir)
54
55 rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
56 self.temp_dir])
57 if rc:
58 raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
59 (stdout, stderr))
60
61 rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
62 self.temp_dir])
63 if rc:
64 raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
65 (stdout, stderr))
66
67 self._sync_binary('openshift')
68
69 # In older versions, oc was a symlink to openshift:
70 if os.path.islink(os.path.join(self.temp_dir, 'oc')):
71 self._sync_symlink('oc', 'openshift')
72 else:
73 self._sync_binary('oc')
74
75 # Ensure correct symlinks created:
76 self._sync_symlink('kubectl', 'openshift')
77 self._sync_symlink('oadm', 'openshift')
78 finally:
79 shutil.rmtree(self.temp_dir)
80 self.module.run_command(['docker', 'rm', container_name])
81
82 def _sync_symlink(self, binary_name, link_to):
83 """ Ensure the given binary name exists and links to the expected binary. """
84
85 # The symlink we are creating:
86 link_path = os.path.join(self.bin_dir, binary_name)
87
88 # The expected file we should be linking to:
89 link_dest = os.path.join(self.bin_dir, link_to)
90
91 if not os.path.exists(link_path) or \
92 not os.path.islink(link_path) or \
93 os.path.realpath(link_path) != os.path.realpath(link_dest):
94 if os.path.exists(link_path):
95 os.remove(link_path)
96 os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
97 self.output.append("Symlinked %s to %s." % (link_path, link_dest))
98 self.changed = True
99
100 def _sync_binary(self, binary_name):
101 src_path = os.path.join(self.temp_dir, binary_name)
102 dest_path = os.path.join(self.bin_dir, binary_name)
103 incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
104 if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
105 shutil.move(src_path, dest_path)
106 self.output.append("Moved %s to %s." % (src_path, dest_path))
107 self.changed = True
108
109
110 def main():
111 module = AnsibleModule( # noqa: F405
112 argument_spec=dict(
113 image=dict(required=True),
114 tag=dict(required=True),
115 ),
116 supports_check_mode=True
117 )
118
119 image = module.params['image']
120 tag = module.params['tag']
121
122 binary_syncer = BinarySyncer(module, image, tag)
123
124 try:
125 binary_syncer.sync()
126 except BinarySyncError as ex:
127 module.fail_json(msg=ex.msg)
128
129 return module.exit_json(changed=binary_syncer.changed,
130 output=binary_syncer.output)
131
132
133 if __name__ == '__main__':
134 main()
135
[end of roles/openshift_cli/library/openshift_container_binary_sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -102,6 +102,11 @@
dest_path = os.path.join(self.bin_dir, binary_name)
incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
+
+ # See: https://github.com/openshift/openshift-ansible/issues/4965
+ if os.path.islink(dest_path):
+ os.unlink(dest_path)
+ self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
shutil.move(src_path, dest_path)
self.output.append("Moved %s to %s." % (src_path, dest_path))
self.changed = True
| {"golden_diff": "diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py\n--- a/roles/openshift_cli/library/openshift_container_binary_sync.py\n+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py\n@@ -102,6 +102,11 @@\n dest_path = os.path.join(self.bin_dir, binary_name)\n incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]\n if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:\n+\n+ # See: https://github.com/openshift/openshift-ansible/issues/4965\n+ if os.path.islink(dest_path):\n+ os.unlink(dest_path)\n+ self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))\n shutil.move(src_path, dest_path)\n self.output.append(\"Moved %s to %s.\" % (src_path, dest_path))\n self.changed = True\n", "issue": "Error: unknown command \"drain\" for \"oc\" in \u00ab Drain Node for Kubelet upgrade \u00bb action\n\u00ab [Drain Node for Kubelet upgrade](https://github.com/openshift/openshift-ansible/blob/release-3.6/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml#L27) \u00bb action execute: `oadm drain ...` but this command don't exists:\r\n\r\n```\r\n# oadm drain\r\nError: unknown command \"drain\" for \"oc\"\r\nRun 'oc --help' for usage.\r\n```\r\n\r\nwith this version:\r\n\r\n```\r\n# oc version\r\noc v3.6.0+c4dd4cf\r\nkubernetes v1.6.1+5115d708d7\r\nfeatures: Basic-Auth GSSAPI Kerberos SPNEGO\r\n```\r\n\r\nTo fix it, I need to add `adm` like this `oadm adm drain ...`\r\n\r\n```\r\ndiff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml\r\nindex c93a5d8..a21fb7f 100644\r\n--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml\r\n+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml\r\n@@ -26,7 +26,7 @@\r\n\r\n - name: Drain Node for Kubelet upgrade\r\n command: >\r\n- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets\r\n+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets\r\n delegate_to: \"{{ groups.oo_first_master.0 }}\"\r\n register: l_upgrade_nodes_drain_result\r\n until: not l_upgrade_nodes_drain_result | failed\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# pylint: disable=missing-docstring,invalid-name\n\nimport random\nimport tempfile\nimport shutil\nimport os.path\n\n# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import\nfrom ansible.module_utils.basic import * # noqa: F403\n\n\nDOCUMENTATION = '''\n---\nmodule: openshift_container_binary_sync\nshort_description: Copies OpenShift binaries out of the given image tag to host system.\n'''\n\n\nclass BinarySyncError(Exception):\n def __init__(self, msg):\n super(BinarySyncError, self).__init__(msg)\n self.msg = msg\n\n\n# pylint: disable=too-few-public-methods\nclass BinarySyncer(object):\n \"\"\"\n Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of\n a container onto the host system.\n \"\"\"\n\n def __init__(self, module, image, tag):\n self.module = module\n self.changed = False\n 
self.output = []\n self.bin_dir = '/usr/local/bin'\n self.image = image\n self.tag = tag\n self.temp_dir = None # TBD\n\n def sync(self):\n container_name = \"openshift-cli-%s\" % random.randint(1, 100000)\n rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',\n container_name, '%s:%s' % (self.image, self.tag)])\n if rc:\n raise BinarySyncError(\"Error creating temporary docker container. stdout=%s, stderr=%s\" %\n (stdout, stderr))\n self.output.append(stdout)\n try:\n self.temp_dir = tempfile.mkdtemp()\n self.output.append(\"Using temp dir: %s\" % self.temp_dir)\n\n rc, stdout, stderr = self.module.run_command(['docker', 'cp', \"%s:/usr/bin/openshift\" % container_name,\n self.temp_dir])\n if rc:\n raise BinarySyncError(\"Error copying file from docker container: stdout=%s, stderr=%s\" %\n (stdout, stderr))\n\n rc, stdout, stderr = self.module.run_command(['docker', 'cp', \"%s:/usr/bin/oc\" % container_name,\n self.temp_dir])\n if rc:\n raise BinarySyncError(\"Error copying file from docker container: stdout=%s, stderr=%s\" %\n (stdout, stderr))\n\n self._sync_binary('openshift')\n\n # In older versions, oc was a symlink to openshift:\n if os.path.islink(os.path.join(self.temp_dir, 'oc')):\n self._sync_symlink('oc', 'openshift')\n else:\n self._sync_binary('oc')\n\n # Ensure correct symlinks created:\n self._sync_symlink('kubectl', 'openshift')\n self._sync_symlink('oadm', 'openshift')\n finally:\n shutil.rmtree(self.temp_dir)\n self.module.run_command(['docker', 'rm', container_name])\n\n def _sync_symlink(self, binary_name, link_to):\n \"\"\" Ensure the given binary name exists and links to the expected binary. \"\"\"\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True\n\n def _sync_binary(self, binary_name):\n src_path = os.path.join(self.temp_dir, binary_name)\n dest_path = os.path.join(self.bin_dir, binary_name)\n incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]\n if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:\n shutil.move(src_path, dest_path)\n self.output.append(\"Moved %s to %s.\" % (src_path, dest_path))\n self.changed = True\n\n\ndef main():\n module = AnsibleModule( # noqa: F405\n argument_spec=dict(\n image=dict(required=True),\n tag=dict(required=True),\n ),\n supports_check_mode=True\n )\n\n image = module.params['image']\n tag = module.params['tag']\n\n binary_syncer = BinarySyncer(module, image, tag)\n\n try:\n binary_syncer.sync()\n except BinarySyncError as ex:\n module.fail_json(msg=ex.msg)\n\n return module.exit_json(changed=binary_syncer.changed,\n output=binary_syncer.output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "roles/openshift_cli/library/openshift_container_binary_sync.py"}]} | 2,419 | 242 |
gh_patches_debug_17616 | rasdani/github-patches | git_diff | python-discord__bot-655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Write unit tests for `bot/rules/mentions.py`
Write unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent multiple people from working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
</issue>
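As a starting point only, the skeleton below assumes the rule mirrors the attachments rule included further down in this prompt: an async `apply(last_message, recent_messages, config)` that sums `msg.mentions` per author and returns `None` when the limit is respected. Every name here other than that assumed signature is made up for illustration:

```python
# Hypothetical test skeleton for bot/rules/mentions.py (signature assumed).
import unittest
from unittest.mock import MagicMock

from bot.rules import mentions


def make_msg(author: str, total_mentions: int) -> MagicMock:
    msg = MagicMock()
    msg.author = author
    msg.mentions = [MagicMock() for _ in range(total_mentions)]
    return msg


class MentionsRuleTests(unittest.IsolatedAsyncioTestCase):
    def setUp(self):
        self.config = {"max": 2, "interval": 10}

    async def test_returns_none_within_limit(self):
        last = make_msg("bob", 1)
        self.assertIsNone(await mentions.apply(last, [last], self.config))

    async def test_flags_author_exceeding_limit(self):
        last = make_msg("bob", 2)
        recent = [last, make_msg("bob", 2)]
        self.assertIsNotNone(await mentions.apply(last, recent, self.config))


if __name__ == "__main__":
    unittest.main()
```

`unittest.IsolatedAsyncioTestCase` needs Python 3.8+; the project's own async test helpers would be the natural substitute if it targets an older interpreter.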
<code>
[start of bot/rules/attachments.py]
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects total attachments exceeding the limit sent by a single user."""
10 relevant_messages = [last_message] + [
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and len(msg.attachments) > 0
16 )
17 ]
18 total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
19
20 if total_recent_attachments > config['max']:
21 return (
22 f"sent {total_recent_attachments} attachments in {config['max']}s",
23 (last_message.author,),
24 relevant_messages
25 )
26 return None
27
[end of bot/rules/attachments.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py
--- a/bot/rules/attachments.py
+++ b/bot/rules/attachments.py
@@ -7,14 +7,14 @@
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
"""Detects total attachments exceeding the limit sent by a single user."""
- relevant_messages = [last_message] + [
+ relevant_messages = tuple(
msg
for msg in recent_messages
if (
msg.author == last_message.author
and len(msg.attachments) > 0
)
- ]
+ )
total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
if total_recent_attachments > config['max']:
| {"golden_diff": "diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py\n--- a/bot/rules/attachments.py\n+++ b/bot/rules/attachments.py\n@@ -7,14 +7,14 @@\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n- relevant_messages = [last_message] + [\n+ relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n- ]\n+ )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n \n if total_recent_attachments > config['max']:\n", "issue": "Write unit tests for `bot/rules/mentions.py`\nWrite unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).\n\n## Implementation details\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\n\n## Additional information\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interesting in tackling this issue.\n\n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = [last_message] + [\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n ]\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}]} | 954 | 185 |
gh_patches_debug_40605 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: cloudwatch_log_group_retention_policy_specific_days_enabled not measuring "never expire"
### Steps to Reproduce
When log groups in CloudWatch are set to "never expire", the mentioned check sees them as having "0 days" of retention.
### Expected behavior
Check should be able to detect the "never expire" log group retention setting
### Actual Result with Screenshots or Logs


### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 on Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
</issue>
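The relevant API detail is that `DescribeLogGroups` simply omits `retentionInDays` for groups set to never expire, so the check has to treat the missing key as compliant rather than defaulting it to zero. A minimal standalone sketch of that logic (not the actual Prowler patch) is:

```python
# Treat a missing retentionInDays (CloudWatch's "Never expire") as satisfying
# any minimum retention requirement.
def retention_status(log_group: dict, required_days: int) -> str:
    retention = log_group.get("retentionInDays")  # None means "never expire"
    if retention is None or retention >= required_days:
        return "PASS"
    return "FAIL"

print(retention_status({"logGroupName": "app"}, 365))                         # PASS
print(retention_status({"logGroupName": "app", "retentionInDays": 30}, 365))  # FAIL
```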
<code>
[start of prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py]
1 from prowler.config.config import get_config_var
2 from prowler.lib.check.models import Check, Check_Report_AWS
3 from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
4
5
6 class cloudwatch_log_group_retention_policy_specific_days_enabled(Check):
7 def execute(self):
8 findings = []
9 specific_retention_days = get_config_var("log_group_retention_days")
10 for log_group in logs_client.log_groups:
11 report = Check_Report_AWS(self.metadata())
12 report.region = log_group.region
13 report.resource_id = log_group.name
14 report.resource_arn = log_group.arn
15 report.resource_tags = log_group.tags
16 if log_group.retention_days < specific_retention_days:
17 report.status = "FAIL"
18 report.status_extended = f"Log Group {log_group.name} has less than {specific_retention_days} days retention period ({log_group.retention_days} days)."
19 else:
20 report.status = "PASS"
21 report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days."
22 findings.append(report)
23 return findings
24
[end of prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py]
[start of prowler/providers/aws/services/cloudwatch/cloudwatch_service.py]
1 import threading
2 from datetime import datetime, timezone
3 from typing import Optional
4
5 from pydantic import BaseModel
6
7 from prowler.lib.logger import logger
8 from prowler.lib.scan_filters.scan_filters import is_resource_filtered
9 from prowler.providers.aws.aws_provider import generate_regional_clients
10
11
12 ################## CloudWatch
13 class CloudWatch:
14 def __init__(self, audit_info):
15 self.service = "cloudwatch"
16 self.session = audit_info.audit_session
17 self.audited_account = audit_info.audited_account
18 self.audit_resources = audit_info.audit_resources
19 self.region = list(
20 generate_regional_clients(
21 self.service, audit_info, global_service=True
22 ).keys()
23 )[0]
24 self.regional_clients = generate_regional_clients(self.service, audit_info)
25 self.metric_alarms = []
26 self.__threading_call__(self.__describe_alarms__)
27 self.__list_tags_for_resource__()
28
29 def __get_session__(self):
30 return self.session
31
32 def __threading_call__(self, call):
33 threads = []
34 for regional_client in self.regional_clients.values():
35 threads.append(threading.Thread(target=call, args=(regional_client,)))
36 for t in threads:
37 t.start()
38 for t in threads:
39 t.join()
40
41 def __describe_alarms__(self, regional_client):
42 logger.info("CloudWatch - Describing alarms...")
43 try:
44 describe_alarms_paginator = regional_client.get_paginator("describe_alarms")
45 for page in describe_alarms_paginator.paginate():
46 for alarm in page["MetricAlarms"]:
47 if not self.audit_resources or (
48 is_resource_filtered(alarm["AlarmArn"], self.audit_resources)
49 ):
50 metric_name = None
51 if "MetricName" in alarm:
52 metric_name = alarm["MetricName"]
53 namespace = None
54 if "Namespace" in alarm:
55 namespace = alarm["Namespace"]
56 self.metric_alarms.append(
57 MetricAlarm(
58 arn=alarm["AlarmArn"],
59 name=alarm["AlarmName"],
60 metric=metric_name,
61 name_space=namespace,
62 region=regional_client.region,
63 )
64 )
65 except Exception as error:
66 logger.error(
67 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
68 )
69
70 def __list_tags_for_resource__(self):
71 logger.info("CloudWatch - List Tags...")
72 try:
73 for metric_alarm in self.metric_alarms:
74 regional_client = self.regional_clients[metric_alarm.region]
75 response = regional_client.list_tags_for_resource(
76 ResourceARN=metric_alarm.arn
77 )["Tags"]
78 metric_alarm.tags = response
79 except Exception as error:
80 logger.error(
81 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
82 )
83
84
85 ################## CloudWatch Logs
86 class Logs:
87 def __init__(self, audit_info):
88 self.service = "logs"
89 self.session = audit_info.audit_session
90 self.audited_account = audit_info.audited_account
91 self.audit_resources = audit_info.audit_resources
92 self.regional_clients = generate_regional_clients(self.service, audit_info)
93 self.metric_filters = []
94 self.log_groups = []
95 self.__threading_call__(self.__describe_metric_filters__)
96 self.__threading_call__(self.__describe_log_groups__)
97 if (
98 "cloudwatch_log_group_no_secrets_in_logs"
99 in audit_info.audit_metadata.expected_checks
100 ):
101 self.events_per_log_group_threshold = (
102 1000 # The threshold for number of events to return per log group.
103 )
104 self.__threading_call__(self.__get_log_events__)
105 self.__list_tags_for_resource__()
106
107 def __get_session__(self):
108 return self.session
109
110 def __threading_call__(self, call):
111 threads = []
112 for regional_client in self.regional_clients.values():
113 threads.append(threading.Thread(target=call, args=(regional_client,)))
114 for t in threads:
115 t.start()
116 for t in threads:
117 t.join()
118
119 def __describe_metric_filters__(self, regional_client):
120 logger.info("CloudWatch Logs - Describing metric filters...")
121 try:
122 describe_metric_filters_paginator = regional_client.get_paginator(
123 "describe_metric_filters"
124 )
125 for page in describe_metric_filters_paginator.paginate():
126 for filter in page["metricFilters"]:
127 if not self.audit_resources or (
128 is_resource_filtered(filter["filterName"], self.audit_resources)
129 ):
130 self.metric_filters.append(
131 MetricFilter(
132 name=filter["filterName"],
133 metric=filter["metricTransformations"][0]["metricName"],
134 pattern=filter.get("filterPattern", ""),
135 log_group=filter["logGroupName"],
136 region=regional_client.region,
137 )
138 )
139 except Exception as error:
140 logger.error(
141 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
142 )
143
144 def __describe_log_groups__(self, regional_client):
145 logger.info("CloudWatch Logs - Describing log groups...")
146 try:
147 describe_log_groups_paginator = regional_client.get_paginator(
148 "describe_log_groups"
149 )
150 for page in describe_log_groups_paginator.paginate():
151 for log_group in page["logGroups"]:
152 if not self.audit_resources or (
153 is_resource_filtered(log_group["arn"], self.audit_resources)
154 ):
155 kms = None
156 retention_days = 0
157 if "kmsKeyId" in log_group:
158 kms = log_group["kmsKeyId"]
159 if "retentionInDays" in log_group:
160 retention_days = log_group["retentionInDays"]
161 self.log_groups.append(
162 LogGroup(
163 arn=log_group["arn"],
164 name=log_group["logGroupName"],
165 retention_days=retention_days,
166 kms_id=kms,
167 region=regional_client.region,
168 )
169 )
170 except Exception as error:
171 logger.error(
172 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
173 )
174
175 def __get_log_events__(self, regional_client):
176 regional_log_groups = [
177 log_group
178 for log_group in self.log_groups
179 if log_group.region == regional_client.region
180 ]
181 total_log_groups = len(regional_log_groups)
182 logger.info(
183 f"CloudWatch Logs - Retrieving log events for {total_log_groups} log groups in {regional_client.region}..."
184 )
185 try:
186 for count, log_group in enumerate(regional_log_groups, start=1):
187 events = regional_client.filter_log_events(
188 logGroupName=log_group.name,
189 limit=self.events_per_log_group_threshold,
190 )["events"]
191 for event in events:
192 if event["logStreamName"] not in log_group.log_streams:
193 log_group.log_streams[event["logStreamName"]] = []
194 log_group.log_streams[event["logStreamName"]].append(event)
195 if count % 10 == 0:
196 logger.info(
197 f"CloudWatch Logs - Retrieved log events for {count}/{total_log_groups} log groups in {regional_client.region}..."
198 )
199 except Exception as error:
200 logger.error(
201 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
202 )
203 logger.info(
204 f"CloudWatch Logs - Finished retrieving log events in {regional_client.region}..."
205 )
206
207 def __list_tags_for_resource__(self):
208 logger.info("CloudWatch Logs - List Tags...")
209 try:
210 for log_group in self.log_groups:
211 regional_client = self.regional_clients[log_group.region]
212 response = regional_client.list_tags_log_group(
213 logGroupName=log_group.name
214 )["tags"]
215 log_group.tags = [response]
216 except Exception as error:
217 logger.error(
218 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
219 )
220
221
222 class MetricAlarm(BaseModel):
223 arn: str
224 name: str
225 metric: Optional[str]
226 name_space: Optional[str]
227 region: str
228 tags: Optional[list] = []
229
230
231 class MetricFilter(BaseModel):
232 name: str
233 metric: str
234 pattern: str
235 log_group: str
236 region: str
237
238
239 class LogGroup(BaseModel):
240 arn: str
241 name: str
242 retention_days: int
243 kms_id: Optional[str]
244 region: str
245 log_streams: dict[
246 str, list[str]
247 ] = {} # Log stream name as the key, array of events as the value
248 tags: Optional[list] = []
249
250
251 def convert_to_cloudwatch_timestamp_format(epoch_time):
252 date_time = datetime.fromtimestamp(
253 epoch_time / 1000, datetime.now(timezone.utc).astimezone().tzinfo
254 )
255 datetime_str = date_time.strftime(
256 "%Y-%m-%dT%H:%M:%S.!%f!%z"
257 ) # use exclamation marks as placeholders to convert datetime str to cloudwatch timestamp str
258 datetime_parts = datetime_str.split("!")
259 return (
260 datetime_parts[0]
261 + datetime_parts[1][:-3]
262 + datetime_parts[2][:-2]
263 + ":"
264 + datetime_parts[2][-2:]
265 ) # Removes the microseconds, and places a ':' character in the timezone offset
266
[end of prowler/providers/aws/services/cloudwatch/cloudwatch_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
@@ -13,11 +13,17 @@
report.resource_id = log_group.name
report.resource_arn = log_group.arn
report.resource_tags = log_group.tags
- if log_group.retention_days < specific_retention_days:
+ if (
+ log_group.never_expire is False
+ and log_group.retention_days < specific_retention_days
+ ):
report.status = "FAIL"
report.status_extended = f"Log Group {log_group.name} has less than {specific_retention_days} days retention period ({log_group.retention_days} days)."
else:
report.status = "PASS"
- report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days."
+ if log_group.never_expire is True:
+ report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it never expires."
+ else:
+ report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days."
findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
@@ -152,17 +152,18 @@
if not self.audit_resources or (
is_resource_filtered(log_group["arn"], self.audit_resources)
):
- kms = None
- retention_days = 0
- if "kmsKeyId" in log_group:
- kms = log_group["kmsKeyId"]
- if "retentionInDays" in log_group:
- retention_days = log_group["retentionInDays"]
+ never_expire = False
+ kms = log_group.get("kmsKeyId")
+ retention_days = log_group.get("retentionInDays")
+ if not retention_days:
+ never_expire = True
+ retention_days = 9999
self.log_groups.append(
LogGroup(
arn=log_group["arn"],
name=log_group["logGroupName"],
retention_days=retention_days,
+ never_expire=never_expire,
kms_id=kms,
region=regional_client.region,
)
@@ -240,6 +241,7 @@
arn: str
name: str
retention_days: int
+ never_expire: bool
kms_id: Optional[str]
region: str
log_streams: dict[
| {"golden_diff": "diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py\n--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py\n+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py\n@@ -13,11 +13,17 @@\n report.resource_id = log_group.name\n report.resource_arn = log_group.arn\n report.resource_tags = log_group.tags\n- if log_group.retention_days < specific_retention_days:\n+ if (\n+ log_group.never_expire is False\n+ and log_group.retention_days < specific_retention_days\n+ ):\n report.status = \"FAIL\"\n report.status_extended = f\"Log Group {log_group.name} has less than {specific_retention_days} days retention period ({log_group.retention_days} days).\"\n else:\n report.status = \"PASS\"\n- report.status_extended = f\"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days.\"\n+ if log_group.never_expire is True:\n+ report.status_extended = f\"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it never expires.\"\n+ else:\n+ report.status_extended = f\"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days.\"\n findings.append(report)\n return findings\ndiff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py\n--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py\n+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py\n@@ -152,17 +152,18 @@\n if not self.audit_resources or (\n is_resource_filtered(log_group[\"arn\"], self.audit_resources)\n ):\n- kms = None\n- retention_days = 0\n- if \"kmsKeyId\" in log_group:\n- kms = log_group[\"kmsKeyId\"]\n- if \"retentionInDays\" in log_group:\n- retention_days = log_group[\"retentionInDays\"]\n+ never_expire = False\n+ kms = log_group.get(\"kmsKeyId\")\n+ retention_days = log_group.get(\"retentionInDays\")\n+ if not retention_days:\n+ never_expire = True\n+ retention_days = 9999\n self.log_groups.append(\n LogGroup(\n arn=log_group[\"arn\"],\n name=log_group[\"logGroupName\"],\n retention_days=retention_days,\n+ never_expire=never_expire,\n kms_id=kms,\n region=regional_client.region,\n )\n@@ -240,6 +241,7 @@\n arn: str\n name: str\n retention_days: int\n+ never_expire: bool\n kms_id: Optional[str]\n region: str\n log_streams: dict[\n", "issue": "[Bug]: cloudwatch_log_group_retention_policy_specific_days_enabled not measuring \"never expire\"\n### Steps to Reproduce\n\nWhen having log groups in cloudwatch that are set to \"never expire\", the mentioned check sees it as \"0 days\"\n\n### Expected behavior\n\nCheck should be able to detect the \"never expire\" log group retention setting\n\n### Actual Result with Screenshots or Logs\n\n\r\n\r\n\n\n### How did you install Prowler?\n\nFrom pip package (pip install prowler)\n\n### Environment Resource\n\nWorkstation\n\n### OS used\n\nWSL2 on Windows 11\n\n### Prowler 
version\n\nProwler 3.4.0 (it is the latest version, yay!)\n\n### Pip version\n\npip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.config.config import get_config_var\nfrom prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.cloudwatch.logs_client import logs_client\n\n\nclass cloudwatch_log_group_retention_policy_specific_days_enabled(Check):\n def execute(self):\n findings = []\n specific_retention_days = get_config_var(\"log_group_retention_days\")\n for log_group in logs_client.log_groups:\n report = Check_Report_AWS(self.metadata())\n report.region = log_group.region\n report.resource_id = log_group.name\n report.resource_arn = log_group.arn\n report.resource_tags = log_group.tags\n if log_group.retention_days < specific_retention_days:\n report.status = \"FAIL\"\n report.status_extended = f\"Log Group {log_group.name} has less than {specific_retention_days} days retention period ({log_group.retention_days} days).\"\n else:\n report.status = \"PASS\"\n report.status_extended = f\"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days.\"\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py"}, {"content": "import threading\nfrom datetime import datetime, timezone\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom prowler.lib.logger import logger\nfrom prowler.lib.scan_filters.scan_filters import is_resource_filtered\nfrom prowler.providers.aws.aws_provider import generate_regional_clients\n\n\n################## CloudWatch\nclass CloudWatch:\n def __init__(self, audit_info):\n self.service = \"cloudwatch\"\n self.session = audit_info.audit_session\n self.audited_account = audit_info.audited_account\n self.audit_resources = audit_info.audit_resources\n self.region = list(\n generate_regional_clients(\n self.service, audit_info, global_service=True\n ).keys()\n )[0]\n self.regional_clients = generate_regional_clients(self.service, audit_info)\n self.metric_alarms = []\n self.__threading_call__(self.__describe_alarms__)\n self.__list_tags_for_resource__()\n\n def __get_session__(self):\n return self.session\n\n def __threading_call__(self, call):\n threads = []\n for regional_client in self.regional_clients.values():\n threads.append(threading.Thread(target=call, args=(regional_client,)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n def __describe_alarms__(self, regional_client):\n logger.info(\"CloudWatch - Describing alarms...\")\n try:\n describe_alarms_paginator = regional_client.get_paginator(\"describe_alarms\")\n for page in describe_alarms_paginator.paginate():\n for alarm in page[\"MetricAlarms\"]:\n if not self.audit_resources or (\n is_resource_filtered(alarm[\"AlarmArn\"], self.audit_resources)\n ):\n metric_name = None\n if \"MetricName\" in alarm:\n metric_name = alarm[\"MetricName\"]\n namespace = None\n if \"Namespace\" in alarm:\n namespace = alarm[\"Namespace\"]\n self.metric_alarms.append(\n MetricAlarm(\n arn=alarm[\"AlarmArn\"],\n name=alarm[\"AlarmName\"],\n metric=metric_name,\n name_space=namespace,\n region=regional_client.region,\n )\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- 
{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __list_tags_for_resource__(self):\n logger.info(\"CloudWatch - List Tags...\")\n try:\n for metric_alarm in self.metric_alarms:\n regional_client = self.regional_clients[metric_alarm.region]\n response = regional_client.list_tags_for_resource(\n ResourceARN=metric_alarm.arn\n )[\"Tags\"]\n metric_alarm.tags = response\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\n################## CloudWatch Logs\nclass Logs:\n def __init__(self, audit_info):\n self.service = \"logs\"\n self.session = audit_info.audit_session\n self.audited_account = audit_info.audited_account\n self.audit_resources = audit_info.audit_resources\n self.regional_clients = generate_regional_clients(self.service, audit_info)\n self.metric_filters = []\n self.log_groups = []\n self.__threading_call__(self.__describe_metric_filters__)\n self.__threading_call__(self.__describe_log_groups__)\n if (\n \"cloudwatch_log_group_no_secrets_in_logs\"\n in audit_info.audit_metadata.expected_checks\n ):\n self.events_per_log_group_threshold = (\n 1000 # The threshold for number of events to return per log group.\n )\n self.__threading_call__(self.__get_log_events__)\n self.__list_tags_for_resource__()\n\n def __get_session__(self):\n return self.session\n\n def __threading_call__(self, call):\n threads = []\n for regional_client in self.regional_clients.values():\n threads.append(threading.Thread(target=call, args=(regional_client,)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n def __describe_metric_filters__(self, regional_client):\n logger.info(\"CloudWatch Logs - Describing metric filters...\")\n try:\n describe_metric_filters_paginator = regional_client.get_paginator(\n \"describe_metric_filters\"\n )\n for page in describe_metric_filters_paginator.paginate():\n for filter in page[\"metricFilters\"]:\n if not self.audit_resources or (\n is_resource_filtered(filter[\"filterName\"], self.audit_resources)\n ):\n self.metric_filters.append(\n MetricFilter(\n name=filter[\"filterName\"],\n metric=filter[\"metricTransformations\"][0][\"metricName\"],\n pattern=filter.get(\"filterPattern\", \"\"),\n log_group=filter[\"logGroupName\"],\n region=regional_client.region,\n )\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __describe_log_groups__(self, regional_client):\n logger.info(\"CloudWatch Logs - Describing log groups...\")\n try:\n describe_log_groups_paginator = regional_client.get_paginator(\n \"describe_log_groups\"\n )\n for page in describe_log_groups_paginator.paginate():\n for log_group in page[\"logGroups\"]:\n if not self.audit_resources or (\n is_resource_filtered(log_group[\"arn\"], self.audit_resources)\n ):\n kms = None\n retention_days = 0\n if \"kmsKeyId\" in log_group:\n kms = log_group[\"kmsKeyId\"]\n if \"retentionInDays\" in log_group:\n retention_days = log_group[\"retentionInDays\"]\n self.log_groups.append(\n LogGroup(\n arn=log_group[\"arn\"],\n name=log_group[\"logGroupName\"],\n retention_days=retention_days,\n kms_id=kms,\n region=regional_client.region,\n )\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __get_log_events__(self, regional_client):\n regional_log_groups 
= [\n log_group\n for log_group in self.log_groups\n if log_group.region == regional_client.region\n ]\n total_log_groups = len(regional_log_groups)\n logger.info(\n f\"CloudWatch Logs - Retrieving log events for {total_log_groups} log groups in {regional_client.region}...\"\n )\n try:\n for count, log_group in enumerate(regional_log_groups, start=1):\n events = regional_client.filter_log_events(\n logGroupName=log_group.name,\n limit=self.events_per_log_group_threshold,\n )[\"events\"]\n for event in events:\n if event[\"logStreamName\"] not in log_group.log_streams:\n log_group.log_streams[event[\"logStreamName\"]] = []\n log_group.log_streams[event[\"logStreamName\"]].append(event)\n if count % 10 == 0:\n logger.info(\n f\"CloudWatch Logs - Retrieved log events for {count}/{total_log_groups} log groups in {regional_client.region}...\"\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n logger.info(\n f\"CloudWatch Logs - Finished retrieving log events in {regional_client.region}...\"\n )\n\n def __list_tags_for_resource__(self):\n logger.info(\"CloudWatch Logs - List Tags...\")\n try:\n for log_group in self.log_groups:\n regional_client = self.regional_clients[log_group.region]\n response = regional_client.list_tags_log_group(\n logGroupName=log_group.name\n )[\"tags\"]\n log_group.tags = [response]\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\nclass MetricAlarm(BaseModel):\n arn: str\n name: str\n metric: Optional[str]\n name_space: Optional[str]\n region: str\n tags: Optional[list] = []\n\n\nclass MetricFilter(BaseModel):\n name: str\n metric: str\n pattern: str\n log_group: str\n region: str\n\n\nclass LogGroup(BaseModel):\n arn: str\n name: str\n retention_days: int\n kms_id: Optional[str]\n region: str\n log_streams: dict[\n str, list[str]\n ] = {} # Log stream name as the key, array of events as the value\n tags: Optional[list] = []\n\n\ndef convert_to_cloudwatch_timestamp_format(epoch_time):\n date_time = datetime.fromtimestamp(\n epoch_time / 1000, datetime.now(timezone.utc).astimezone().tzinfo\n )\n datetime_str = date_time.strftime(\n \"%Y-%m-%dT%H:%M:%S.!%f!%z\"\n ) # use exclamation marks as placeholders to convert datetime str to cloudwatch timestamp str\n datetime_parts = datetime_str.split(\"!\")\n return (\n datetime_parts[0]\n + datetime_parts[1][:-3]\n + datetime_parts[2][:-2]\n + \":\"\n + datetime_parts[2][-2:]\n ) # Removes the microseconds, and places a ':' character in the timezone offset\n", "path": "prowler/providers/aws/services/cloudwatch/cloudwatch_service.py"}]} | 3,982 | 736 |
gh_patches_debug_36081 | rasdani/github-patches | git_diff | getredash__redash-221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User can't add another dashboard after creating one without refreshing
</issue>
<code>
[start of redash/tasks.py]
1 import time
2 import datetime
3 import logging
4 import redis
5 from celery import Task
6 from celery.result import AsyncResult
7 from celery.utils.log import get_task_logger
8 from redash import redis_connection, models, statsd_client
9 from redash.utils import gen_query_hash
10 from redash.worker import celery
11 from redash.data.query_runner import get_query_runner
12
13 logger = get_task_logger(__name__)
14
15
16 class BaseTask(Task):
17 abstract = True
18
19 def after_return(self, *args, **kwargs):
20 models.db.close_db(None)
21
22 def __call__(self, *args, **kwargs):
23 models.db.connect_db()
24 return super(BaseTask, self).__call__(*args, **kwargs)
25
26
27 class QueryTask(object):
28 MAX_RETRIES = 5
29
30 # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
31 STATUSES = {
32 'PENDING': 1,
33 'STARTED': 2,
34 'SUCCESS': 3,
35 'FAILURE': 4,
36 'REVOKED': 4
37 }
38
39 def __init__(self, job_id=None, async_result=None):
40 if async_result:
41 self._async_result = async_result
42 else:
43 self._async_result = AsyncResult(job_id, app=celery)
44
45 @property
46 def id(self):
47 return self._async_result.id
48
49 @classmethod
50 def add_task(cls, query, data_source, scheduled=False):
51 query_hash = gen_query_hash(query)
52 logging.info("[Manager][%s] Inserting job", query_hash)
53 try_count = 0
54 job = None
55
56 while try_count < cls.MAX_RETRIES:
57 try_count += 1
58
59 pipe = redis_connection.pipeline()
60 try:
61 pipe.watch('query_hash_job:%s' % query_hash)
62 job_id = pipe.get('query_hash_job:%s' % query_hash)
63 if job_id:
64 logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
65
66 job = cls(job_id=job_id)
67 else:
68 pipe.multi()
69
70 if scheduled:
71 queue_name = data_source.queue_name
72 else:
73 queue_name = data_source.scheduled_queue_name
74
75 result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
76 job = cls(async_result=result)
77 logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
78 pipe.set('query_hash_job:%s' % query_hash, job.id)
79 pipe.execute()
80 break
81
82 except redis.WatchError:
83 continue
84
85 if not job:
86 logging.error("[Manager][%s] Failed adding job for query.", query_hash)
87
88 return job
89
90 def to_dict(self):
91 if self._async_result.status == 'STARTED':
92 updated_at = self._async_result.result.get('start_time', 0)
93 else:
94 updated_at = 0
95
96 if self._async_result.failed() and isinstance(self._async_result.result, Exception):
97 error = self._async_result.result.message
98 elif self._async_result.status == 'REVOKED':
99 error = 'Query execution cancelled.'
100 else:
101 error = ''
102
103 if self._async_result.successful():
104 query_result_id = self._async_result.result
105 else:
106 query_result_id = None
107
108 return {
109 'id': self._async_result.id,
110 'updated_at': updated_at,
111 'status': self.STATUSES[self._async_result.status],
112 'error': error,
113 'query_result_id': query_result_id,
114 }
115
116 def cancel(self):
117 return self._async_result.revoke(terminate=True)
118
119
120 @celery.task(base=BaseTask)
121 def refresh_queries():
122 # self.status['last_refresh_at'] = time.time()
123 # self._save_status()
124
125 logger.info("Refreshing queries...")
126
127 outdated_queries_count = 0
128 for query in models.Query.outdated_queries():
129 # TODO: this should go into lower priority
130 QueryTask.add_task(query.query, query.data_source, scheduled=True)
131 outdated_queries_count += 1
132
133 statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
134 # TODO: decide if we still need this
135 # statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))
136
137 logger.info("Done refreshing queries. Found %d outdated queries." % outdated_queries_count)
138
139 status = redis_connection.hgetall('redash:status')
140 now = time.time()
141
142 redis_connection.hmset('redash:status', {
143 'outdated_queries_count': outdated_queries_count,
144 'last_refresh_at': now
145 })
146
147 statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
148
149 @celery.task(bind=True, base=BaseTask, track_started=True)
150 def execute_query(self, query, data_source_id):
151 # TODO: maybe this should be a class?
152 start_time = time.time()
153
154 logger.info("Loading data source (%d)...", data_source_id)
155
156 # TODO: we should probably cache data sources in Redis
157 data_source = models.DataSource.get_by_id(data_source_id)
158
159 self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})
160
161 logger.info("Executing query:\n%s", query)
162
163 query_hash = gen_query_hash(query)
164 query_runner = get_query_runner(data_source.type, data_source.options)
165
166 if getattr(query_runner, 'annotate_query', True):
167 # TODO: anotate with queu ename
168 annotated_query = "/* Task Id: %s, Query hash: %s */ %s" % \
169 (self.request.id, query_hash, query)
170 else:
171 annotated_query = query
172
173 with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):
174 data, error = query_runner(annotated_query)
175
176 run_time = time.time() - start_time
177 logger.info("Query finished... data length=%s, error=%s", data and len(data), error)
178
179 self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})
180
181 # TODO: it is possible that storing the data will fail, and we will need to retry
182 # while we already marked the job as done
183 # Delete query_hash
184 redis_connection.delete('query_hash_job:%s' % query_hash)
185
186 if not error:
187 query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
188 else:
189 raise Exception(error)
190
191 return query_result.id
192
193
[end of redash/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/tasks.py b/redash/tasks.py
--- a/redash/tasks.py
+++ b/redash/tasks.py
@@ -58,8 +58,8 @@
pipe = redis_connection.pipeline()
try:
- pipe.watch('query_hash_job:%s' % query_hash)
- job_id = pipe.get('query_hash_job:%s' % query_hash)
+ pipe.watch(cls._job_lock_id(query_hash, data_source.id))
+ job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
if job_id:
logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)
@@ -75,7 +75,7 @@
result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)
job = cls(async_result=result)
logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
- pipe.set('query_hash_job:%s' % query_hash, job.id)
+ pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id)
pipe.execute()
break
@@ -116,6 +116,9 @@
def cancel(self):
return self._async_result.revoke(terminate=True)
+ @staticmethod
+ def _job_lock_id(query_hash, data_source_id):
+ return "query_hash_job:%s:%s" % (data_source_id, query_hash)
@celery.task(base=BaseTask)
def refresh_queries():
@@ -178,11 +181,11 @@
self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})
- # TODO: it is possible that storing the data will fail, and we will need to retry
- # while we already marked the job as done
# Delete query_hash
- redis_connection.delete('query_hash_job:%s' % query_hash)
+ redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
+ # TODO: it is possible that storing the data will fail, and we will need to retry
+ # while we already marked the job as done
if not error:
query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())
else:
| {"golden_diff": "diff --git a/redash/tasks.py b/redash/tasks.py\n--- a/redash/tasks.py\n+++ b/redash/tasks.py\n@@ -58,8 +58,8 @@\n \n pipe = redis_connection.pipeline()\n try:\n- pipe.watch('query_hash_job:%s' % query_hash)\n- job_id = pipe.get('query_hash_job:%s' % query_hash)\n+ pipe.watch(cls._job_lock_id(query_hash, data_source.id))\n+ job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n \n@@ -75,7 +75,7 @@\n result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)\n job = cls(async_result=result)\n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n- pipe.set('query_hash_job:%s' % query_hash, job.id)\n+ pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id)\n pipe.execute()\n break\n \n@@ -116,6 +116,9 @@\n def cancel(self):\n return self._async_result.revoke(terminate=True)\n \n+ @staticmethod\n+ def _job_lock_id(query_hash, data_source_id):\n+ return \"query_hash_job:%s:%s\" % (data_source_id, query_hash)\n \n @celery.task(base=BaseTask)\n def refresh_queries():\n@@ -178,11 +181,11 @@\n \n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n \n- # TODO: it is possible that storing the data will fail, and we will need to retry\n- # while we already marked the job as done\n # Delete query_hash\n- redis_connection.delete('query_hash_job:%s' % query_hash)\n+ redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))\n \n+ # TODO: it is possible that storing the data will fail, and we will need to retry\n+ # while we already marked the job as done\n if not error:\n query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())\n else:\n", "issue": "User can't add another dashboard after creating one without refreshing\n\n", "before_files": [{"content": "import time\nimport datetime\nimport logging\nimport redis\nfrom celery import Task\nfrom celery.result import AsyncResult\nfrom celery.utils.log import get_task_logger\nfrom redash import redis_connection, models, statsd_client\nfrom redash.utils import gen_query_hash\nfrom redash.worker import celery\nfrom redash.data.query_runner import get_query_runner\n\nlogger = get_task_logger(__name__)\n\n\nclass BaseTask(Task):\n abstract = True\n\n def after_return(self, *args, **kwargs):\n models.db.close_db(None)\n\n def __call__(self, *args, **kwargs):\n models.db.connect_db()\n return super(BaseTask, self).__call__(*args, **kwargs)\n\n\nclass QueryTask(object):\n MAX_RETRIES = 5\n\n # TODO: this is mapping to the old Job class statuses. 
Need to update the client side and remove this\n STATUSES = {\n 'PENDING': 1,\n 'STARTED': 2,\n 'SUCCESS': 3,\n 'FAILURE': 4,\n 'REVOKED': 4\n }\n\n def __init__(self, job_id=None, async_result=None):\n if async_result:\n self._async_result = async_result\n else:\n self._async_result = AsyncResult(job_id, app=celery)\n\n @property\n def id(self):\n return self._async_result.id\n\n @classmethod\n def add_task(cls, query, data_source, scheduled=False):\n query_hash = gen_query_hash(query)\n logging.info(\"[Manager][%s] Inserting job\", query_hash)\n try_count = 0\n job = None\n\n while try_count < cls.MAX_RETRIES:\n try_count += 1\n\n pipe = redis_connection.pipeline()\n try:\n pipe.watch('query_hash_job:%s' % query_hash)\n job_id = pipe.get('query_hash_job:%s' % query_hash)\n if job_id:\n logging.info(\"[Manager][%s] Found existing job: %s\", query_hash, job_id)\n\n job = cls(job_id=job_id)\n else:\n pipe.multi()\n\n if scheduled:\n queue_name = data_source.queue_name\n else:\n queue_name = data_source.scheduled_queue_name\n\n result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name)\n job = cls(async_result=result)\n logging.info(\"[Manager][%s] Created new job: %s\", query_hash, job.id)\n pipe.set('query_hash_job:%s' % query_hash, job.id)\n pipe.execute()\n break\n\n except redis.WatchError:\n continue\n\n if not job:\n logging.error(\"[Manager][%s] Failed adding job for query.\", query_hash)\n\n return job\n\n def to_dict(self):\n if self._async_result.status == 'STARTED':\n updated_at = self._async_result.result.get('start_time', 0)\n else:\n updated_at = 0\n\n if self._async_result.failed() and isinstance(self._async_result.result, Exception):\n error = self._async_result.result.message\n elif self._async_result.status == 'REVOKED':\n error = 'Query execution cancelled.'\n else:\n error = ''\n\n if self._async_result.successful():\n query_result_id = self._async_result.result\n else:\n query_result_id = None\n\n return {\n 'id': self._async_result.id,\n 'updated_at': updated_at,\n 'status': self.STATUSES[self._async_result.status],\n 'error': error,\n 'query_result_id': query_result_id,\n }\n\n def cancel(self):\n return self._async_result.revoke(terminate=True)\n\n\[email protected](base=BaseTask)\ndef refresh_queries():\n # self.status['last_refresh_at'] = time.time()\n # self._save_status()\n\n logger.info(\"Refreshing queries...\")\n\n outdated_queries_count = 0\n for query in models.Query.outdated_queries():\n # TODO: this should go into lower priority\n QueryTask.add_task(query.query, query.data_source, scheduled=True)\n outdated_queries_count += 1\n\n statsd_client.gauge('manager.outdated_queries', outdated_queries_count)\n # TODO: decide if we still need this\n # statsd_client.gauge('manager.queue_size', self.redis_connection.zcard('jobs'))\n\n logger.info(\"Done refreshing queries. 
Found %d outdated queries.\" % outdated_queries_count)\n\n status = redis_connection.hgetall('redash:status')\n now = time.time()\n\n redis_connection.hmset('redash:status', {\n 'outdated_queries_count': outdated_queries_count,\n 'last_refresh_at': now\n })\n\n statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))\n\[email protected](bind=True, base=BaseTask, track_started=True)\ndef execute_query(self, query, data_source_id):\n # TODO: maybe this should be a class?\n start_time = time.time()\n\n logger.info(\"Loading data source (%d)...\", data_source_id)\n\n # TODO: we should probably cache data sources in Redis\n data_source = models.DataSource.get_by_id(data_source_id)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'custom_message': ''})\n\n logger.info(\"Executing query:\\n%s\", query)\n\n query_hash = gen_query_hash(query)\n query_runner = get_query_runner(data_source.type, data_source.options)\n\n if getattr(query_runner, 'annotate_query', True):\n # TODO: anotate with queu ename\n annotated_query = \"/* Task Id: %s, Query hash: %s */ %s\" % \\\n (self.request.id, query_hash, query)\n else:\n annotated_query = query\n\n with statsd_client.timer('query_runner.{}.{}.run_time'.format(data_source.type, data_source.name)):\n data, error = query_runner(annotated_query)\n\n run_time = time.time() - start_time\n logger.info(\"Query finished... data length=%s, error=%s\", data and len(data), error)\n\n self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''})\n\n # TODO: it is possible that storing the data will fail, and we will need to retry\n # while we already marked the job as done\n # Delete query_hash\n redis_connection.delete('query_hash_job:%s' % query_hash)\n\n if not error:\n query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow())\n else:\n raise Exception(error)\n\n return query_result.id\n\n", "path": "redash/tasks.py"}]} | 2,495 | 529 |
gh_patches_debug_5761 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Data error for Aruba
For a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check back on the source: https://www.webaruba.com/ and saw that the figures for electricity generation under the "Aruba's renewable energy monitor" block were showing 0 MW for everything.

When I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)

Real-time data for Aruba's electricity generation is present there but not on the front page.
</issue>
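A quick way to see what the dashboard endpoint actually returns is to dump the raw values and their Python types. This is only a diagnostic sketch: the URL, the mandatory user-agent header, and the field names are taken from the parser shown below; everything else is illustrative.

```
import requests

# Same endpoint and mandatory user-agent header used by parsers/AW.py below.
url = "https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json"
response = requests.get(url, headers={"user-agent": "electricitymap.org"})
top_data = response.json()["dashboard_top_data"]

# Print each production value with its type; if the API returns strings
# (e.g. "0.0"), the parser has to coerce them to numbers before reporting.
for key in ("Fossil", "Wind", "TotalSolar"):
    value = top_data[key]["value"]
    print(key, repr(value), type(value))
```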
<code>
[start of parsers/AW.py]
1 #!/usr/bin/env python3
2
3 import arrow
4 import requests
5 import datetime
6
7
8 def fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 r = session or requests.session()
13 url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'
14 # User agent is mandatory or services answers 404
15 headers = {'user-agent': 'electricitymap.org'}
16 response = r.get(url, headers=headers)
17 aruba_json = response.json()
18 top_data = aruba_json['dashboard_top_data']
19
20 # Values currenlty used from service
21 fossil = top_data['Fossil']
22 wind = top_data['Wind']
23 solar = top_data['TotalSolar']
24
25 # We're using Fossil data to get timestamp in correct time zone
26 local_date_time = datetime.datetime.strptime(fossil['timestamp'], "%Y-%m-%d %H:%M:%S.%f")
27 zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')
28
29 data = {
30 'zoneKey': zone_key,
31 'datetime': zone_date_time.datetime,
32 'production': {
33 'oil': fossil['value'],
34 'wind': wind['value'],
35 'solar': solar['value'],
36 },
37 'storage': {},
38 'source': 'webaruba.com',
39 }
40
41 return data
42
43
44 if __name__ == '__main__':
45 print(fetch_production())
46
[end of parsers/AW.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/AW.py b/parsers/AW.py
--- a/parsers/AW.py
+++ b/parsers/AW.py
@@ -30,9 +30,9 @@
'zoneKey': zone_key,
'datetime': zone_date_time.datetime,
'production': {
- 'oil': fossil['value'],
- 'wind': wind['value'],
- 'solar': solar['value'],
+ 'oil': float(fossil['value']),
+ 'wind': float(wind['value']),
+ 'solar': float(solar['value']),
},
'storage': {},
'source': 'webaruba.com',
| {"golden_diff": "diff --git a/parsers/AW.py b/parsers/AW.py\n--- a/parsers/AW.py\n+++ b/parsers/AW.py\n@@ -30,9 +30,9 @@\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n- 'oil': fossil['value'],\n- 'wind': wind['value'],\n- 'solar': solar['value'],\n+ 'oil': float(fossil['value']),\n+ 'wind': float(wind['value']),\n+ 'solar': float(solar['value']),\n },\n 'storage': {},\n 'source': 'webaruba.com',\n", "issue": "Data error for Aruba\nFor a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check back on the source: https://www.webaruba.com/ and saw that the figures for electricity generation under the \"Aruba's renewable energy monitor\" block giving 0 MW for everything.\r\n\r\n\r\n\r\nWhen I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)\r\n\r\n\r\n\r\nReal-time data for Aruba's electricity generation is present there but not on the front page.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport requests\nimport datetime\n\n\ndef fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n r = session or requests.session()\n url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'\n # User agent is mandatory or services answers 404\n headers = {'user-agent': 'electricitymap.org'}\n response = r.get(url, headers=headers)\n aruba_json = response.json()\n top_data = aruba_json['dashboard_top_data']\n\n # Values currenlty used from service\n fossil = top_data['Fossil']\n wind = top_data['Wind']\n solar = top_data['TotalSolar']\n\n # We're using Fossil data to get timestamp in correct time zone\n local_date_time = datetime.datetime.strptime(fossil['timestamp'], \"%Y-%m-%d %H:%M:%S.%f\")\n zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n 'oil': fossil['value'],\n 'wind': wind['value'],\n 'solar': solar['value'],\n },\n 'storage': {},\n 'source': 'webaruba.com',\n }\n\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AW.py"}]} | 1,203 | 145 |
gh_patches_debug_25582 | rasdani/github-patches | git_diff | cal-itp__benefits-211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Send Content-Security-Policy header
The Content-Security-Policy (CSP) header (with the `frame-ancestors` directive) replaces the now deprecated `X-Frame-Options` header, to instruct the browser about appropriate actions to perform if a site is included inside an `<iframe>`.
See more at https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
We already have Django's built-in [Clickjacking/`X-Frame-Options` features enabled](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py#L52). Since this app should never be run from an `<iframe>`, let's create another Middleware that sets the CSP header like so:
```
Content-Security-Policy: default-src 'self'; frame-ancestors 'none';
```
</issue>
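A minimal sketch of such a middleware, assuming a plain new-style Django middleware class (the class name is illustrative; a ready-made package such as django-csp would achieve the same result):

```
class ContentSecurityPolicyMiddleware:
    """Attach a restrictive Content-Security-Policy header to every response."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        response["Content-Security-Policy"] = (
            "default-src 'self'; frame-ancestors 'none';"
        )
        return response
```

Registering it in the MIDDLEWARE list next to XFrameOptionsMiddleware would make the header apply to every view.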
<code>
[start of benefits/settings.py]
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split(",")
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "benefits.core.middleware.Healthcheck",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 "benefits.core.middleware.DebugSession",
54 "benefits.core.middleware.ChangedLanguageEvent",
55 ]
56
57 if ADMIN:
58 MIDDLEWARE.extend(
59 [
60 "django.contrib.auth.middleware.AuthenticationMiddleware",
61 "django.contrib.messages.middleware.MessageMiddleware",
62 ]
63 )
64
65 CSRF_COOKIE_AGE = None
66 CSRF_COOKIE_SAMESITE = "Strict"
67 CSRF_COOKIE_HTTPONLY = True
68
69 SESSION_COOKIE_SAMESITE = "Strict"
70 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
71 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
72
73 if not DEBUG:
74 CSRF_COOKIE_SECURE = True
75 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
76 SESSION_COOKIE_SECURE = True
77
78 ROOT_URLCONF = "benefits.urls"
79
80 template_ctx_processors = [
81 "django.template.context_processors.request",
82 "benefits.core.context_processors.analytics",
83 ]
84
85 if DEBUG:
86 template_ctx_processors.extend(
87 [
88 "django.template.context_processors.debug",
89 "benefits.core.context_processors.debug",
90 ]
91 )
92
93 if ADMIN:
94 template_ctx_processors.extend(
95 [
96 "django.contrib.auth.context_processors.auth",
97 "django.contrib.messages.context_processors.messages",
98 ]
99 )
100
101 TEMPLATES = [
102 {
103 "BACKEND": "django.template.backends.django.DjangoTemplates",
104 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
105 "APP_DIRS": True,
106 "OPTIONS": {
107 "context_processors": template_ctx_processors,
108 },
109 },
110 ]
111
112 WSGI_APPLICATION = "benefits.wsgi.application"
113
114 DATABASES = {
115 "default": {
116 "ENGINE": "django.db.backends.sqlite3",
117 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
118 }
119 }
120
121 # Password validation
122
123 AUTH_PASSWORD_VALIDATORS = []
124
125 if ADMIN:
126 AUTH_PASSWORD_VALIDATORS.extend(
127 [
128 {
129 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
130 },
131 {
132 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
133 },
134 {
135 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
136 },
137 {
138 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
139 },
140 ]
141 )
142
143 # Internationalization
144
145 LANGUAGE_CODE = "en"
146
147 LANGUAGE_COOKIE_HTTPONLY = True
148 LANGUAGE_COOKIE_SAMESITE = "Strict"
149 LANGUAGE_COOKIE_SECURE = True
150
151 LANGUAGES = [("en", "English"), ("es", "Español")]
152
153 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
154
155 USE_I18N = True
156 USE_L10N = True
157
158 TIME_ZONE = "UTC"
159 USE_TZ = True
160
161 # Static files (CSS, JavaScript, Images)
162
163 STATIC_URL = "/static/"
164 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
165 STATIC_ROOT = os.path.join(BASE_DIR, "static")
166
167 # Logging configuration
168
169 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
170 LOGGING = {
171 "version": 1,
172 "disable_existing_loggers": False,
173 "formatters": {
174 "default": {
175 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
176 "datefmt": "%d/%b/%Y %H:%M:%S",
177 "style": "{",
178 },
179 },
180 "handlers": {
181 "default": {"class": "logging.StreamHandler", "formatter": "default"},
182 },
183 "root": {
184 "handlers": ["default"],
185 "level": LOG_LEVEL,
186 },
187 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
188 }
189
190 # Analytics configuration
191
192 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
193
[end of benefits/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -50,6 +50,7 @@
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "csp.middleware.CSPMiddleware",
"benefits.core.middleware.DebugSession",
"benefits.core.middleware.ChangedLanguageEvent",
]
@@ -188,3 +189,35 @@
# Analytics configuration
ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
+
+# Content Security Policy
+# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
+
+# In particular, note that the inner single-quotes are required!
+# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
+
+CSP_DEFAULT_SRC = ["'self'"]
+
+CSP_CONNECT_SRC = ["'self'", "https://api.amplitude.com/"]
+
+CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
+
+CSP_FRAME_ANCESTORS = ["'none'"]
+CSP_FRAME_SRC = ["'none'"]
+
+CSP_SCRIPT_SRC_ELEM = [
+ "'unsafe-inline'",
+ "https://california.azureedge.net/cdt/statetemplate/",
+ "https://cdn.amplitude.com/libs/",
+ "https://code.jquery.com/",
+ "*.littlepay.com",
+]
+
+CSP_STYLE_SRC = ["'unsafe-inline'"]
+
+CSP_STYLE_SRC_ELEM = [
+ "'self'",
+ "'unsafe-inline'",
+ "https://california.azureedge.net/cdt/statetemplate/",
+ "https://fonts.googleapis.com/css",
+]
| {"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -50,6 +50,7 @@\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n+ \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n ]\n@@ -188,3 +189,35 @@\n # Analytics configuration\n \n ANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n+\n+# Content Security Policy\n+# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n+\n+# In particular, note that the inner single-quotes are required!\n+# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n+\n+CSP_DEFAULT_SRC = [\"'self'\"]\n+\n+CSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\n+\n+CSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n+\n+CSP_FRAME_ANCESTORS = [\"'none'\"]\n+CSP_FRAME_SRC = [\"'none'\"]\n+\n+CSP_SCRIPT_SRC_ELEM = [\n+ \"'unsafe-inline'\",\n+ \"https://california.azureedge.net/cdt/statetemplate/\",\n+ \"https://cdn.amplitude.com/libs/\",\n+ \"https://code.jquery.com/\",\n+ \"*.littlepay.com\",\n+]\n+\n+CSP_STYLE_SRC = [\"'unsafe-inline'\"]\n+\n+CSP_STYLE_SRC_ELEM = [\n+ \"'self'\",\n+ \"'unsafe-inline'\",\n+ \"https://california.azureedge.net/cdt/statetemplate/\",\n+ \"https://fonts.googleapis.com/css\",\n+]\n", "issue": "Send Content-Security-Policy header\nThe Content-Security-Policy (CSP) header (with the `frame-ancestors` directive) replaces the now deprecated `X-Frame-Options` header, to instruct the browser about appropriate actions to perform if a site is included inside an `<iframe>`.\r\n\r\nSee more at https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\r\n\r\nWe already have Django's built-in [Clickjacking/`X-Frame-Options` features enabled](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py#L52). 
Since this app should never be run from an `<iframe>`, let's create another Middleware that sets the CSP header like so:\r\n\r\n```\r\nContent-Security-Policy: default-src 'self'; frame-ancestors 'none';\r\n```\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}]} | 2,319 | 397 |
gh_patches_debug_49143 | rasdani/github-patches | git_diff | vyperlang__vyper-2513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test fail with web3.py 5.21.0
### Version Information
* vyper Version (output of `vyper --version`): latest master (cff69d63)
* OS: macos
* Python Version (output of `python --version`): 3.9.6
### What's your issue about?
tests fail
tests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...
FAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...
FAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...
FAILED tests/parser/features/test_assert.py::test_assest_reason_revert
misses the string "execution reverted"
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import os
4 import subprocess
5
6 from setuptools import find_packages, setup
7
8 __version__ = "0.3.0"
9
10 extras_require = {
11 "test": [
12 "pytest>=5.4,<6.0",
13 "pytest-cov>=2.10,<3.0",
14 "pytest-instafail>=0.4,<1.0",
15 "pytest-xdist>=1.32,<2.0",
16 "eth-tester[py-evm]>=0.5.0b1,<0.6",
17 "py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
18 "web3==5.12.3",
19 "tox>=3.15,<4.0",
20 "lark-parser==0.10.0",
21 "hypothesis[lark]>=5.37.1,<6.0",
22 ],
23 "lint": [
24 "black==21.9b0",
25 "flake8==3.9.2",
26 "flake8-bugbear==20.1.4",
27 "flake8-use-fstring==1.1",
28 "isort==5.9.3",
29 "mypy==0.910",
30 ],
31 "docs": ["recommonmark", "sphinx>=3.0,<4.0", "sphinx_rtd_theme>=0.5,<0.6"],
32 "dev": ["ipython", "pre-commit", "pyinstaller", "twine"],
33 }
34
35 extras_require["dev"] = (
36 extras_require["test"] + extras_require["lint"] + extras_require["docs"] + extras_require["dev"]
37 )
38
39 hash_file_rel_path = os.path.join("vyper", "vyper_git_version.txt")
40 hashfile = os.path.relpath(hash_file_rel_path)
41
42 try:
43 commithash = subprocess.check_output("git rev-parse HEAD".split())
44 commithash_str = commithash.decode("utf-8").strip()
45 with open(hashfile, "w") as fh:
46 fh.write(f"{__version__}\n{commithash_str}")
47 except subprocess.CalledProcessError:
48 pass
49
50 with open("README.md", "r") as f:
51 long_description = f.read()
52
53 setup(
54 name="vyper",
55 version=__version__,
56 description="Vyper: the Pythonic Programming Language for the EVM",
57 long_description=long_description,
58 long_description_content_type="text/markdown",
59 author="Vyper Team",
60 author_email="",
61 url="https://github.com/vyperlang/vyper",
62 license="Apache License 2.0",
63 keywords="ethereum evm smart contract language",
64 include_package_data=True,
65 packages=find_packages(exclude=("tests", "docs")),
66 python_requires=">=3.7,<3.10",
67 py_modules=["vyper"],
68 install_requires=[
69 "asttokens==2.0.4",
70 "pycryptodome>=3.5.1,<4",
71 "semantic-version==2.8.5",
72 "cached-property==1.5.2 ; python_version<'3.8'",
73 ],
74 setup_requires=["pytest-runner"],
75 tests_require=extras_require["test"],
76 extras_require=extras_require,
77 entry_points={
78 "console_scripts": [
79 "vyper=vyper.cli.vyper_compile:_parse_cli_args",
80 "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
81 "vyper-lll=vyper.cli.vyper_lll:_parse_cli_args",
82 "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
83 ]
84 },
85 classifiers=[
86 "Intended Audience :: Developers",
87 "License :: OSI Approved :: Apache Software License",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 ],
92 data_files=[("", [hash_file_rel_path])],
93 )
94
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"pytest-xdist>=1.32,<2.0",
"eth-tester[py-evm]>=0.5.0b1,<0.6",
"py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
- "web3==5.12.3",
+ "web3==5.21.0",
"tox>=3.15,<4.0",
"lark-parser==0.10.0",
"hypothesis[lark]>=5.37.1,<6.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n- \"web3==5.12.3\",\n+ \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n", "issue": "test fail with web3.py 5.21.0\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master (cff69d63)\r\n* OS: macos\r\n* Python Version (output of `python --version`): 3.9.6\r\n\r\n### What's your issue about?\r\n\r\ntests fail\r\n\r\ntests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...\r\nFAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...\r\nFAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...\r\nFAILED tests/parser/features/test_assert.py::test_assest_reason_revert\r\n\r\nmisses the string \"execution reverted\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.12.3\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n 
\"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}]} | 1,766 | 178 |
gh_patches_debug_10810 | rasdani/github-patches | git_diff | sunpy__sunpy-4430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong plot in goes hek m25 example
<!--
We know asking good questions takes effort, and we appreciate your time.
Thank you.
Please be aware that everyone has to follow our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst
Also that these comments are hidden when you submit this github issue.
Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue!
-->
### Description
<!-- Provide a general description of the bug. -->
The plot in timeseries example gallery of sunpy for goes and hek flare data looks very different when same code is run on master.
### Expected behavior
<!-- What did you expect to happen. -->
It should look like this
https://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png
### Actual behavior
<!--
What actually happened.
Was the output confusing or poorly described?
-->

### Steps to Reproduce
<!--
Please include **code** that reproduces the issue whenever possible.
The best reproductions are self-contained scripts with minimal dependencies.
-->
https://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.
### System Details
<!--
We at least need to know the sunpy version you are using.
We provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.
This step is optional but strongly recommended.
-->
- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error
- Astropy Version: 4.0.1.post1
- Python Version: 3.8.5, 3.7.5 both were giving error
- OS information: Ubuntu 18.04 LTS
</issue>
<code>
[start of examples/time_series/goes_hek_m25.py]
1 """
2 ==============================
3 Flare times on a GOES XRS plot
4 ==============================
5
6 How to plot flare times as provided by the HEK on a GOES XRS plot.
7 """
8 import matplotlib.pyplot as plt
9
10 from sunpy.net import Fido
11 from sunpy.net import attrs as a
12 from sunpy.net import hek
13 from sunpy.time import TimeRange, parse_time
14 from sunpy.timeseries import TimeSeries
15
16 ###############################################################################
17 # Let's first grab GOES XRS data for a particular time of interest
18 tr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])
19 results = Fido.search(a.Time(tr), a.Instrument.xrs)
20
21 ###############################################################################
22 # Then download the data and load it into a TimeSeries
23 files = Fido.fetch(results)
24 goes = TimeSeries(files)
25
26 ###############################################################################
27 # Next lets grab the HEK flare data for this time from the NOAA Space Weather
28 # Prediction Center (SWPC)
29 client = hek.HEKClient()
30 flares_hek = client.search(hek.attrs.Time(tr.start, tr.end),
31 hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')
32
33 ###############################################################################
34 # Lets plot everything together
35 fig, ax = plt.subplots()
36 goes.plot()
37 ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
38 ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
39 parse_time(flares_hek[0].get('event_endtime')).plot_date,
40 alpha=0.2, label=flares_hek[0].get('fl_goescls'))
41 ax.legend(loc=2)
42 ax.set_yscale('log')
43 plt.show()
44
[end of examples/time_series/goes_hek_m25.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py
--- a/examples/time_series/goes_hek_m25.py
+++ b/examples/time_series/goes_hek_m25.py
@@ -34,9 +34,9 @@
# Lets plot everything together
fig, ax = plt.subplots()
goes.plot()
-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
- parse_time(flares_hek[0].get('event_endtime')).plot_date,
+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)
+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,
+ parse_time(flares_hek[0].get('event_endtime')).datetime,
alpha=0.2, label=flares_hek[0].get('fl_goescls'))
ax.legend(loc=2)
ax.set_yscale('log')
| {"golden_diff": "diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py\n--- a/examples/time_series/goes_hek_m25.py\n+++ b/examples/time_series/goes_hek_m25.py\n@@ -34,9 +34,9 @@\n # Lets plot everything together\n fig, ax = plt.subplots()\n goes.plot()\n-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\n-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n- parse_time(flares_hek[0].get('event_endtime')).plot_date,\n+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)\n+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,\n+ parse_time(flares_hek[0].get('event_endtime')).datetime,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\n ax.legend(loc=2)\n ax.set_yscale('log')\n", "issue": "Wrong plot in goes hek m25 example\n<!--\r\nWe know asking good questions takes effort, and we appreciate your time.\r\nThank you.\r\n\r\nPlease be aware that everyone has to follow our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst\r\n\r\nAlso that these comments are hidden when you submit this github issue.\r\n\r\nPlease have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue!\r\n-->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nThe plot in timeseries example gallery of sunpy for goes and hek flare data looks very different when same code is run on master.\r\n\r\n### Expected behavior\r\n<!-- What did you expect to happen. -->\r\nIt should look like this\r\nhttps://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png\r\n\r\n### Actual behavior\r\n<!--\r\nWhat actually happened.\r\nWas the output confusing or poorly described?\r\n-->\r\n\r\n### Steps to Reproduce\r\n<!--\r\nPlease include **code** that reproduces the issue whenever possible.\r\nThe best reproductions are self-contained scripts with minimal dependencies.\r\n-->\r\n\r\nhttps://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.\r\n\r\n### System Details\r\n<!--\r\nWe at least need to know the sunpy version you are using.\r\nWe provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.\r\nThis step is optional but strongly recommended.\r\n-->\r\n- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error\r\n- Astropy Version: 4.0.1.post1\r\n- Python Version: 3.8.5, 3.7.5 both were giving error\r\n- OS information: Ubuntu 18.04 LTS\r\n\n", "before_files": [{"content": "\"\"\"\n==============================\nFlare times on a GOES XRS plot\n==============================\n\nHow to plot flare times as provided by the HEK on a GOES XRS plot.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\nfrom sunpy.net import hek\nfrom sunpy.time import TimeRange, parse_time\nfrom sunpy.timeseries import TimeSeries\n\n###############################################################################\n# Let's first grab GOES XRS data for a particular time of interest\ntr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])\nresults = Fido.search(a.Time(tr), a.Instrument.xrs)\n\n###############################################################################\n# Then download the data and load it into a 
TimeSeries\nfiles = Fido.fetch(results)\ngoes = TimeSeries(files)\n\n###############################################################################\n# Next lets grab the HEK flare data for this time from the NOAA Space Weather\n# Prediction Center (SWPC)\nclient = hek.HEKClient()\nflares_hek = client.search(hek.attrs.Time(tr.start, tr.end),\n hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')\n\n###############################################################################\n# Lets plot everything together\nfig, ax = plt.subplots()\ngoes.plot()\nax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\nax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n parse_time(flares_hek[0].get('event_endtime')).plot_date,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\nax.legend(loc=2)\nax.set_yscale('log')\nplt.show()\n", "path": "examples/time_series/goes_hek_m25.py"}]} | 1,522 | 255 |
gh_patches_debug_13115 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
</issue>
<code>
[start of bit_manipulation/missing_number.py]
1 def find_missing_number(nums: list[int]) -> int:
2 """
3 Finds the missing number in a list of consecutive integers.
4
5 Args:
6 nums: A list of integers.
7
8 Returns:
9 The missing number.
10
11 Example:
12 >>> find_missing_number([0, 1, 3, 4])
13 2
14 >>> find_missing_number([1, 3, 4, 5, 6])
15 2
16 >>> find_missing_number([6, 5, 4, 2, 1])
17 3
18 >>> find_missing_number([6, 1, 5, 3, 4])
19 2
20 """
21 low = min(nums)
22 high = max(nums)
23 missing_number = high
24
25 for i in range(low, high):
26 missing_number ^= i ^ nums[i - low]
27
28 return missing_number
29
[end of bit_manipulation/missing_number.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,6 +11,12 @@
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([4, 3, 1, 0])
+ 2
+ >>> find_missing_number([-4, -3, -1, 0])
+ -2
+ >>> find_missing_number([-2, 2, 1, 3, 0])
+ -1
>>> find_missing_number([1, 3, 4, 5, 6])
2
>>> find_missing_number([6, 5, 4, 2, 1])
@@ -26,3 +32,9 @@
missing_number ^= i ^ nums[i - low]
return missing_number
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| {"golden_diff": "diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py\n--- a/bit_manipulation/missing_number.py\n+++ b/bit_manipulation/missing_number.py\n@@ -11,6 +11,12 @@\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n+ >>> find_missing_number([4, 3, 1, 0])\n+ 2\n+ >>> find_missing_number([-4, -3, -1, 0])\n+ -2\n+ >>> find_missing_number([-2, 2, 1, 3, 0])\n+ -1\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n@@ -26,3 +32,9 @@\n missing_number ^= i ^ nums[i - low]\n \n return missing_number\n+\n+\n+if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. 
If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n 3\n >>> find_missing_number([6, 1, 5, 3, 4])\n 2\n \"\"\"\n low = min(nums)\n high = max(nums)\n missing_number = high\n\n for i in range(low, high):\n missing_number ^= i ^ nums[i - low]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}]} | 1,622 | 251 |
gh_patches_debug_26597 | rasdani/github-patches | git_diff | python-discord__site-577 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SystemExit: 1
Sentry Issue: [SITE-1G](https://sentry.io/organizations/python-discord/issues/2623186847/?referrer=github_integration)
```
SystemExit: 1
(36 additional frame(s) were not displayed)
...
File "urllib3/connectionpool.py", line 1010, in _validate_conn
conn.connect()
File "urllib3/connection.py", line 353, in connect
conn = self._new_conn()
File "urllib3/connection.py", line 169, in _new_conn
conn = connection.create_connection(
File "urllib3/util/connection.py", line 86, in create_connection
sock.connect(sa)
File "gunicorn/workers/base.py", line 201, in handle_abort
sys.exit(1)
```
</issue>
<code>
[start of pydis_site/constants.py]
1 import os
2
3 GIT_SHA = os.environ.get("GIT_SHA", "development")
4 GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
5
[end of pydis_site/constants.py]
[start of pydis_site/apps/home/views/home.py]
1 import logging
2 from typing import Dict, List
3
4 import requests
5 from django.core.handlers.wsgi import WSGIRequest
6 from django.http import HttpResponse
7 from django.shortcuts import render
8 from django.utils import timezone
9 from django.views import View
10
11 from pydis_site.apps.home.models import RepositoryMetadata
12 from pydis_site.constants import GITHUB_TOKEN
13
14 log = logging.getLogger(__name__)
15
16
17 class HomeView(View):
18 """The main landing page for the website."""
19
20 github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
21 repository_cache_ttl = 3600
22
23 # Which of our GitHub repos should be displayed on the front page, and in which order?
24 repos = [
25 "python-discord/site",
26 "python-discord/bot",
27 "python-discord/snekbox",
28 "python-discord/sir-lancebot",
29 "python-discord/metricity",
30 "python-discord/django-simple-bulma",
31 ]
32
33 def __init__(self):
34 """Clean up stale RepositoryMetadata."""
35 RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()
36
37 # If no token is defined (for example in local development), then
38 # it does not make sense to pass the Authorization header. More
39 # specifically, GitHub will reject any requests from us due to the
40 # invalid header. We can make a limited number of anonymous requests
41 # though, which is useful for testing.
42 if GITHUB_TOKEN:
43 self.headers = {"Authorization": f"token {GITHUB_TOKEN}"}
44 else:
45 self.headers = {}
46
47 def _get_api_data(self) -> Dict[str, Dict[str, str]]:
48 """
49 Call the GitHub API and get information about our repos.
50
51 If we're unable to get that info for any reason, return an empty dict.
52 """
53 repo_dict = {}
54
55 # Fetch the data from the GitHub API
56 api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
57
58 # Process the API data into our dict
59 for repo in api_data:
60 try:
61 full_name = repo["full_name"]
62
63 if full_name in self.repos:
64 repo_dict[full_name] = {
65 "full_name": repo["full_name"],
66 "description": repo["description"],
67 "language": repo["language"],
68 "forks_count": repo["forks_count"],
69 "stargazers_count": repo["stargazers_count"],
70 }
71 # Something is not right about the API data we got back from GitHub.
72 except (TypeError, ConnectionError, KeyError) as e:
73 log.error(
74 "Unable to parse the GitHub repository metadata from response!",
75 extra={
76 'api_data': api_data,
77 'error': e
78 }
79 )
80 continue
81
82 return repo_dict
83
84 def _get_repo_data(self) -> List[RepositoryMetadata]:
85 """Build a list of RepositoryMetadata objects that we can use to populate the front page."""
86 # First off, load the timestamp of the least recently updated entry.
87 last_update = (
88 RepositoryMetadata.objects.values_list("last_updated", flat=True)
89 .order_by("last_updated").first()
90 )
91
92 # If we did not retrieve any results here, we should import them!
93 if last_update is None:
94
95 # Try to get new data from the API. If it fails, we'll return an empty list.
96 # In this case, we simply don't display our projects on the site.
97 api_repositories = self._get_api_data()
98
99 # Create all the repodata records in the database.
100 return RepositoryMetadata.objects.bulk_create(
101 RepositoryMetadata(
102 repo_name=api_data["full_name"],
103 description=api_data["description"],
104 forks=api_data["forks_count"],
105 stargazers=api_data["stargazers_count"],
106 language=api_data["language"],
107 )
108 for api_data in api_repositories.values()
109 )
110
111 # If the data is stale, we should refresh it.
112 if (timezone.now() - last_update).seconds > self.repository_cache_ttl:
113 # Try to get new data from the API. If it fails, return the cached data.
114 api_repositories = self._get_api_data()
115
116 if not api_repositories:
117 return RepositoryMetadata.objects.all()
118
119 # Update or create all RepoData objects in self.repos
120 database_repositories = []
121 for api_data in api_repositories.values():
122 repo_data, _created = RepositoryMetadata.objects.update_or_create(
123 repo_name=api_data["full_name"],
124 defaults={
125 'repo_name': api_data["full_name"],
126 'description': api_data["description"],
127 'forks': api_data["forks_count"],
128 'stargazers': api_data["stargazers_count"],
129 'language': api_data["language"],
130 }
131 )
132 database_repositories.append(repo_data)
133 return database_repositories
134
135 # Otherwise, if the data is fresher than 2 minutes old, we should just return it.
136 else:
137 return RepositoryMetadata.objects.all()
138
139 def get(self, request: WSGIRequest) -> HttpResponse:
140 """Collect repo data and render the homepage view."""
141 repo_data = self._get_repo_data()
142 return render(request, "home/index.html", {"repo_data": repo_data})
143
144
145 def timeline(request: WSGIRequest) -> HttpResponse:
146 """Render timeline view."""
147 return render(request, 'home/timeline.html')
148
[end of pydis_site/apps/home/views/home.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py
--- a/pydis_site/apps/home/views/home.py
+++ b/pydis_site/apps/home/views/home.py
@@ -9,7 +9,7 @@
from django.views import View
from pydis_site.apps.home.models import RepositoryMetadata
-from pydis_site.constants import GITHUB_TOKEN
+from pydis_site.constants import GITHUB_TOKEN, TIMEOUT_PERIOD
log = logging.getLogger(__name__)
@@ -51,9 +51,16 @@
If we're unable to get that info for any reason, return an empty dict.
"""
repo_dict = {}
-
- # Fetch the data from the GitHub API
- api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
+ try:
+ # Fetch the data from the GitHub API
+ api_data: List[dict] = requests.get(
+ self.github_api,
+ headers=self.headers,
+ timeout=TIMEOUT_PERIOD
+ ).json()
+ except requests.exceptions.Timeout:
+ log.error("Request to fetch GitHub repository metadata for timed out!")
+ return repo_dict
# Process the API data into our dict
for repo in api_data:
diff --git a/pydis_site/constants.py b/pydis_site/constants.py
--- a/pydis_site/constants.py
+++ b/pydis_site/constants.py
@@ -2,3 +2,5 @@
GIT_SHA = os.environ.get("GIT_SHA", "development")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
+# How long to wait for synchronous requests before timing out
+TIMEOUT_PERIOD = int(os.environ.get("TIMEOUT_PERIOD", 5))
| {"golden_diff": "diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py\n--- a/pydis_site/apps/home/views/home.py\n+++ b/pydis_site/apps/home/views/home.py\n@@ -9,7 +9,7 @@\n from django.views import View\n \n from pydis_site.apps.home.models import RepositoryMetadata\n-from pydis_site.constants import GITHUB_TOKEN\n+from pydis_site.constants import GITHUB_TOKEN, TIMEOUT_PERIOD\n \n log = logging.getLogger(__name__)\n \n@@ -51,9 +51,16 @@\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n-\n- # Fetch the data from the GitHub API\n- api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n+ try:\n+ # Fetch the data from the GitHub API\n+ api_data: List[dict] = requests.get(\n+ self.github_api,\n+ headers=self.headers,\n+ timeout=TIMEOUT_PERIOD\n+ ).json()\n+ except requests.exceptions.Timeout:\n+ log.error(\"Request to fetch GitHub repository metadata for timed out!\")\n+ return repo_dict\n \n # Process the API data into our dict\n for repo in api_data:\ndiff --git a/pydis_site/constants.py b/pydis_site/constants.py\n--- a/pydis_site/constants.py\n+++ b/pydis_site/constants.py\n@@ -2,3 +2,5 @@\n \n GIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n GITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n+# How long to wait for synchronous requests before timing out\n+TIMEOUT_PERIOD = int(os.environ.get(\"TIMEOUT_PERIOD\", 5))\n", "issue": "SystemExit: 1\nSentry Issue: [SITE-1G](https://sentry.io/organizations/python-discord/issues/2623186847/?referrer=github_integration)\n\n```\nSystemExit: 1\n(36 additional frame(s) were not displayed)\n...\n File \"urllib3/connectionpool.py\", line 1010, in _validate_conn\n conn.connect()\n File \"urllib3/connection.py\", line 353, in connect\n conn = self._new_conn()\n File \"urllib3/connection.py\", line 169, in _new_conn\n conn = connection.create_connection(\n File \"urllib3/util/connection.py\", line 86, in create_connection\n sock.connect(sa)\n File \"gunicorn/workers/base.py\", line 201, in handle_abort\n sys.exit(1)\n```\n", "before_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\nGITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\nfrom pydis_site.constants import GITHUB_TOKEN\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n # If no token is defined (for example in local development), then\n # it does not make sense to pass the Authorization header. 
More\n # specifically, GitHub will reject any requests from us due to the\n # invalid header. We can make a limited number of anonymous requests\n # though, which is useful for testing.\n if GITHUB_TOKEN:\n self.headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n else:\n self.headers = {}\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n # First off, load the timestamp of the least recently updated entry.\n last_update = (\n RepositoryMetadata.objects.values_list(\"last_updated\", flat=True)\n .order_by(\"last_updated\").first()\n )\n\n # If we did not retrieve any results here, we should import them!\n if last_update is None:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n return RepositoryMetadata.objects.bulk_create(\n RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n for api_data in api_repositories.values()\n )\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - last_update).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. 
If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n database_repositories = []\n for api_data in api_repositories.values():\n repo_data, _created = RepositoryMetadata.objects.update_or_create(\n repo_name=api_data[\"full_name\"],\n defaults={\n 'repo_name': api_data[\"full_name\"],\n 'description': api_data[\"description\"],\n 'forks': api_data[\"forks_count\"],\n 'stargazers': api_data[\"stargazers_count\"],\n 'language': api_data[\"language\"],\n }\n )\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}]} | 2,307 | 370 |
gh_patches_debug_37082 | rasdani/github-patches | git_diff | mars-project__mars-1215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Column pruning leads to a wrong result
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
``` Python
pd_df = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce'),
'd': list('abaaaddce')})
pd_df.to_csv('test.csv', index=False)
in_df = md.read_csv('test.csv')
df1 = in_df.groupby('d').agg({'b': 'min'})
df2 = in_df[in_df.d.isin(df1.index)]
df2.execute()
Out[5]:
b d
0 1 a
1 3 b
2 4 a
3 5 a
4 6 a
5 5 d
6 4 d
7 4 c
8 4 e
```
the output only has two columns which should be 4.
</issue>
<code>
[start of mars/optimizes/tileable_graph/column_pruning.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from ...utils import copy_tileables
18 from ...dataframe.utils import parse_index
19 from ...dataframe.groupby.aggregation import DataFrameGroupByAgg
20 from ...dataframe.datasource.read_csv import DataFrameReadCSV
21 from .core import TileableOptimizeRule, register
22
23
24 class GroupbyPruneReadCSV(TileableOptimizeRule):
25 """
26 An experimental implementation for tileable optimization rule.
27 This rule works only when groupby aggregation operation follows the read CSV files,
28 we can prune the columns that not used by the following operations when read the files.
29 """
30 def match(self, node):
31 if isinstance(node.inputs[0].op, DataFrameReadCSV) and \
32 node.inputs[0] not in self._optimizer_context.result_tileables:
33 by_data = node.op.groupby_params.get('by')
34 by_columns = by_data if isinstance(by_data, (list, tuple)) else [by_data]
35 if isinstance(node.op.func, (str, list)):
36 # Passing func name(s) means perform on all columns.
37 return False
38 elif len(set(by_columns + list(node.op.func))) == len(node.inputs[0].op.usecols or node.inputs[0].dtypes):
39 # If performs on all columns, no need to prune.
40 return False
41 return True
42 return False
43
44 def apply(self, node):
45 by_data = node.op.groupby_params.get('by')
46 by_columns = by_data if isinstance(by_data, (list, tuple)) else [by_data]
47 agg_columns = list(node.op.func)
48 input_node = node.inputs[0]
49 selected_columns = [c for c in list(input_node.dtypes.index) if c in by_columns + agg_columns]
50 if input_node in self._optimizer_context:
51 new_input = self._optimizer_context[input_node]
52 selected_columns = [
53 c for c in list(input_node.dtypes.index) if c in selected_columns + new_input.op._usecols]
54 else:
55 new_input = copy_tileables([input_node])[0].data
56
57 new_input._shape = (input_node.shape[0], len(selected_columns))
58 new_input._dtypes = input_node.dtypes[selected_columns]
59 new_input._columns_value = parse_index(new_input._dtypes.index, store_data=True)
60 new_input.op._usecols = selected_columns
61 new_node = copy_tileables([node], inputs=[new_input])[0].data
62
63 self._optimizer_context[node] = new_node
64 self._optimizer_context[input_node] = new_input
65 return new_node
66
67
68 register(DataFrameGroupByAgg, GroupbyPruneReadCSV)
69
[end of mars/optimizes/tileable_graph/column_pruning.py]
[start of mars/dataframe/base/standardize_range_index.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import pandas as pd
18
19 from ... import opcodes as OperandDef
20 from ...utils import lazy_import
21 from ...serialize import Int32Field
22 from ..operands import DataFrameOperandMixin, DataFrameOperand
23
24
25 cudf = lazy_import('cudf', globals=globals())
26
27
28 class ChunkStandardizeRangeIndex(DataFrameOperand, DataFrameOperandMixin):
29 _op_type_ = OperandDef.STANDARDIZE_RANGE_INDEX
30
31 _axis = Int32Field('axis')
32
33 def __init__(self, prepare_inputs=None, axis=None, object_type=None, **kwargs):
34 super().__init__(__prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)
35
36 @property
37 def axis(self):
38 return self._axis
39
40 @classmethod
41 def execute(cls, ctx, op):
42 xdf = cudf if op.gpu else pd
43 in_data = ctx[op.inputs[-1].key].copy()
44 input_keys = [c.key for c in op.inputs[:-1]]
45 metas = ctx.get_chunk_metas(input_keys)
46 index_start = sum([m.chunk_shape[op.axis] for m in metas])
47 if op.axis == 0:
48 in_data.index = xdf.RangeIndex(index_start, index_start + len(in_data))
49 else:
50 in_data.columns = xdf.RangeIndex(index_start, index_start + in_data.shape[1])
51 ctx[op.outputs[0].key] = in_data
52
[end of mars/dataframe/base/standardize_range_index.py]
[start of mars/optimizes/tileable_graph/core.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import weakref
18 from collections import defaultdict
19
20 from ...graph import DAG
21 from ...graph_builder import TileableGraphBuilder
22 from ...utils import copy_tileables, kernel_mode, enter_build_mode
23
24 _rules = defaultdict(list)
25
26 tileable_optimized = weakref.WeakKeyDictionary()
27
28
29 class TileableOptimizeRule(object):
30 def __init__(self, optimized_context):
31 self._optimizer_context = optimized_context
32
33 def match(self, node):
34 raise NotImplementedError
35
36 def apply(self, node):
37 raise NotImplementedError
38
39
40 class OptimizeContext(weakref.WeakKeyDictionary):
41 def __init__(self, dict=None):
42 weakref.WeakKeyDictionary.__init__(self, dict=dict)
43 self._result_tileables = []
44
45 @property
46 def result_tileables(self):
47 return self._result_tileables
48
49 def append_result_tileables(self, tileables):
50 self._result_tileables.extend(tileables)
51
52
53 class OptimizeIntegratedTileableGraphBuilder(TileableGraphBuilder):
54 def __init__(self, **kw):
55 self._optimizer_context = OptimizeContext()
56 super().__init__(**kw)
57 self._node_processor = self._apply_rules(self._node_processor, self._optimizer_context)
58
59 @staticmethod
60 def _apply_rules(node_processor, optimizer_context):
61 def inner(node):
62 node = node_processor(node) if node_processor is not None else node
63 if type(node.op) in _rules:
64 for rule in _rules[type(node.op)]:
65 ruler = rule(optimizer_context)
66 if ruler.match(node):
67 node = rule(optimizer_context).apply(node)
68 return node
69
70 return inner
71
72 def _mapping_tileables(self, tileables):
73 for t in tileables:
74 if t in self._optimizer_context:
75 tileable_optimized[t] = self._optimizer_context[t]
76
77 def _replace_copied_tilebale(self, graph):
78 if len(self._optimizer_context) == 0:
79 return graph
80
81 new_graph = DAG()
82 reversed_mapping = weakref.WeakKeyDictionary((v, k) for k, v in self._optimizer_context.items())
83 for n in graph.topological_iter():
84 if n in reversed_mapping:
85 new_node = n
86 elif any(inp in self._optimizer_context for inp in n.inputs):
87 new_inputs = [self._optimizer_context.get(i, i) for i in n.inputs]
88 new_tileables = copy_tileables(n.op.outputs, inputs=new_inputs)
89 for t, new_t in zip(n.op.outputs, new_tileables):
90 self._optimizer_context[t] = new_t.data
91 if t is n:
92 new_node = new_t.data
93 else:
94 new_node = n
95 new_graph.add_node(new_node)
96 for inp in new_node.inputs:
97 new_graph.add_node(inp)
98 new_graph.add_edge(inp, new_node)
99 return new_graph
100
101 @kernel_mode
102 @enter_build_mode
103 def build(self, tileables, tileable_graph=None):
104 self._optimizer_context.append_result_tileables(tileables)
105 graph = super().build(tileables, tileable_graph=tileable_graph)
106 graph = self._replace_copied_tilebale(graph)
107 self._mapping_tileables(tileables)
108 return graph
109
110
111 def register(op_type, rule):
112 _rules[op_type].append(rule)
113
[end of mars/optimizes/tileable_graph/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/dataframe/base/standardize_range_index.py b/mars/dataframe/base/standardize_range_index.py
--- a/mars/dataframe/base/standardize_range_index.py
+++ b/mars/dataframe/base/standardize_range_index.py
@@ -31,7 +31,7 @@
_axis = Int32Field('axis')
def __init__(self, prepare_inputs=None, axis=None, object_type=None, **kwargs):
- super().__init__(__prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)
+ super().__init__(_prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)
@property
def axis(self):
diff --git a/mars/optimizes/tileable_graph/column_pruning.py b/mars/optimizes/tileable_graph/column_pruning.py
--- a/mars/optimizes/tileable_graph/column_pruning.py
+++ b/mars/optimizes/tileable_graph/column_pruning.py
@@ -62,7 +62,7 @@
self._optimizer_context[node] = new_node
self._optimizer_context[input_node] = new_input
- return new_node
+ return node
register(DataFrameGroupByAgg, GroupbyPruneReadCSV)
diff --git a/mars/optimizes/tileable_graph/core.py b/mars/optimizes/tileable_graph/core.py
--- a/mars/optimizes/tileable_graph/core.py
+++ b/mars/optimizes/tileable_graph/core.py
@@ -79,15 +79,19 @@
return graph
new_graph = DAG()
- reversed_mapping = weakref.WeakKeyDictionary((v, k) for k, v in self._optimizer_context.items())
+ replaced_tileables = weakref.WeakKeyDictionary()
for n in graph.topological_iter():
- if n in reversed_mapping:
- new_node = n
- elif any(inp in self._optimizer_context for inp in n.inputs):
- new_inputs = [self._optimizer_context.get(i, i) for i in n.inputs]
+ if graph.count_predecessors(n) == 0:
+ if n in self._optimizer_context and \
+ all(suc in self._optimizer_context for suc in graph.successors(n)):
+ replaced_tileables[n] = new_node = self._optimizer_context[n]
+ else:
+ new_node = n
+ elif any(inp in replaced_tileables for inp in n.inputs):
+ new_inputs = [replaced_tileables.get(i, i) for i in n.inputs]
new_tileables = copy_tileables(n.op.outputs, inputs=new_inputs)
for t, new_t in zip(n.op.outputs, new_tileables):
- self._optimizer_context[t] = new_t.data
+ replaced_tileables[t] = new_t.data
if t is n:
new_node = new_t.data
else:
@@ -96,6 +100,7 @@
for inp in new_node.inputs:
new_graph.add_node(inp)
new_graph.add_edge(inp, new_node)
+ self._optimizer_context.update(replaced_tileables)
return new_graph
@kernel_mode
| {"golden_diff": "diff --git a/mars/dataframe/base/standardize_range_index.py b/mars/dataframe/base/standardize_range_index.py\n--- a/mars/dataframe/base/standardize_range_index.py\n+++ b/mars/dataframe/base/standardize_range_index.py\n@@ -31,7 +31,7 @@\n _axis = Int32Field('axis')\n \n def __init__(self, prepare_inputs=None, axis=None, object_type=None, **kwargs):\n- super().__init__(__prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)\n+ super().__init__(_prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)\n \n @property\n def axis(self):\ndiff --git a/mars/optimizes/tileable_graph/column_pruning.py b/mars/optimizes/tileable_graph/column_pruning.py\n--- a/mars/optimizes/tileable_graph/column_pruning.py\n+++ b/mars/optimizes/tileable_graph/column_pruning.py\n@@ -62,7 +62,7 @@\n \n self._optimizer_context[node] = new_node\n self._optimizer_context[input_node] = new_input\n- return new_node\n+ return node\n \n \n register(DataFrameGroupByAgg, GroupbyPruneReadCSV)\ndiff --git a/mars/optimizes/tileable_graph/core.py b/mars/optimizes/tileable_graph/core.py\n--- a/mars/optimizes/tileable_graph/core.py\n+++ b/mars/optimizes/tileable_graph/core.py\n@@ -79,15 +79,19 @@\n return graph\n \n new_graph = DAG()\n- reversed_mapping = weakref.WeakKeyDictionary((v, k) for k, v in self._optimizer_context.items())\n+ replaced_tileables = weakref.WeakKeyDictionary()\n for n in graph.topological_iter():\n- if n in reversed_mapping:\n- new_node = n\n- elif any(inp in self._optimizer_context for inp in n.inputs):\n- new_inputs = [self._optimizer_context.get(i, i) for i in n.inputs]\n+ if graph.count_predecessors(n) == 0:\n+ if n in self._optimizer_context and \\\n+ all(suc in self._optimizer_context for suc in graph.successors(n)):\n+ replaced_tileables[n] = new_node = self._optimizer_context[n]\n+ else:\n+ new_node = n\n+ elif any(inp in replaced_tileables for inp in n.inputs):\n+ new_inputs = [replaced_tileables.get(i, i) for i in n.inputs]\n new_tileables = copy_tileables(n.op.outputs, inputs=new_inputs)\n for t, new_t in zip(n.op.outputs, new_tileables):\n- self._optimizer_context[t] = new_t.data\n+ replaced_tileables[t] = new_t.data\n if t is n:\n new_node = new_t.data\n else:\n@@ -96,6 +100,7 @@\n for inp in new_node.inputs:\n new_graph.add_node(inp)\n new_graph.add_edge(inp, new_node)\n+ self._optimizer_context.update(replaced_tileables)\n return new_graph\n \n @kernel_mode\n", "issue": "[BUG] Column pruning leads to a wrong result\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\n``` Python\r\npd_df = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],\r\n 'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],\r\n 'c': list('aabaaddce'),\r\n 'd': list('abaaaddce')})\r\npd_df.to_csv('test.csv', index=False)\r\nin_df = md.read_csv('test.csv')\r\ndf1 = in_df.groupby('d').agg({'b': 'min'})\r\ndf2 = in_df[in_df.d.isin(df1.index)]\r\ndf2.execute()\r\nOut[5]: \r\n b d\r\n0 1 a\r\n1 3 b\r\n2 4 a\r\n3 5 a\r\n4 6 a\r\n5 5 d\r\n6 4 d\r\n7 4 c\r\n8 4 e\r\n```\r\n\r\nthe output only has two columns which should be 4.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...utils import copy_tileables\nfrom ...dataframe.utils import parse_index\nfrom ...dataframe.groupby.aggregation import DataFrameGroupByAgg\nfrom ...dataframe.datasource.read_csv import DataFrameReadCSV\nfrom .core import TileableOptimizeRule, register\n\n\nclass GroupbyPruneReadCSV(TileableOptimizeRule):\n \"\"\"\n An experimental implementation for tileable optimization rule.\n This rule works only when groupby aggregation operation follows the read CSV files,\n we can prune the columns that not used by the following operations when read the files.\n \"\"\"\n def match(self, node):\n if isinstance(node.inputs[0].op, DataFrameReadCSV) and \\\n node.inputs[0] not in self._optimizer_context.result_tileables:\n by_data = node.op.groupby_params.get('by')\n by_columns = by_data if isinstance(by_data, (list, tuple)) else [by_data]\n if isinstance(node.op.func, (str, list)):\n # Passing func name(s) means perform on all columns.\n return False\n elif len(set(by_columns + list(node.op.func))) == len(node.inputs[0].op.usecols or node.inputs[0].dtypes):\n # If performs on all columns, no need to prune.\n return False\n return True\n return False\n\n def apply(self, node):\n by_data = node.op.groupby_params.get('by')\n by_columns = by_data if isinstance(by_data, (list, tuple)) else [by_data]\n agg_columns = list(node.op.func)\n input_node = node.inputs[0]\n selected_columns = [c for c in list(input_node.dtypes.index) if c in by_columns + agg_columns]\n if input_node in self._optimizer_context:\n new_input = self._optimizer_context[input_node]\n selected_columns = [\n c for c in list(input_node.dtypes.index) if c in selected_columns + new_input.op._usecols]\n else:\n new_input = copy_tileables([input_node])[0].data\n\n new_input._shape = (input_node.shape[0], len(selected_columns))\n new_input._dtypes = input_node.dtypes[selected_columns]\n new_input._columns_value = parse_index(new_input._dtypes.index, store_data=True)\n new_input.op._usecols = selected_columns\n new_node = copy_tileables([node], inputs=[new_input])[0].data\n\n self._optimizer_context[node] = new_node\n self._optimizer_context[input_node] = new_input\n return new_node\n\n\nregister(DataFrameGroupByAgg, GroupbyPruneReadCSV)\n", "path": "mars/optimizes/tileable_graph/column_pruning.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\n\nfrom ... 
import opcodes as OperandDef\nfrom ...utils import lazy_import\nfrom ...serialize import Int32Field\nfrom ..operands import DataFrameOperandMixin, DataFrameOperand\n\n\ncudf = lazy_import('cudf', globals=globals())\n\n\nclass ChunkStandardizeRangeIndex(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = OperandDef.STANDARDIZE_RANGE_INDEX\n\n _axis = Int32Field('axis')\n\n def __init__(self, prepare_inputs=None, axis=None, object_type=None, **kwargs):\n super().__init__(__prepare_inputs=prepare_inputs, _axis=axis, _object_type=object_type, **kwargs)\n\n @property\n def axis(self):\n return self._axis\n\n @classmethod\n def execute(cls, ctx, op):\n xdf = cudf if op.gpu else pd\n in_data = ctx[op.inputs[-1].key].copy()\n input_keys = [c.key for c in op.inputs[:-1]]\n metas = ctx.get_chunk_metas(input_keys)\n index_start = sum([m.chunk_shape[op.axis] for m in metas])\n if op.axis == 0:\n in_data.index = xdf.RangeIndex(index_start, index_start + len(in_data))\n else:\n in_data.columns = xdf.RangeIndex(index_start, index_start + in_data.shape[1])\n ctx[op.outputs[0].key] = in_data\n", "path": "mars/dataframe/base/standardize_range_index.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport weakref\nfrom collections import defaultdict\n\nfrom ...graph import DAG\nfrom ...graph_builder import TileableGraphBuilder\nfrom ...utils import copy_tileables, kernel_mode, enter_build_mode\n\n_rules = defaultdict(list)\n\ntileable_optimized = weakref.WeakKeyDictionary()\n\n\nclass TileableOptimizeRule(object):\n def __init__(self, optimized_context):\n self._optimizer_context = optimized_context\n\n def match(self, node):\n raise NotImplementedError\n\n def apply(self, node):\n raise NotImplementedError\n\n\nclass OptimizeContext(weakref.WeakKeyDictionary):\n def __init__(self, dict=None):\n weakref.WeakKeyDictionary.__init__(self, dict=dict)\n self._result_tileables = []\n\n @property\n def result_tileables(self):\n return self._result_tileables\n\n def append_result_tileables(self, tileables):\n self._result_tileables.extend(tileables)\n\n\nclass OptimizeIntegratedTileableGraphBuilder(TileableGraphBuilder):\n def __init__(self, **kw):\n self._optimizer_context = OptimizeContext()\n super().__init__(**kw)\n self._node_processor = self._apply_rules(self._node_processor, self._optimizer_context)\n\n @staticmethod\n def _apply_rules(node_processor, optimizer_context):\n def inner(node):\n node = node_processor(node) if node_processor is not None else node\n if type(node.op) in _rules:\n for rule in _rules[type(node.op)]:\n ruler = rule(optimizer_context)\n if ruler.match(node):\n node = rule(optimizer_context).apply(node)\n return node\n\n return inner\n\n def _mapping_tileables(self, tileables):\n for t in tileables:\n if t in self._optimizer_context:\n tileable_optimized[t] = self._optimizer_context[t]\n\n def _replace_copied_tilebale(self, graph):\n if len(self._optimizer_context) == 
0:\n return graph\n\n new_graph = DAG()\n reversed_mapping = weakref.WeakKeyDictionary((v, k) for k, v in self._optimizer_context.items())\n for n in graph.topological_iter():\n if n in reversed_mapping:\n new_node = n\n elif any(inp in self._optimizer_context for inp in n.inputs):\n new_inputs = [self._optimizer_context.get(i, i) for i in n.inputs]\n new_tileables = copy_tileables(n.op.outputs, inputs=new_inputs)\n for t, new_t in zip(n.op.outputs, new_tileables):\n self._optimizer_context[t] = new_t.data\n if t is n:\n new_node = new_t.data\n else:\n new_node = n\n new_graph.add_node(new_node)\n for inp in new_node.inputs:\n new_graph.add_node(inp)\n new_graph.add_edge(inp, new_node)\n return new_graph\n\n @kernel_mode\n @enter_build_mode\n def build(self, tileables, tileable_graph=None):\n self._optimizer_context.append_result_tileables(tileables)\n graph = super().build(tileables, tileable_graph=tileable_graph)\n graph = self._replace_copied_tilebale(graph)\n self._mapping_tileables(tileables)\n return graph\n\n\ndef register(op_type, rule):\n _rules[op_type].append(rule)\n", "path": "mars/optimizes/tileable_graph/core.py"}]} | 3,415 | 716 |
gh_patches_debug_8363 | rasdani/github-patches | git_diff | spyder-ide__spyder-8950 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IOError raised when completion plugins fail to start
## Description
### What steps will reproduce the problem?
<!--- You can use Markdown here --->
```
raise IOError('Could not start %s' % self)
OSError: Could not start <spyder.utils.introspection.plugin_client.PluginClient object at 0xb5fc7c5e8>
```
### Traceback
```python-traceback
File "/Users/jimmyxiao/anaconda3/lib/python3.7/site-packages/spyder/utils/introspection/plugin_client.py", line 158, in _on_finished
self.run()
File "/Users/jimmyxiao/anaconda3/lib/python3.7/site-packages/spyder/utils/introspection/plugin_client.py", line 104, in run
raise IOError('Could not start %s' % self)
OSError: Could not start <spyder.utils.introspection.plugin_client.PluginClient object at 0xb5fc7c5e8>
WARNING:spyder.widgets.github.backend:Failed to send bug report on Github. response={'code': 502, 'json': {'message': 'Server Error'}}
```
## Versions
* Spyder version: 3.3.2
* Python version: 3.7.1
* Qt version: 5.9.6
* PyQt5 version: 5.9.2
* Operating System: Darwin 18.2.0
### Dependencies
```
pyflakes >=0.6.0 : 2.0.0 (OK)
pycodestyle >=2.3 : 2.4.0 (OK)
pygments >=2.0 : 2.3.1 (OK)
sphinx >=0.6.6 : 1.8.2 (OK)
rope >=0.9.4 : 0.11.0 (OK)
jedi >=0.9.0 : 0.13.2 (OK)
psutil >=0.3 : 5.4.8 (OK)
nbconvert >=4.0 : 5.4.0 (OK)
pandas >=0.13.1 : 0.23.4 (OK)
numpy >=1.7 : 1.15.4 (OK)
sympy >=0.7.3 : 1.3 (OK)
cython >=0.21 : 0.29.2 (OK)
qtconsole >=4.2.0 : 4.4.3 (OK)
IPython >=4.0 : 7.2.0 (OK)
matplotlib >=2.0.0: 3.0.2 (OK)
pylint >=0.25 : 2.2.2 (OK)
```
</issue>
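For readability, here is a condensed, Qt-free toy of the code path the traceback above goes through (an editorial sketch, not Spyder code; `started_ok` stands in for `QProcess.waitForStarted()` returning False when the helper process cannot be launched):

```python
# Toy reproduction of the failure path in plugin_client.py (simplified).
class ToyPluginClient:
    def __init__(self, started_ok: bool) -> None:
        self.started_ok = started_ok
        self.is_initialized = True
        self.closing = False

    def run(self) -> None:
        # Mirrors AsyncClient.run(): raising here is what produces the report.
        if not self.started_ok:
            raise IOError(f"Could not start {self!r}")

    def _on_finished(self) -> None:
        # Mirrors AsyncClient._on_finished(), which is connected to
        # QProcess.finished: when the helper dies the client tries to
        # restart itself, and the raise above escapes into the event loop.
        if self.closing:
            return
        self.run()


try:
    ToyPluginClient(started_ok=False)._on_finished()
except OSError as exc:  # IOError is an alias of OSError on Python 3
    print(exc)
```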
<code>
[start of spyder/utils/introspection/plugin_client.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 # Local imports
8 import imp
9 import os
10 import os.path as osp
11 import sys
12 import uuid
13
14 # Third party imports
15 from qtpy.QtCore import (QObject, QProcess, QProcessEnvironment,
16 QSocketNotifier, QTimer, Signal)
17 from qtpy.QtWidgets import QApplication
18 import zmq
19
20 # Local imports
21 from spyder.config.base import debug_print, get_module_path
22
23
24 # Heartbeat timer in milliseconds
25 HEARTBEAT = 1000
26
27
28 class AsyncClient(QObject):
29
30 """
31 A class which handles a connection to a client through a QProcess.
32 """
33
34 # Emitted when the client has initialized.
35 initialized = Signal()
36
37 # Emitted when the client errors.
38 errored = Signal()
39
40 # Emitted when a request response is received.
41 received = Signal(object)
42
43 def __init__(self, target, executable=None, name=None,
44 extra_args=None, libs=None, cwd=None, env=None):
45 super(AsyncClient, self).__init__()
46 self.executable = executable or sys.executable
47 self.extra_args = extra_args
48 self.target = target
49 self.name = name or self
50 self.libs = libs
51 self.cwd = cwd
52 self.env = env
53 self.is_initialized = False
54 self.closing = False
55 self.notifier = None
56 self.process = None
57 self.context = zmq.Context()
58 QApplication.instance().aboutToQuit.connect(self.close)
59
60 # Set up the heartbeat timer.
61 self.timer = QTimer(self)
62 self.timer.timeout.connect(self._heartbeat)
63
64 def run(self):
65 """Handle the connection with the server.
66 """
67 # Set up the zmq port.
68 self.socket = self.context.socket(zmq.PAIR)
69 self.port = self.socket.bind_to_random_port('tcp://*')
70
71 # Set up the process.
72 self.process = QProcess(self)
73 if self.cwd:
74 self.process.setWorkingDirectory(self.cwd)
75 p_args = ['-u', self.target, str(self.port)]
76 if self.extra_args is not None:
77 p_args += self.extra_args
78
79 # Set up environment variables.
80 processEnvironment = QProcessEnvironment()
81 env = self.process.systemEnvironment()
82 if (self.env and 'PYTHONPATH' not in self.env) or self.env is None:
83 python_path = osp.dirname(get_module_path('spyder'))
84 # Add the libs to the python path.
85 for lib in self.libs:
86 try:
87 path = osp.dirname(imp.find_module(lib)[1])
88 python_path = osp.pathsep.join([python_path, path])
89 except ImportError:
90 pass
91 env.append("PYTHONPATH=%s" % python_path)
92 if self.env:
93 env.update(self.env)
94 for envItem in env:
95 envName, separator, envValue = envItem.partition('=')
96 processEnvironment.insert(envName, envValue)
97 self.process.setProcessEnvironment(processEnvironment)
98
99 # Start the process and wait for started.
100 self.process.start(self.executable, p_args)
101 self.process.finished.connect(self._on_finished)
102 running = self.process.waitForStarted()
103 if not running:
104 raise IOError('Could not start %s' % self)
105
106 # Set up the socket notifer.
107 fid = self.socket.getsockopt(zmq.FD)
108 self.notifier = QSocketNotifier(fid, QSocketNotifier.Read, self)
109 self.notifier.activated.connect(self._on_msg_received)
110
111 def request(self, func_name, *args, **kwargs):
112 """Send a request to the server.
113
114 The response will be a dictionary the 'request_id' and the
115 'func_name' as well as a 'result' field with the object returned by
116 the function call or or an 'error' field with a traceback.
117 """
118 if not self.is_initialized:
119 return
120 request_id = uuid.uuid4().hex
121 request = dict(func_name=func_name,
122 args=args,
123 kwargs=kwargs,
124 request_id=request_id)
125 self._send(request)
126 return request_id
127
128 def close(self):
129 """Cleanly close the connection to the server.
130 """
131 self.closing = True
132 self.is_initialized = False
133 self.timer.stop()
134
135 if self.notifier is not None:
136 self.notifier.activated.disconnect(self._on_msg_received)
137 self.notifier.setEnabled(False)
138 self.notifier = None
139
140 self.request('server_quit')
141
142 if self.process is not None:
143 self.process.waitForFinished(1000)
144 self.process.close()
145 self.context.destroy()
146
147 def _on_finished(self):
148 """Handle a finished signal from the process.
149 """
150 if self.closing:
151 return
152 if self.is_initialized:
153 debug_print('Restarting %s' % self.name)
154 debug_print(self.process.readAllStandardOutput())
155 debug_print(self.process.readAllStandardError())
156 self.is_initialized = False
157 self.notifier.setEnabled(False)
158 self.run()
159 else:
160 debug_print('Errored %s' % self.name)
161 debug_print(self.process.readAllStandardOutput())
162 debug_print(self.process.readAllStandardError())
163 self.errored.emit()
164
165 def _on_msg_received(self):
166 """Handle a message trigger from the socket.
167 """
168 self.notifier.setEnabled(False)
169 while 1:
170 try:
171 resp = self.socket.recv_pyobj(flags=zmq.NOBLOCK)
172 except zmq.ZMQError:
173 self.notifier.setEnabled(True)
174 return
175 if not self.is_initialized:
176 self.is_initialized = True
177 debug_print('Initialized %s' % self.name)
178 self.initialized.emit()
179 self.timer.start(HEARTBEAT)
180 continue
181 resp['name'] = self.name
182 self.received.emit(resp)
183
184 def _heartbeat(self):
185 """Send a heartbeat to keep the server alive.
186 """
187 self._send(dict(func_name='server_heartbeat'))
188
189 def _send(self, obj):
190 """Send an object to the server.
191 """
192 try:
193 self.socket.send_pyobj(obj, zmq.NOBLOCK)
194 except Exception as e:
195 debug_print(e)
196 self.is_initialized = False
197 self._on_finished()
198
199
200 class PluginClient(AsyncClient):
201
202 def __init__(self, plugin_name, executable=None, env=None,
203 extra_path=None):
204 cwd = os.path.dirname(__file__)
205 super(PluginClient, self).__init__(
206 'plugin_server.py',
207 executable=executable, cwd=cwd, env=env,
208 extra_args=[plugin_name], libs=[plugin_name])
209 self.name = plugin_name
210
211
212 if __name__ == '__main__':
213 app = QApplication(sys.argv)
214 plugin = PluginClient('jedi')
215 plugin.run()
216
217 def handle_return(value):
218 print(value) # spyder: test-skip
219 if value['func_name'] == 'foo':
220 app.quit()
221 else:
222 plugin.request('foo')
223
224 def handle_errored():
225 print('errored') # spyder: test-skip
226 sys.exit(1)
227
228 def start():
229 print('start') # spyder: test-skip
230 plugin.request('validate')
231
232 plugin.errored.connect(handle_errored)
233 plugin.received.connect(handle_return)
234 plugin.initialized.connect(start)
235
236 app.exec_()
237
[end of spyder/utils/introspection/plugin_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spyder/utils/introspection/plugin_client.py b/spyder/utils/introspection/plugin_client.py
--- a/spyder/utils/introspection/plugin_client.py
+++ b/spyder/utils/introspection/plugin_client.py
@@ -101,7 +101,10 @@
self.process.finished.connect(self._on_finished)
running = self.process.waitForStarted()
if not running:
- raise IOError('Could not start %s' % self)
+ # Don't raise an error if the plugin fails to start
+ # Fixes issue 8934
+ debug_print('Could not start %s' % self)
+ return
# Set up the socket notifer.
fid = self.socket.getsockopt(zmq.FD)
| {"golden_diff": "diff --git a/spyder/utils/introspection/plugin_client.py b/spyder/utils/introspection/plugin_client.py\n--- a/spyder/utils/introspection/plugin_client.py\n+++ b/spyder/utils/introspection/plugin_client.py\n@@ -101,7 +101,10 @@\n self.process.finished.connect(self._on_finished)\n running = self.process.waitForStarted()\n if not running:\n- raise IOError('Could not start %s' % self)\n+ # Don't raise an error if the plugin fails to start\n+ # Fixes issue 8934\n+ debug_print('Could not start %s' % self)\n+ return\n \n # Set up the socket notifer.\n fid = self.socket.getsockopt(zmq.FD)\n", "issue": "IOError raised when completion plugins fail to start\n## Description\n\n### What steps will reproduce the problem?\n\n<!--- You can use Markdown here --->\n\nraise IOError('Could not start %s' % self)\nOSError: Could not start <spyder.utils.introspection.plugin_client.PluginClient object at 0xb5fc7c5e8>\"\n\n### Traceback\n```python-traceback\n File \"/Users/jimmyxiao/anaconda3/lib/python3.7/site-packages/spyder/utils/introspection/plugin_client.py\", line 158, in _on_finished\n self.run()\n File \"/Users/jimmyxiao/anaconda3/lib/python3.7/site-packages/spyder/utils/introspection/plugin_client.py\", line 104, in run\n raise IOError('Could not start %s' % self)\nOSError: Could not start <spyder.utils.introspection.plugin_client.PluginClient object at 0xb5fc7c5e8>\nWARNING:spyder.widgets.github.backend:Failed to send bug report on Github. response={'code': 502, 'json': {'message': 'Server Error'}}\n```\n\n## Versions\n\n* Spyder version: 3.3.2 \n* Python version: 3.7.1\n* Qt version: 5.9.6\n* PyQt5 version: 5.9.2\n* Operating System: Darwin 18.2.0\n\n### Dependencies\n\n```\npyflakes >=0.6.0 : 2.0.0 (OK)\npycodestyle >=2.3 : 2.4.0 (OK)\npygments >=2.0 : 2.3.1 (OK)\nsphinx >=0.6.6 : 1.8.2 (OK)\nrope >=0.9.4 : 0.11.0 (OK)\njedi >=0.9.0 : 0.13.2 (OK)\npsutil >=0.3 : 5.4.8 (OK)\nnbconvert >=4.0 : 5.4.0 (OK)\npandas >=0.13.1 : 0.23.4 (OK)\nnumpy >=1.7 : 1.15.4 (OK)\nsympy >=0.7.3 : 1.3 (OK)\ncython >=0.21 : 0.29.2 (OK)\nqtconsole >=4.2.0 : 4.4.3 (OK)\nIPython >=4.0 : 7.2.0 (OK)\nmatplotlib >=2.0.0: 3.0.2 (OK)\npylint >=0.25 : 2.2.2 (OK)\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Local imports\nimport imp\nimport os\nimport os.path as osp\nimport sys\nimport uuid\n\n# Third party imports\nfrom qtpy.QtCore import (QObject, QProcess, QProcessEnvironment,\n QSocketNotifier, QTimer, Signal)\nfrom qtpy.QtWidgets import QApplication\nimport zmq\n\n# Local imports\nfrom spyder.config.base import debug_print, get_module_path\n\n\n# Heartbeat timer in milliseconds\nHEARTBEAT = 1000\n\n\nclass AsyncClient(QObject):\n\n \"\"\"\n A class which handles a connection to a client through a QProcess.\n \"\"\"\n\n # Emitted when the client has initialized.\n initialized = Signal()\n\n # Emitted when the client errors.\n errored = Signal()\n\n # Emitted when a request response is received.\n received = Signal(object)\n\n def __init__(self, target, executable=None, name=None,\n extra_args=None, libs=None, cwd=None, env=None):\n super(AsyncClient, self).__init__()\n self.executable = executable or sys.executable\n self.extra_args = extra_args\n self.target = target\n self.name = name or self\n self.libs = libs\n self.cwd = cwd\n self.env = env\n self.is_initialized = False\n self.closing = False\n self.notifier = None\n self.process = None\n 
self.context = zmq.Context()\n QApplication.instance().aboutToQuit.connect(self.close)\n\n # Set up the heartbeat timer.\n self.timer = QTimer(self)\n self.timer.timeout.connect(self._heartbeat)\n\n def run(self):\n \"\"\"Handle the connection with the server.\n \"\"\"\n # Set up the zmq port.\n self.socket = self.context.socket(zmq.PAIR)\n self.port = self.socket.bind_to_random_port('tcp://*')\n\n # Set up the process.\n self.process = QProcess(self)\n if self.cwd:\n self.process.setWorkingDirectory(self.cwd)\n p_args = ['-u', self.target, str(self.port)]\n if self.extra_args is not None:\n p_args += self.extra_args\n\n # Set up environment variables.\n processEnvironment = QProcessEnvironment()\n env = self.process.systemEnvironment()\n if (self.env and 'PYTHONPATH' not in self.env) or self.env is None:\n python_path = osp.dirname(get_module_path('spyder'))\n # Add the libs to the python path.\n for lib in self.libs:\n try:\n path = osp.dirname(imp.find_module(lib)[1])\n python_path = osp.pathsep.join([python_path, path])\n except ImportError:\n pass\n env.append(\"PYTHONPATH=%s\" % python_path)\n if self.env:\n env.update(self.env)\n for envItem in env:\n envName, separator, envValue = envItem.partition('=')\n processEnvironment.insert(envName, envValue)\n self.process.setProcessEnvironment(processEnvironment)\n\n # Start the process and wait for started.\n self.process.start(self.executable, p_args)\n self.process.finished.connect(self._on_finished)\n running = self.process.waitForStarted()\n if not running:\n raise IOError('Could not start %s' % self)\n\n # Set up the socket notifer.\n fid = self.socket.getsockopt(zmq.FD)\n self.notifier = QSocketNotifier(fid, QSocketNotifier.Read, self)\n self.notifier.activated.connect(self._on_msg_received)\n\n def request(self, func_name, *args, **kwargs):\n \"\"\"Send a request to the server.\n\n The response will be a dictionary the 'request_id' and the\n 'func_name' as well as a 'result' field with the object returned by\n the function call or or an 'error' field with a traceback.\n \"\"\"\n if not self.is_initialized:\n return\n request_id = uuid.uuid4().hex\n request = dict(func_name=func_name,\n args=args,\n kwargs=kwargs,\n request_id=request_id)\n self._send(request)\n return request_id\n\n def close(self):\n \"\"\"Cleanly close the connection to the server.\n \"\"\"\n self.closing = True\n self.is_initialized = False\n self.timer.stop()\n\n if self.notifier is not None:\n self.notifier.activated.disconnect(self._on_msg_received)\n self.notifier.setEnabled(False)\n self.notifier = None\n\n self.request('server_quit')\n\n if self.process is not None:\n self.process.waitForFinished(1000)\n self.process.close()\n self.context.destroy()\n\n def _on_finished(self):\n \"\"\"Handle a finished signal from the process.\n \"\"\"\n if self.closing:\n return\n if self.is_initialized:\n debug_print('Restarting %s' % self.name)\n debug_print(self.process.readAllStandardOutput())\n debug_print(self.process.readAllStandardError())\n self.is_initialized = False\n self.notifier.setEnabled(False)\n self.run()\n else:\n debug_print('Errored %s' % self.name)\n debug_print(self.process.readAllStandardOutput())\n debug_print(self.process.readAllStandardError())\n self.errored.emit()\n\n def _on_msg_received(self):\n \"\"\"Handle a message trigger from the socket.\n \"\"\"\n self.notifier.setEnabled(False)\n while 1:\n try:\n resp = self.socket.recv_pyobj(flags=zmq.NOBLOCK)\n except zmq.ZMQError:\n self.notifier.setEnabled(True)\n return\n if not 
self.is_initialized:\n self.is_initialized = True\n debug_print('Initialized %s' % self.name)\n self.initialized.emit()\n self.timer.start(HEARTBEAT)\n continue\n resp['name'] = self.name\n self.received.emit(resp)\n\n def _heartbeat(self):\n \"\"\"Send a heartbeat to keep the server alive.\n \"\"\"\n self._send(dict(func_name='server_heartbeat'))\n\n def _send(self, obj):\n \"\"\"Send an object to the server.\n \"\"\"\n try:\n self.socket.send_pyobj(obj, zmq.NOBLOCK)\n except Exception as e:\n debug_print(e)\n self.is_initialized = False\n self._on_finished()\n\n\nclass PluginClient(AsyncClient):\n\n def __init__(self, plugin_name, executable=None, env=None,\n extra_path=None):\n cwd = os.path.dirname(__file__)\n super(PluginClient, self).__init__(\n 'plugin_server.py',\n executable=executable, cwd=cwd, env=env,\n extra_args=[plugin_name], libs=[plugin_name])\n self.name = plugin_name\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n plugin = PluginClient('jedi')\n plugin.run()\n\n def handle_return(value):\n print(value) # spyder: test-skip\n if value['func_name'] == 'foo':\n app.quit()\n else:\n plugin.request('foo')\n\n def handle_errored():\n print('errored') # spyder: test-skip\n sys.exit(1)\n\n def start():\n print('start') # spyder: test-skip\n plugin.request('validate')\n\n plugin.errored.connect(handle_errored)\n plugin.received.connect(handle_return)\n plugin.initialized.connect(start)\n\n app.exec_()\n", "path": "spyder/utils/introspection/plugin_client.py"}]} | 3,396 | 169 |
gh_patches_debug_18480 | rasdani/github-patches | git_diff | litestar-org__litestar-1718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: template autoescaping behavior
### Summary
I appreciate this framework having a built-in choice between Jinja and Mako. The documentation, however, makes no mention of a significant difference in Litestar's behavior between the two -- using the Jinja engine will autoescape for you, whereas Mako will not.
</issue>
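To make the difference the issue describes concrete, a minimal sketch using stock jinja2 and Mako defaults (an illustrative snippet, not Litestar's exact wiring):

```python
from jinja2 import Environment
from mako.template import Template as MakoTemplate

payload = "<script>boom()</script>"

# Jinja with autoescaping enabled escapes the value:
jinja_env = Environment(autoescape=True)
print(jinja_env.from_string("{{ value }}").render(value=payload))
# -> <script>boom()</script>

# Plain Mako renders it verbatim unless an escape filter such as `| h`
# (or a lookup configured with default_filters=["h"]) is used:
print(MakoTemplate("${value}").render(value=payload))
# -> <script>boom()</script>
```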
<code>
[start of litestar/contrib/mako.py]
1 from __future__ import annotations
2
3 from functools import partial
4 from typing import TYPE_CHECKING, Any, Callable
5
6 from litestar.exceptions import MissingDependencyException, TemplateNotFoundException
7 from litestar.template.base import (
8 TemplateEngineProtocol,
9 TemplateProtocol,
10 csrf_token,
11 url_for,
12 url_for_static_asset,
13 )
14
15 __all__ = ("MakoTemplate", "MakoTemplateEngine")
16
17
18 try:
19 import mako # noqa: F401
20 except ImportError as e:
21 raise MissingDependencyException("mako") from e
22
23
24 from mako.exceptions import TemplateLookupException as MakoTemplateNotFound
25 from mako.lookup import TemplateLookup
26
27 if TYPE_CHECKING:
28 from mako.template import Template as _MakoTemplate
29 from pydantic import DirectoryPath
30
31
32 class MakoTemplate(TemplateProtocol):
33 """Mako template, implementing ``TemplateProtocol``"""
34
35 def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):
36 """Initialize a template.
37
38 Args:
39 template: Base ``MakoTemplate`` used by the underlying mako-engine
40 template_callables: List of callables passed to the template
41 """
42 super().__init__()
43 self.template = template
44 self.template_callables = template_callables
45
46 def render(self, *args: Any, **kwargs: Any) -> str:
47 """Render a template.
48
49 Args:
50 args: Positional arguments passed to the engines ``render`` function
51 kwargs: Keyword arguments passed to the engines ``render`` function
52
53 Returns:
54 Rendered template as a string
55 """
56 for callable_key, template_callable in self.template_callables:
57 kwargs_copy = {**kwargs}
58 kwargs[callable_key] = partial(template_callable, kwargs_copy)
59
60 return str(self.template.render(*args, **kwargs))
61
62
63 class MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):
64 """Mako based TemplateEngine."""
65
66 def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:
67 """Initialize template engine.
68
69 Args:
70 directory: Direct path or list of directory paths from which to serve templates.
71 """
72 super().__init__(directory=directory)
73 self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
74 self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
75 self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
76 self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
77 self.register_template_callable(key="url_for", template_callable=url_for) # type: ignore
78
79 def get_template(self, template_name: str) -> MakoTemplate:
80 """Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.
81
82 Args:
83 template_name: A dotted path
84
85 Returns:
86 MakoTemplate instance
87
88 Raises:
89 TemplateNotFoundException: if no template is found.
90 """
91 try:
92 return MakoTemplate(
93 template=self.engine.get_template(template_name), template_callables=self._template_callables
94 )
95 except MakoTemplateNotFound as exc:
96 raise TemplateNotFoundException(template_name=template_name) from exc
97
98 def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:
99 """Register a callable on the template engine.
100
101 Args:
102 key: The callable key, i.e. the value to use inside the template to call the callable.
103 template_callable: A callable to register.
104
105 Returns:
106 None
107 """
108 self._template_callables.append((key, template_callable))
109
[end of litestar/contrib/mako.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py
--- a/litestar/contrib/mako.py
+++ b/litestar/contrib/mako.py
@@ -70,7 +70,9 @@
directory: Direct path or list of directory paths from which to serve templates.
"""
super().__init__(directory=directory)
- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
+ self.engine = TemplateLookup(
+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=["h"]
+ )
self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
| {"golden_diff": "diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py\n--- a/litestar/contrib/mako.py\n+++ b/litestar/contrib/mako.py\n@@ -70,7 +70,9 @@\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n+ self.engine = TemplateLookup(\n+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=[\"h\"]\n+ )\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n", "issue": "Docs: template autoescaping behavior\n### Summary\r\n\r\nI appreciate this framework having a built-in choice between Jinja and Mako. The documentation however makes no mention of a significant difference in the Litestar behavior between the two -- that using the Jinja engine will autoescape for you, whereas Mako will not. \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom litestar.exceptions import MissingDependencyException, TemplateNotFoundException\nfrom litestar.template.base import (\n TemplateEngineProtocol,\n TemplateProtocol,\n csrf_token,\n url_for,\n url_for_static_asset,\n)\n\n__all__ = (\"MakoTemplate\", \"MakoTemplateEngine\")\n\n\ntry:\n import mako # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"mako\") from e\n\n\nfrom mako.exceptions import TemplateLookupException as MakoTemplateNotFound\nfrom mako.lookup import TemplateLookup\n\nif TYPE_CHECKING:\n from mako.template import Template as _MakoTemplate\n from pydantic import DirectoryPath\n\n\nclass MakoTemplate(TemplateProtocol):\n \"\"\"Mako template, implementing ``TemplateProtocol``\"\"\"\n\n def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):\n \"\"\"Initialize a template.\n\n Args:\n template: Base ``MakoTemplate`` used by the underlying mako-engine\n template_callables: List of callables passed to the template\n \"\"\"\n super().__init__()\n self.template = template\n self.template_callables = template_callables\n\n def render(self, *args: Any, **kwargs: Any) -> str:\n \"\"\"Render a template.\n\n Args:\n args: Positional arguments passed to the engines ``render`` function\n kwargs: Keyword arguments passed to the engines ``render`` function\n\n Returns:\n Rendered template as a string\n \"\"\"\n for callable_key, template_callable in self.template_callables:\n kwargs_copy = {**kwargs}\n kwargs[callable_key] = partial(template_callable, kwargs_copy)\n\n return str(self.template.render(*args, **kwargs))\n\n\nclass MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):\n \"\"\"Mako based TemplateEngine.\"\"\"\n\n def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:\n \"\"\"Initialize template engine.\n\n Args:\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n 
self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n self.register_template_callable(key=\"url_for\", template_callable=url_for) # type: ignore\n\n def get_template(self, template_name: str) -> MakoTemplate:\n \"\"\"Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.\n\n Args:\n template_name: A dotted path\n\n Returns:\n MakoTemplate instance\n\n Raises:\n TemplateNotFoundException: if no template is found.\n \"\"\"\n try:\n return MakoTemplate(\n template=self.engine.get_template(template_name), template_callables=self._template_callables\n )\n except MakoTemplateNotFound as exc:\n raise TemplateNotFoundException(template_name=template_name) from exc\n\n def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:\n \"\"\"Register a callable on the template engine.\n\n Args:\n key: The callable key, i.e. the value to use inside the template to call the callable.\n template_callable: A callable to register.\n\n Returns:\n None\n \"\"\"\n self._template_callables.append((key, template_callable))\n", "path": "litestar/contrib/mako.py"}]} | 1,653 | 213 |
gh_patches_debug_27060 | rasdani/github-patches | git_diff | svthalia__concrexit-1399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documents page broken
Steps to reproduce:
1. Go to https://thalia.nu/association/documents/
Sentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)
```
FieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name
(5 additional frame(s) were not displayed)
...
File "documents/views.py", line 54, in get_context_data
"association_documents": AssociationDocument.objects.order_by(
File "django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "django/db/models/query.py", line 1134, in order_by
obj.query.add_ordering(*field_names)
File "django/db/models/sql/query.py", line 1919, in add_ordering
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
File "django/db/models/sql/query.py", line 1481, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
```
</issue>
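In ORM terms the failure is an `order_by()` on a translated column that no longer exists on the model (see the field choices listed in the traceback). A minimal sketch of the failing versus working call, assuming a configured Django project with the documents app installed:

```python
from django.utils.translation import get_language

from documents.models import AssociationDocument

# The model only has `name` (no `name_en`), so the dynamic lookup below
# raises FieldError: "Cannot resolve keyword 'name_en' into field."
AssociationDocument.objects.order_by(f"name_{get_language()}")

# Ordering on the plain `name` field works:
AssociationDocument.objects.order_by("name")
```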
<code>
[start of website/documents/views.py]
1 """Views provided by the documents package"""
2 import os
3
4 from django.conf import settings
5 from django.core.exceptions import PermissionDenied
6 from django.http import Http404, HttpResponse
7 from django.shortcuts import redirect
8 from django.utils import timezone
9 from django.utils.text import slugify
10 from django.utils.translation import get_language
11 from django.views.generic import TemplateView, DetailView
12 from django_sendfile import sendfile
13
14 from documents.models import (
15 AnnualDocument,
16 AssociationDocument,
17 GeneralMeeting,
18 Document,
19 )
20 from utils.snippets import datetime_to_lectureyear
21
22
23 class DocumentsIndexView(TemplateView):
24 """
25 View that renders the documents index page
26 """
27
28 template_name = "documents/index.html"
29
30 def get_context_data(self, **kwargs) -> dict:
31 lecture_year = datetime_to_lectureyear(timezone.now())
32
33 years = {x: {} for x in reversed(range(1990, lecture_year + 1))}
34 for year in years:
35 years[year] = {
36 "documents": {"policy": None, "report": None, "financial": None},
37 "general_meetings": [],
38 }
39
40 for document in AnnualDocument.objects.filter(subcategory="policy"):
41 years[document.year]["documents"]["policy"] = document
42 for document in AnnualDocument.objects.filter(subcategory="report"):
43 years[document.year]["documents"]["report"] = document
44 for document in AnnualDocument.objects.filter(subcategory="financial"):
45 years[document.year]["documents"]["financial"] = document
46
47 for obj in GeneralMeeting.objects.all():
48 meeting_year = datetime_to_lectureyear(obj.datetime)
49 years[meeting_year]["general_meetings"].append(obj)
50
51 context = super().get_context_data(**kwargs)
52 context.update(
53 {
54 "association_documents": AssociationDocument.objects.order_by(
55 f"name_{get_language()}"
56 ).all(),
57 "years": list(years.items()),
58 }
59 )
60 return context
61
62
63 class DocumentDownloadView(DetailView):
64 """
65 View that allows you to download a specific document based on it's and your
66 permissions settings
67 """
68
69 model = Document
70
71 def get(self, request, *args, **kwargs) -> HttpResponse:
72 """
73 :return: either a 302 redirect to the login page or
74 a 200 with the document
75 """
76 response = super().get(request, *args, **kwargs)
77 document = response.context_data["document"]
78
79 if document.members_only and not request.user.is_authenticated:
80 return redirect("{}?next={}".format(settings.LOGIN_URL, request.path))
81 if document.members_only and not request.member.has_active_membership():
82 raise PermissionDenied
83
84 lang = request.GET.get("language")
85 try:
86 if lang == "en":
87 file = document.file_en
88 else: # Fall back on language detection
89 file = document.file
90 except ValueError as e:
91 raise Http404("This document does not exist.") from e
92
93 ext = os.path.splitext(file.path)[1]
94
95 return sendfile(
96 request,
97 file.path,
98 attachment=True,
99 attachment_filename=slugify(document.name) + ext,
100 )
101
[end of website/documents/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/documents/views.py b/website/documents/views.py
--- a/website/documents/views.py
+++ b/website/documents/views.py
@@ -7,7 +7,6 @@
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.text import slugify
-from django.utils.translation import get_language
from django.views.generic import TemplateView, DetailView
from django_sendfile import sendfile
@@ -52,7 +51,7 @@
context.update(
{
"association_documents": AssociationDocument.objects.order_by(
- f"name_{get_language()}"
+ "name"
).all(),
"years": list(years.items()),
}
@@ -81,12 +80,8 @@
if document.members_only and not request.member.has_active_membership():
raise PermissionDenied
- lang = request.GET.get("language")
try:
- if lang == "en":
- file = document.file_en
- else: # Fall back on language detection
- file = document.file
+ file = document.file
except ValueError as e:
raise Http404("This document does not exist.") from e
| {"golden_diff": "diff --git a/website/documents/views.py b/website/documents/views.py\n--- a/website/documents/views.py\n+++ b/website/documents/views.py\n@@ -7,7 +7,6 @@\n from django.shortcuts import redirect\n from django.utils import timezone\n from django.utils.text import slugify\n-from django.utils.translation import get_language\n from django.views.generic import TemplateView, DetailView\n from django_sendfile import sendfile\n \n@@ -52,7 +51,7 @@\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n- f\"name_{get_language()}\"\n+ \"name\"\n ).all(),\n \"years\": list(years.items()),\n }\n@@ -81,12 +80,8 @@\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n \n- lang = request.GET.get(\"language\")\n try:\n- if lang == \"en\":\n- file = document.file_en\n- else: # Fall back on language detection\n- file = document.file\n+ file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n", "issue": "Documents page broken\nSteps to reproduce:\n1. Go to https://thalia.nu/association/documents/\n\n\nSentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name\n(5 additional frame(s) were not displayed)\n...\n File \"documents/views.py\", line 54, in get_context_data\n \"association_documents\": AssociationDocument.objects.order_by(\n File \"django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"django/db/models/query.py\", line 1134, in order_by\n obj.query.add_ordering(*field_names)\n File \"django/db/models/sql/query.py\", line 1919, in add_ordering\n self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)\n File \"django/db/models/sql/query.py\", line 1481, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. 
\"\n```\n", "before_files": [{"content": "\"\"\"Views provided by the documents package\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.utils.translation import get_language\nfrom django.views.generic import TemplateView, DetailView\nfrom django_sendfile import sendfile\n\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n GeneralMeeting,\n Document,\n)\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentsIndexView(TemplateView):\n \"\"\"\n View that renders the documents index page\n \"\"\"\n\n template_name = \"documents/index.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n lecture_year = datetime_to_lectureyear(timezone.now())\n\n years = {x: {} for x in reversed(range(1990, lecture_year + 1))}\n for year in years:\n years[year] = {\n \"documents\": {\"policy\": None, \"report\": None, \"financial\": None},\n \"general_meetings\": [],\n }\n\n for document in AnnualDocument.objects.filter(subcategory=\"policy\"):\n years[document.year][\"documents\"][\"policy\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"report\"):\n years[document.year][\"documents\"][\"report\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"financial\"):\n years[document.year][\"documents\"][\"financial\"] = document\n\n for obj in GeneralMeeting.objects.all():\n meeting_year = datetime_to_lectureyear(obj.datetime)\n years[meeting_year][\"general_meetings\"].append(obj)\n\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n f\"name_{get_language()}\"\n ).all(),\n \"years\": list(years.items()),\n }\n )\n return context\n\n\nclass DocumentDownloadView(DetailView):\n \"\"\"\n View that allows you to download a specific document based on it's and your\n permissions settings\n \"\"\"\n\n model = Document\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n \"\"\"\n :return: either a 302 redirect to the login page or\n a 200 with the document\n \"\"\"\n response = super().get(request, *args, **kwargs)\n document = response.context_data[\"document\"]\n\n if document.members_only and not request.user.is_authenticated:\n return redirect(\"{}?next={}\".format(settings.LOGIN_URL, request.path))\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n\n lang = request.GET.get(\"language\")\n try:\n if lang == \"en\":\n file = document.file_en\n else: # Fall back on language detection\n file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n\n ext = os.path.splitext(file.path)[1]\n\n return sendfile(\n request,\n file.path,\n attachment=True,\n attachment_filename=slugify(document.name) + ext,\n )\n", "path": "website/documents/views.py"}]} | 1,702 | 255 |
gh_patches_debug_16036 | rasdani/github-patches | git_diff | svthalia__concrexit-3641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement HR changes to discounted membership upgrades/renewal
### What?
The huishoudelijk reglement has changed with regard to the period in which (ex-)members can use the discount when renewing or upgrading their membership to a study-long membership.
Check the HR precisely for the exact requirements.
### Why?
We need to match the official rules. It has to be released at the latest towards the end of this lecture year, to cover the wave of renewals in August/September and afterwards.
### How?
I think there's a nice place where the discount is applied either in the RenewalForm or in the Renewal model.
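A minimal sketch of what this could look like if the check stays in `RenewalForm.clean()`: widen the existing "active year membership" filter so it also accepts a year membership that ended recently. The helper name and the 366-day grace window below are assumptions; the exact period has to come from the HR.

```python
# Hypothetical helper for use in RenewalForm.clean(); the 366-day grace window
# is an assumed placeholder for whatever period the HR actually specifies.
from datetime import timedelta

from django.utils import timezone

from members.models import Membership


def qualifies_for_study_upgrade_discount(member):
    """True if the member holds, or recently held, a 'member' type year membership."""
    now = timezone.now()
    return Membership.objects.filter(
        user=member,
        type=Membership.MEMBER,
        since__lte=now,
        # was `until__gte=now`, i.e. only currently active memberships
        until__gte=now - timedelta(days=366),
    ).exists()
```

The same queryset condition could also live on the `Renewal` model instead, if that turns out to be the nicer place for it.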
</issue>
<code>
[start of website/registrations/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
4 from django.forms import HiddenInput, TypedChoiceField
5 from django.urls import reverse_lazy
6 from django.utils import timezone
7 from django.utils.safestring import mark_safe
8 from django.utils.text import capfirst
9 from django.utils.translation import gettext_lazy as _
10
11 from members.models import Membership
12 from payments.widgets import SignatureWidget
13 from registrations import services
14
15 from .models import Reference, Registration, Renewal
16
17
18 class BaseRegistrationForm(forms.ModelForm):
19 """Base form for membership registrations.
20
21 Subclasses must implement setting the right contribution.
22 """
23
24 birthday = forms.DateField(
25 label=capfirst(_("birthday")),
26 )
27
28 privacy_policy = forms.BooleanField(
29 required=True,
30 )
31
32 direct_debit = forms.BooleanField(
33 required=False,
34 label=_("Pay via direct debit"),
35 help_text=_(
36 "This will allow you to sign a Direct Debit mandate, allowing Thalia to withdraw the membership fees from your bank account. Also, you will be able to use this bank account for future payments to Thalia via Thalia Pay."
37 ),
38 )
39
40 contribution = forms.DecimalField(required=False, widget=HiddenInput())
41
42 def __init__(self, *args, **kwargs):
43 super().__init__(*args, **kwargs)
44 self.fields["privacy_policy"].label = mark_safe(
45 _('I accept the <a href="{}">privacy policy</a>.').format(
46 reverse_lazy("singlepages:privacy-policy")
47 )
48 )
49 self.fields["birthday"].widget.input_type = "date"
50
51 def clean(self):
52 if self.cleaned_data.get("phone_number") is not None: # pragma: no cover
53 self.cleaned_data["phone_number"] = self.cleaned_data[
54 "phone_number"
55 ].replace(" ", "")
56 super().clean()
57
58
59 class RegistrationAdminForm(forms.ModelForm):
60 """Custom admin form for Registration model to add the widget for the signature."""
61
62 class Meta:
63 fields = "__all__"
64 model = Registration
65 widgets = {
66 "signature": SignatureWidget(),
67 }
68
69
70 class MemberRegistrationForm(BaseRegistrationForm):
71 """Form for member registrations."""
72
73 this_year = timezone.now().year
74 years = reversed(
75 [(x, f"{x} - {x + 1}") for x in range(this_year - 20, this_year + 1)]
76 )
77
78 starting_year = TypedChoiceField(
79 choices=years,
80 coerce=int,
81 empty_value=this_year,
82 required=False,
83 help_text=_("What lecture year did you start studying at Radboud University?"),
84 )
85
86 def __init__(self, *args, **kwargs):
87 super().__init__(*args, **kwargs)
88 self.fields["student_number"].required = True
89 self.fields["programme"].required = True
90 self.fields["starting_year"].required = True
91
92 class Meta:
93 model = Registration
94 widgets = {"signature": SignatureWidget()}
95 fields = (
96 "length",
97 "first_name",
98 "last_name",
99 "birthday",
100 "email",
101 "phone_number",
102 "student_number",
103 "programme",
104 "starting_year",
105 "address_street",
106 "address_street2",
107 "address_postal_code",
108 "address_city",
109 "address_country",
110 "optin_birthday",
111 "optin_mailinglist",
112 "contribution",
113 "membership_type",
114 "direct_debit",
115 "initials",
116 "iban",
117 "bic",
118 "signature",
119 )
120
121 def clean(self):
122 super().clean()
123 self.cleaned_data["contribution"] = settings.MEMBERSHIP_PRICES[
124 self.cleaned_data["length"]
125 ]
126
127 return self.cleaned_data
128
129
130 class BenefactorRegistrationForm(BaseRegistrationForm):
131 """Form for benefactor registrations."""
132
133 icis_employee = forms.BooleanField(
134 required=False, label=_("I am an employee of iCIS")
135 )
136
137 contribution = forms.DecimalField(
138 required=True,
139 max_digits=5,
140 decimal_places=2,
141 )
142
143 class Meta:
144 model = Registration
145 widgets = {
146 "signature": SignatureWidget(),
147 }
148 fields = (
149 "length",
150 "first_name",
151 "last_name",
152 "birthday",
153 "email",
154 "phone_number",
155 "student_number",
156 "address_street",
157 "address_street2",
158 "address_postal_code",
159 "address_city",
160 "address_country",
161 "optin_birthday",
162 "optin_mailinglist",
163 "contribution",
164 "membership_type",
165 "direct_debit",
166 "initials",
167 "iban",
168 "bic",
169 "signature",
170 )
171
172
173 class RenewalForm(forms.ModelForm):
174 """Form for membership renewals."""
175
176 privacy_policy = forms.BooleanField(
177 required=True,
178 )
179
180 icis_employee = forms.BooleanField(
181 required=False, label=_("I am an employee of iCIS")
182 )
183
184 contribution = forms.DecimalField(
185 required=False,
186 max_digits=5,
187 decimal_places=2,
188 )
189
190 def __init__(self, *args, **kwargs):
191 super().__init__(*args, **kwargs)
192 self.fields["privacy_policy"].label = mark_safe(
193 _('I accept the <a href="{}">privacy policy</a>.').format(
194 reverse_lazy("singlepages:privacy-policy")
195 )
196 )
197 self.fields["length"].help_text = (
198 "A discount of €7,50 will be applied if you upgrade your (active) year membership "
199 "to a membership until graduation. You will only have to pay €22,50 in that case."
200 )
201
202 class Meta:
203 model = Renewal
204 fields = (
205 "member",
206 "length",
207 "contribution",
208 "membership_type",
209 "no_references",
210 "remarks",
211 )
212
213 def clean(self):
214 super().clean()
215 if self.cleaned_data["member"].profile.is_minimized:
216 raise ValidationError(
217 "It's not possible to renew a membership using an incomplete profile."
218 )
219
220 if self.cleaned_data["length"] == Renewal.MEMBERSHIP_STUDY:
221 now = timezone.now()
222 if Membership.objects.filter(
223 user=self.cleaned_data["member"],
224 type=Membership.MEMBER,
225 until__gte=now,
226 since__lte=now,
227 ).exists():
228 # The membership upgrade discount applies if, at the time a Renewal is
229 # created, the user has an active 'member' type membership for a year.
230 self.cleaned_data["contribution"] = (
231 settings.MEMBERSHIP_PRICES[Renewal.MEMBERSHIP_STUDY]
232 - settings.MEMBERSHIP_PRICES[Renewal.MEMBERSHIP_YEAR]
233 )
234 else:
235 self.cleaned_data["contribution"] = settings.MEMBERSHIP_PRICES[
236 Renewal.MEMBERSHIP_STUDY
237 ]
238 elif self.cleaned_data["membership_type"] == Membership.MEMBER:
239 self.cleaned_data["contribution"] = settings.MEMBERSHIP_PRICES[
240 self.cleaned_data["length"]
241 ]
242
243 return self.cleaned_data
244
245
246 class ReferenceForm(forms.ModelForm):
247 def clean(self):
248 super().clean()
249 membership = self.cleaned_data["member"].current_membership
250 if membership and membership.type == Membership.BENEFACTOR:
251 raise ValidationError(_("Benefactors cannot give references."))
252
253 membership = self.cleaned_data["member"].latest_membership
254 if (
255 membership
256 and membership.until
257 and membership.until < services.calculate_membership_since()
258 ):
259 raise ValidationError(
260 "It's not possible to give references for memberships "
261 "that start after your own membership's end."
262 )
263
264 class Meta:
265 model = Reference
266 fields = "__all__"
267 error_messages = {
268 NON_FIELD_ERRORS: {
269 "unique_together": _(
270 "You've already given a reference for this person."
271 ),
272 }
273 }
274
[end of website/registrations/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/registrations/forms.py b/website/registrations/forms.py
--- a/website/registrations/forms.py
+++ b/website/registrations/forms.py
@@ -1,3 +1,5 @@
+from datetime import timedelta
+
from django import forms
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
@@ -222,7 +224,7 @@
if Membership.objects.filter(
user=self.cleaned_data["member"],
type=Membership.MEMBER,
- until__gte=now,
+ until__gte=now - timedelta(days=366),
since__lte=now,
).exists():
# The membership upgrade discount applies if, at the time a Renewal is
| {"golden_diff": "diff --git a/website/registrations/forms.py b/website/registrations/forms.py\n--- a/website/registrations/forms.py\n+++ b/website/registrations/forms.py\n@@ -1,3 +1,5 @@\n+from datetime import timedelta\n+\n from django import forms\n from django.conf import settings\n from django.core.exceptions import NON_FIELD_ERRORS, ValidationError\n@@ -222,7 +224,7 @@\n if Membership.objects.filter(\n user=self.cleaned_data[\"member\"],\n type=Membership.MEMBER,\n- until__gte=now,\n+ until__gte=now - timedelta(days=366),\n since__lte=now,\n ).exists():\n # The membership upgrade discount applies if, at the time a Renewal is\n", "issue": "Implement HR changes to discounted membership upgrades/renewal\n### What?\r\nThe huishoudelijk reglement has changed wrt the time in which (ex-)\r\nmembers can use the discount when renewing/upgrading their membership to be study-long.\r\n\r\nCheck the HR precisely for the exact requirements.\r\n\r\n### Why?\r\nWe need to match the official rules. It has to be released at the latest towards the end of this lecture year, to cover the wave of renewals in august/september and after. \r\n\r\n### How?\r\nI think there's a nice place where the discount is applied either in the RenewalForm or in the Renewal model.\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import HiddenInput, TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\n\nfrom .models import Reference, Registration, Renewal\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\n\n Subclasses must implement setting the right contribution.\n \"\"\"\n\n birthday = forms.DateField(\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(\n required=True,\n )\n\n direct_debit = forms.BooleanField(\n required=False,\n label=_(\"Pay via direct debit\"),\n help_text=_(\n \"This will allow you to sign a Direct Debit mandate, allowing Thalia to withdraw the membership fees from your bank account. 
Also, you will be able to use this bank account for future payments to Thalia via Thalia Pay.\"\n ),\n )\n\n contribution = forms.DecimalField(required=False, widget=HiddenInput())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n self.fields[\"birthday\"].widget.input_type = \"date\"\n\n def clean(self):\n if self.cleaned_data.get(\"phone_number\") is not None: # pragma: no cover\n self.cleaned_data[\"phone_number\"] = self.cleaned_data[\n \"phone_number\"\n ].replace(\" \", \"\")\n super().clean()\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, f\"{x} - {x + 1}\") for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"student_number\"].required = True\n self.fields[\"programme\"].required = True\n self.fields[\"starting_year\"].required = True\n\n class Meta:\n model = Registration\n widgets = {\"signature\": SignatureWidget()}\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n def clean(self):\n super().clean()\n self.cleaned_data[\"contribution\"] = settings.MEMBERSHIP_PRICES[\n self.cleaned_data[\"length\"]\n ]\n\n return self.cleaned_data\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.DecimalField(\n required=True,\n max_digits=5,\n decimal_places=2,\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(\n required=True,\n )\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.DecimalField(\n required=False,\n max_digits=5,\n decimal_places=2,\n )\n\n def __init__(self, *args, **kwargs):\n 
super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n self.fields[\"length\"].help_text = (\n \"A discount of \u20ac7,50 will be applied if you upgrade your (active) year membership \"\n \"to a membership until graduation. You will only have to pay \u20ac22,50 in that case.\"\n )\n\n class Meta:\n model = Renewal\n fields = (\n \"member\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"no_references\",\n \"remarks\",\n )\n\n def clean(self):\n super().clean()\n if self.cleaned_data[\"member\"].profile.is_minimized:\n raise ValidationError(\n \"It's not possible to renew a membership using an incomplete profile.\"\n )\n\n if self.cleaned_data[\"length\"] == Renewal.MEMBERSHIP_STUDY:\n now = timezone.now()\n if Membership.objects.filter(\n user=self.cleaned_data[\"member\"],\n type=Membership.MEMBER,\n until__gte=now,\n since__lte=now,\n ).exists():\n # The membership upgrade discount applies if, at the time a Renewal is\n # created, the user has an active 'member' type membership for a year.\n self.cleaned_data[\"contribution\"] = (\n settings.MEMBERSHIP_PRICES[Renewal.MEMBERSHIP_STUDY]\n - settings.MEMBERSHIP_PRICES[Renewal.MEMBERSHIP_YEAR]\n )\n else:\n self.cleaned_data[\"contribution\"] = settings.MEMBERSHIP_PRICES[\n Renewal.MEMBERSHIP_STUDY\n ]\n elif self.cleaned_data[\"membership_type\"] == Membership.MEMBER:\n self.cleaned_data[\"contribution\"] = settings.MEMBERSHIP_PRICES[\n self.cleaned_data[\"length\"]\n ]\n\n return self.cleaned_data\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n \"It's not possible to give references for memberships \"\n \"that start after your own membership's end.\"\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py"}]} | 3,102 | 165 |