| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-18.9k | stringlengths 145-5.13k | stringlengths 465-23.6k | int64 556-4.1k | int64 47-1.02k |

problem_id: gh_patches_debug_15397
source: rasdani/github-patches
task_type: git_diff
in_source_id: crytic__slither-1945

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
similar variables detector is extremely slow
This detector makes up the majority of runtime on large codebases
```
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all)
1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single)
1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process)
1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors)
1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>)
84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect)
1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect)
```
```
https://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66
</issue>
<code>
[start of slither/detectors/variables/similar_variables.py]
1 """
2 Check for state variables too similar
3 Do not check contract inheritance
4 """
5 import difflib
6 from typing import List, Set, Tuple
7
8 from slither.core.declarations.contract import Contract
9 from slither.core.variables.local_variable import LocalVariable
10 from slither.detectors.abstract_detector import (
11 AbstractDetector,
12 DetectorClassification,
13 DETECTOR_INFO,
14 )
15 from slither.utils.output import Output
16
17
18 class SimilarVarsDetection(AbstractDetector):
19 """
20 Variable similar detector
21 """
22
23 ARGUMENT = "similar-names"
24 HELP = "Variable names are too similar"
25 IMPACT = DetectorClassification.INFORMATIONAL
26 CONFIDENCE = DetectorClassification.MEDIUM
27
28 WIKI = (
29 "https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar"
30 )
31
32 WIKI_TITLE = "Variable names too similar"
33 WIKI_DESCRIPTION = "Detect variables with names that are too similar."
34 WIKI_EXPLOIT_SCENARIO = "Bob uses several variables with similar names. As a result, his code is difficult to review."
35 WIKI_RECOMMENDATION = "Prevent variables from having similar names."
36
37 @staticmethod
38 def similar(seq1: str, seq2: str) -> bool:
39 """Test the name similarity
40
41 Two name are similar if difflib.SequenceMatcher on the lowercase
42 version of the name is greater than 0.90
43 See: https://docs.python.org/2/library/difflib.html
44 Args:
45 seq1 (str): first name
46 seq2 (str): second name
47 Returns:
48 bool: true if names are similar
49 """
50 if len(seq1) != len(seq2):
51 return False
52 val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()
53 ret = val > 0.90
54 return ret
55
56 @staticmethod
57 def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:
58 """Detect variables with similar name
59
60 Returns:
61 bool: true if variables have similar name
62 """
63 all_var = [x.variables for x in contract.functions]
64 all_var = [x for l in all_var for x in l]
65
66 contract_var = contract.variables
67
68 all_var = set(all_var + contract_var)
69
70 ret = []
71 for v1 in all_var:
72 for v2 in all_var:
73 if v1.name.lower() != v2.name.lower():
74 if SimilarVarsDetection.similar(v1.name, v2.name):
75 if (v2, v1) not in ret:
76 ret.append((v1, v2))
77
78 return set(ret)
79
80 def _detect(self) -> List[Output]:
81 """Detect similar variables name
82
83 Returns:
84 list: {'vuln', 'filename,'contract','vars'}
85 """
86 results = []
87 for c in self.contracts:
88 allVars = self.detect_sim(c)
89 if allVars:
90 for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):
91 v_left = v1 if v1.name < v2.name else v2
92 v_right = v2 if v_left == v1 else v1
93 info: DETECTOR_INFO = [
94 "Variable ",
95 v_left,
96 " is too similar to ",
97 v_right,
98 "\n",
99 ]
100 json = self.generate_result(info)
101 results.append(json)
102 return results
103
[end of slither/detectors/variables/similar_variables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py
--- a/slither/detectors/variables/similar_variables.py
+++ b/slither/detectors/variables/similar_variables.py
@@ -65,12 +65,16 @@
contract_var = contract.variables
- all_var = set(all_var + contract_var)
+ all_var = list(set(all_var + contract_var))
ret = []
- for v1 in all_var:
- for v2 in all_var:
- if v1.name.lower() != v2.name.lower():
+ # pylint: disable=consider-using-enumerate
+ for i in range(len(all_var)):
+ v1 = all_var[i]
+ _v1_name_lower = v1.name.lower()
+ for j in range(i, len(all_var)):
+ v2 = all_var[j]
+ if _v1_name_lower != v2.name.lower():
if SimilarVarsDetection.similar(v1.name, v2.name):
if (v2, v1) not in ret:
ret.append((v1, v2))
| {"golden_diff": "diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py\n--- a/slither/detectors/variables/similar_variables.py\n+++ b/slither/detectors/variables/similar_variables.py\n@@ -65,12 +65,16 @@\n \n contract_var = contract.variables\n \n- all_var = set(all_var + contract_var)\n+ all_var = list(set(all_var + contract_var))\n \n ret = []\n- for v1 in all_var:\n- for v2 in all_var:\n- if v1.name.lower() != v2.name.lower():\n+ # pylint: disable=consider-using-enumerate\n+ for i in range(len(all_var)):\n+ v1 = all_var[i]\n+ _v1_name_lower = v1.name.lower()\n+ for j in range(i, len(all_var)):\n+ v2 = all_var[j]\n+ if _v1_name_lower != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n", "issue": " similar variables detector is extremely slow\n This detector makes up the majority of runtime on large codebases\r\n```\r\nncalls tottime percall cumtime percall filename:lineno(function)\r\n 1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all)\r\n 1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single)\r\n 1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>)\r\n 84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect)\r\n 1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect)\r\n\r\n```\r\nhttps://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66\n", "before_files": [{"content": "\"\"\"\nCheck for state variables too similar\nDo not check contract inheritance\n\"\"\"\nimport difflib\nfrom typing import List, Set, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.variables.local_variable import LocalVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass SimilarVarsDetection(AbstractDetector):\n \"\"\"\n Variable similar detector\n \"\"\"\n\n ARGUMENT = \"similar-names\"\n HELP = \"Variable names are too similar\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = (\n \"https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar\"\n )\n\n WIKI_TITLE = \"Variable names too similar\"\n WIKI_DESCRIPTION = \"Detect variables with names that are too similar.\"\n WIKI_EXPLOIT_SCENARIO = \"Bob uses several variables with similar names. 
As a result, his code is difficult to review.\"\n WIKI_RECOMMENDATION = \"Prevent variables from having similar names.\"\n\n @staticmethod\n def similar(seq1: str, seq2: str) -> bool:\n \"\"\"Test the name similarity\n\n Two name are similar if difflib.SequenceMatcher on the lowercase\n version of the name is greater than 0.90\n See: https://docs.python.org/2/library/difflib.html\n Args:\n seq1 (str): first name\n seq2 (str): second name\n Returns:\n bool: true if names are similar\n \"\"\"\n if len(seq1) != len(seq2):\n return False\n val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()\n ret = val > 0.90\n return ret\n\n @staticmethod\n def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:\n \"\"\"Detect variables with similar name\n\n Returns:\n bool: true if variables have similar name\n \"\"\"\n all_var = [x.variables for x in contract.functions]\n all_var = [x for l in all_var for x in l]\n\n contract_var = contract.variables\n\n all_var = set(all_var + contract_var)\n\n ret = []\n for v1 in all_var:\n for v2 in all_var:\n if v1.name.lower() != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n\n return set(ret)\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect similar variables name\n\n Returns:\n list: {'vuln', 'filename,'contract','vars'}\n \"\"\"\n results = []\n for c in self.contracts:\n allVars = self.detect_sim(c)\n if allVars:\n for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):\n v_left = v1 if v1.name < v2.name else v2\n v_right = v2 if v_left == v1 else v1\n info: DETECTOR_INFO = [\n \"Variable \",\n v_left,\n \" is too similar to \",\n v_right,\n \"\\n\",\n ]\n json = self.generate_result(info)\n results.append(json)\n return results\n", "path": "slither/detectors/variables/similar_variables.py"}]} | 1,986 | 258 |
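The golden_diff above speeds the detector up by comparing each unordered pair of names exactly once (the inner loop starts at the outer index) and by lowercasing each name a single time. A minimal, self-contained sketch of the same idea on plain strings follows; the helper names and sample identifiers are hypothetical and stand in for the project's actual Variable objects.

```
import difflib

def similar(seq1, seq2):
    # Same test as the detector: equal lengths and a > 0.90 ratio on lowercase.
    if len(seq1) != len(seq2):
        return False
    return difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio() > 0.90

def similar_pairs(names):
    # Visit each unordered pair once (j starts at i + 1) instead of all n * n
    # ordered pairs, and cache the lowercased form of every name up front.
    names = list(set(names))
    lowered = [name.lower() for name in names]
    pairs = []
    for i in range(len(names)):
        for j in range(i + 1, len(names)):
            if lowered[i] != lowered[j] and similar(names[i], names[j]):
                pairs.append((names[i], names[j]))
    return pairs

# Flags the two near-identical reward names and ignores the rest.
print(similar_pairs(["stakerRewards1", "stakerRewards2", "totalSupply", "owner"]))
```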
problem_id: gh_patches_debug_28132
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-5457

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If main.py is run by bokeh serve, warn about running with directory name instead
Lots of reports of people running, e.g.
```
bokeh serve --show crossfilter/main.py
```
Which prevents all the features of "directory style" apps from being enabled.
</issue>
<code>
[start of bokeh/command/util.py]
1 ''' Provide utility functions for implementing the Bokeh command.
2
3 '''
4 from __future__ import print_function
5
6 import os
7 import sys
8
9 from bokeh.application import Application
10 from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler
11
12 def die(message):
13 ''' Print an error message and exit.
14
15 Args:
16 message (str) : error message to print
17
18 '''
19 print(message, file=sys.stderr)
20 sys.exit(1)
21
22 def build_single_handler_application(path, argv=None):
23 ''' Return a Bokeh application built using a single handler for a file
24 or directory.
25
26 Args:
27 path (str) : path to a file or directory for creating a Bokeh
28 application.
29 argv (seq[str], optional) : command line arguments to pass to the
30 application handler
31
32 Returns:
33 Application
34
35 Raises:
36 RuntimeError
37
38 '''
39 argv = argv or []
40 path = os.path.abspath(path)
41 if os.path.isdir(path):
42 handler = DirectoryHandler(filename=path, argv=argv)
43 else:
44 if path.endswith(".ipynb"):
45 handler = NotebookHandler(filename=path, argv=argv)
46 elif path.endswith(".py"):
47 handler = ScriptHandler(filename=path, argv=argv)
48 else:
49 raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
50
51 if handler.failed:
52 raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail))
53
54 application = Application(handler)
55
56 return application
57
58 def build_single_handler_applications(paths, argvs=None):
59 ''' Return a dictionary mapping routes to Bokeh applications built using
60 single handlers, for specified files or directories.
61
62 Args:
63 path (seq[str]) : paths to files or directories for creating Bokeh
64 applications.
65 argvs (dict[str, list[str]], optional) : mapping of paths to command
66 line arguments to pass to the handler for each path
67
68 Returns:
69 dict[str, Application]
70
71 Raises:
72 RuntimeError
73
74 '''
75 applications = {}
76 argvs = {} or argvs
77
78 for path in paths:
79 application = build_single_handler_application(path, argvs.get(path, []))
80
81 route = application.handlers[0].url_path()
82
83 if not route:
84 if '/' in applications:
85 raise RuntimeError("Don't know the URL path to use for %s" % (path))
86 route = '/'
87 applications[route] = application
88
89 return applications
90
[end of bokeh/command/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/bokeh/command/util.py b/bokeh/command/util.py
--- a/bokeh/command/util.py
+++ b/bokeh/command/util.py
@@ -5,6 +5,7 @@
import os
import sys
+import warnings
from bokeh.application import Application
from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler
@@ -19,6 +20,16 @@
print(message, file=sys.stderr)
sys.exit(1)
+DIRSTYLE_MAIN_WARNING = """
+It looks like you might be running the main.py of a directory app directly.
+If this is the case, to enable the features of directory style apps, you must
+call "bokeh serve" on the directory instead. For example:
+
+ bokeh serve my_app_dir/
+
+If this is not the case, renaming main.py will supress this warning.
+"""
+
def build_single_handler_application(path, argv=None):
''' Return a Bokeh application built using a single handler for a file
or directory.
@@ -44,6 +55,8 @@
if path.endswith(".ipynb"):
handler = NotebookHandler(filename=path, argv=argv)
elif path.endswith(".py"):
+ if path.endswith("main.py"):
+ warnings.warn(DIRSTYLE_MAIN_WARNING)
handler = ScriptHandler(filename=path, argv=argv)
else:
raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
| {"golden_diff": "diff --git a/bokeh/command/util.py b/bokeh/command/util.py\n--- a/bokeh/command/util.py\n+++ b/bokeh/command/util.py\n@@ -5,6 +5,7 @@\n \n import os\n import sys\n+import warnings\n \n from bokeh.application import Application\n from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n@@ -19,6 +20,16 @@\n print(message, file=sys.stderr)\n sys.exit(1)\n \n+DIRSTYLE_MAIN_WARNING = \"\"\"\n+It looks like you might be running the main.py of a directory app directly.\n+If this is the case, to enable the features of directory style apps, you must\n+call \"bokeh serve\" on the directory instead. For example:\n+\n+ bokeh serve my_app_dir/\n+\n+If this is not the case, renaming main.py will supress this warning.\n+\"\"\"\n+\n def build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n@@ -44,6 +55,8 @@\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n+ if path.endswith(\"main.py\"):\n+ warnings.warn(DIRSTYLE_MAIN_WARNING)\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n", "issue": "If main.py is run by bokeh serve, warn about running with directory name instead\nLots of reports of people running, e.g.\r\n```\r\nbokeh serve --show crossfilter/main.py\r\n```\r\nWhich prevents all the features of \"directory style\" apps from being enabled. \n", "before_files": [{"content": "''' Provide utility functions for implementing the Bokeh command.\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n\ndef die(message):\n ''' Print an error message and exit.\n\n Args:\n message (str) : error message to print\n\n '''\n print(message, file=sys.stderr)\n sys.exit(1)\n\ndef build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n\n Args:\n path (str) : path to a file or directory for creating a Bokeh\n application.\n argv (seq[str], optional) : command line arguments to pass to the\n application handler\n\n Returns:\n Application\n\n Raises:\n RuntimeError\n\n '''\n argv = argv or []\n path = os.path.abspath(path)\n if os.path.isdir(path):\n handler = DirectoryHandler(filename=path, argv=argv)\n else:\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n\n if handler.failed:\n raise RuntimeError(\"Error loading %s:\\n\\n%s\\n%s \" % (path, handler.error, handler.error_detail))\n\n application = Application(handler)\n\n return application\n\ndef build_single_handler_applications(paths, argvs=None):\n ''' Return a dictionary mapping routes to Bokeh applications built using\n single handlers, for specified files or directories.\n\n Args:\n path (seq[str]) : paths to files or directories for creating Bokeh\n applications.\n argvs (dict[str, list[str]], optional) : mapping of paths to command\n line arguments to pass to the handler for each path\n\n Returns:\n dict[str, Application]\n\n Raises:\n RuntimeError\n\n '''\n applications = {}\n argvs = {} or argvs\n\n for path in 
paths:\n application = build_single_handler_application(path, argvs.get(path, []))\n\n route = application.handlers[0].url_path()\n\n if not route:\n if '/' in applications:\n raise RuntimeError(\"Don't know the URL path to use for %s\" % (path))\n route = '/'\n applications[route] = application\n\n return applications\n", "path": "bokeh/command/util.py"}]} | 1,313 | 326 |
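The golden_diff above adds a warning when `bokeh serve` is handed a `main.py` file directly instead of its containing directory. A rough standalone sketch of that guard (the `build_handler` helper and its return value are hypothetical placeholders for Bokeh's real handler construction):

```
import os
import warnings

DIRSTYLE_MAIN_WARNING = (
    "It looks like you might be running the main.py of a directory app directly. "
    "Call 'bokeh serve' on the directory instead, e.g. 'bokeh serve my_app_dir/'."
)

def build_handler(path):
    # Stand-in for the script branch of build_single_handler_application:
    # plain scripts still load, but serving a main.py directly emits the hint.
    path = os.path.abspath(path)
    if path.endswith(".py"):
        if path.endswith("main.py"):
            warnings.warn(DIRSTYLE_MAIN_WARNING)
        return ("script", path)
    raise ValueError("Expected a '.py' script, got: %r" % path)

build_handler("crossfilter/main.py")  # triggers the directory-style warning
```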
problem_id: gh_patches_debug_12174
source: rasdani/github-patches
task_type: git_diff
in_source_id: privacyidea__privacyidea-1517

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove sub modules
We often have problems, when building plugins. Plugins have another release pace than the server.
We should remove the submodules
* pam_python
* freeradius
* simplesamlphp
* adminclient
and give them their own versioning.
The debian build environment has to be added/moved to those repositories.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from setuptools import setup, find_packages
3 import os
4 import stat
5 import sys
6
7 #VERSION="2.1dev4"
8 VERSION="3.0dev1"
9
10 # Taken from kennethreitz/requests/setup.py
11 package_directory = os.path.realpath(os.path.dirname(__file__))
12
13
14 def get_file_contents(file_path):
15 """Get the context of the file using full path name."""
16 content = ""
17 try:
18 full_path = os.path.join(package_directory, file_path)
19 content = open(full_path, 'r').read()
20 except:
21 print >> sys.stderr, "### could not open file {0!r}".format(file_path)
22 return content
23
24 def get_file_list(file_path):
25 full_path = os.path.join(package_directory, file_path)
26 file_list = os.listdir(full_path)
27 # now we need to add the path to the files
28 return [ file_path + f for f in file_list ]
29
30
31 install_requires = ["Flask>=0.10.1",
32 "Flask-Migrate>=1.2.0",
33 "Flask-SQLAlchemy>=2.0",
34 "Flask-Script>=2.0.5",
35 "Jinja2>=2.7.3",
36 "Mako>=0.9.1",
37 "MarkupSafe>=0.23",
38 "PyMySQL>=0.6.6",
39 "Pillow>=2.6.1",
40 "PyJWT>=1.3.0",
41 "PyYAML>=3.11",
42 "Pygments>=2.0.2",
43 "SQLAlchemy>=1.0.5",
44 "Werkzeug>=0.10.4",
45 "alembic>=0.6.7",
46 "argparse>=1.2.1",
47 "bcrypt>=1.1.0",
48 "beautifulsoup4>=4.3.2",
49 "cffi>=0.8.6",
50 "configobj>=5.0.6",
51 "docutils>=0.12",
52 "funcparserlib>=0.3.6",
53 "itsdangerous>=0.24",
54 "ldap3>=2.5",
55 "netaddr>=0.7.12",
56 "passlib>=1.6.2",
57 "pyasn1>=0.4.2",
58 "pyOpenSSL>=17.5",
59 "pycparser>=2.10",
60 "pycrypto>=2.6.1",
61 "pyrad>=2.0",
62 "pyusb>=1.0.0b2",
63 "qrcode>=5.1",
64 "requests>=2.7.0",
65 "sqlsoup>=0.9.0",
66 "ecdsa>=0.13",
67 "lxml>=3.3",
68 "python-gnupg>=0.3.8",
69 "defusedxml>=0.4.1",
70 "flask-babel>=0.9",
71 "croniter>=0.3.8",
72 "oauth2client>=2.0.1"
73 ]
74
75 # For python 2.6 we need additional dependency importlib
76 try:
77 import importlib
78 except ImportError:
79 install_requires.append('importlib')
80
81
82 def get_man_pages(dir):
83 """
84 Get man pages in a directory.
85 :param dir:
86 :return: list of file names
87 """
88 files = os.listdir(dir)
89 r_files = []
90 for file in files:
91 if file.endswith(".1"):
92 r_files.append(dir + "/" + file)
93 return r_files
94
95
96 def get_scripts(dir):
97 """
98 Get files that are executable
99 :param dir:
100 :return: list of file names
101 """
102 files = os.listdir(dir)
103 r_files = []
104 for file in files:
105 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
106 r_files.append(dir + "/" + file)
107 return r_files
108
109
110 setup(
111 name='privacyIDEA',
112 version=VERSION,
113 description='privacyIDEA: identity, multifactor authentication (OTP), '
114 'authorization, audit',
115 author='privacyidea.org',
116 license='AGPLv3',
117 author_email='[email protected]',
118 url='http://www.privacyidea.org',
119 keywords='OTP, two factor authentication, management, security',
120 packages=find_packages(),
121 scripts=["pi-manage"] + get_scripts("tools"),
122 extras_require={
123 'dev': ["Sphinx>=1.3.1",
124 "sphinxcontrib-httpdomain>=1.3.0"],
125 'test': ["coverage>=3.7.1",
126 "mock>=1.0.1",
127 "pyparsing>=2.0.3",
128 "nose>=1.3.4",
129 "responses>=0.4.0",
130 "six>=1.8.0"],
131 },
132 install_requires=install_requires,
133 include_package_data=True,
134 data_files=[('etc/privacyidea/',
135 ['deploy/apache/privacyideaapp.wsgi',
136 'deploy/privacyidea/dictionary',
137 'deploy/privacyidea/enckey',
138 'deploy/privacyidea/private.pem',
139 'deploy/privacyidea/public.pem']),
140 ('share/man/man1', get_man_pages("tools")),
141 ('lib/privacyidea/authmodules/FreeRADIUS',
142 ["authmodules/FreeRADIUS/LICENSE",
143 "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
144 ('lib/privacyidea/authmodules/OTRS',
145 ["authmodules/OTRS/privacyIDEA.pm"]),
146 ('lib/privacyidea/migrations',
147 ["migrations/alembic.ini",
148 "migrations/env.py",
149 "migrations/README",
150 "migrations/script.py.mako"]),
151 ('lib/privacyidea/migrations/versions',
152 get_file_list("migrations/versions/"))
153 ],
154 classifiers=["Framework :: Flask",
155 "License :: OSI Approved :: "
156 "GNU Affero General Public License v3",
157 "Programming Language :: Python",
158 "Development Status :: 5 - Production/Stable",
159 "Topic :: Internet",
160 "Topic :: Security",
161 "Topic :: System ::"
162 " Systems Administration :: Authentication/Directory"
163 ],
164 #message_extractors={'privacyidea': [
165 # ('**.py', 'python', None),
166 # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
167 zip_safe=False,
168 long_description=get_file_contents('README.rst')
169 )
170
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -138,9 +138,6 @@
'deploy/privacyidea/private.pem',
'deploy/privacyidea/public.pem']),
('share/man/man1', get_man_pages("tools")),
- ('lib/privacyidea/authmodules/FreeRADIUS',
- ["authmodules/FreeRADIUS/LICENSE",
- "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
('lib/privacyidea/authmodules/OTRS',
["authmodules/OTRS/privacyIDEA.pm"]),
('lib/privacyidea/migrations',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -138,9 +138,6 @@\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n- ('lib/privacyidea/authmodules/FreeRADIUS',\n- [\"authmodules/FreeRADIUS/LICENSE\",\n- \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n", "issue": "Remove sub modules\nWe often have problems, when building plugins. Plugins have another release pace than the server.\r\n\r\nWe should remove the submodules\r\n* pam_python\r\n* freeradius\r\n* simplesamlphp\r\n* adminclient\r\n\r\nand give them their own versioning. \r\nThe debian build environment has to be added/moved to those repositories.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION=\"2.1dev4\"\nVERSION=\"3.0dev1\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print >> sys.stderr, \"### could not open file {0!r}\".format(file_path)\n return content\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [ file_path + f for f in file_list ]\n\n\ninstall_requires = [\"Flask>=0.10.1\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Script>=2.0.5\",\n \"Jinja2>=2.7.3\",\n \"Mako>=0.9.1\",\n \"MarkupSafe>=0.23\",\n \"PyMySQL>=0.6.6\",\n \"Pillow>=2.6.1\",\n \"PyJWT>=1.3.0\",\n \"PyYAML>=3.11\",\n \"Pygments>=2.0.2\",\n \"SQLAlchemy>=1.0.5\",\n \"Werkzeug>=0.10.4\",\n \"alembic>=0.6.7\",\n \"argparse>=1.2.1\",\n \"bcrypt>=1.1.0\",\n \"beautifulsoup4>=4.3.2\",\n \"cffi>=0.8.6\",\n \"configobj>=5.0.6\",\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n \"ldap3>=2.5\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n \"pyasn1>=0.4.2\",\n \"pyOpenSSL>=17.5\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n \"pyrad>=2.0\",\n \"pyusb>=1.0.0b2\",\n \"qrcode>=5.1\",\n \"requests>=2.7.0\",\n \"sqlsoup>=0.9.0\",\n \"ecdsa>=0.13\",\n \"lxml>=3.3\",\n \"python-gnupg>=0.3.8\",\n \"defusedxml>=0.4.1\",\n \"flask-babel>=0.9\",\n \"croniter>=0.3.8\",\n \"oauth2client>=2.0.1\"\n ]\n\n# For python 2.6 we need additional dependency importlib\ntry:\n import importlib\nexcept ImportError:\n install_requires.append('importlib')\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n 
author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'dev': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\"],\n 'test': [\"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"pyparsing>=2.0.3\",\n \"nose>=1.3.4\",\n \"responses>=0.4.0\",\n \"six>=1.8.0\"],\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary',\n 'deploy/privacyidea/enckey',\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/authmodules/FreeRADIUS',\n [\"authmodules/FreeRADIUS/LICENSE\",\n \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\"))\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\"\n ],\n #message_extractors={'privacyidea': [\n # ('**.py', 'python', None),\n # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]} | 2,432 | 139 |
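The golden_diff above is purely a packaging change: the FreeRADIUS entry is dropped from `data_files` so that module can be versioned and released from its own repository. As a rough illustration of what a setuptools `data_files` entry does (all paths below are hypothetical):

```
# Each entry maps an install-time target directory to the source files copied
# there.  Removing a tuple (as the patch does for FreeRADIUS) simply stops
# setup.py from installing those files with the server package.
data_files = [
    ("etc/example/", ["deploy/example.cfg"]),
    ("lib/example/authmodules/OTRS", ["authmodules/OTRS/example.pm"]),
    # ("lib/example/authmodules/FreeRADIUS", [...])  # removed: ships separately
]
print(data_files)  # the list that would be passed to setuptools.setup(data_files=...)
```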
problem_id: gh_patches_debug_28736
source: rasdani/github-patches
task_type: git_diff
in_source_id: opsdroid__opsdroid-183

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change default log location
Logs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command.
Logs should either be written to `/var/log/opsdroid.log` or as that may not be writeable by all users maybe `~/.opsdroid/opsdroid.log`.
</issue>
<code>
[start of opsdroid/const.py]
1 """Constants used by OpsDroid."""
2 import os
3
4 __version__ = "0.8.1"
5
6 LOG_FILENAME = 'output.log'
7 DEFAULT_GIT_URL = "https://github.com/opsdroid/"
8 MODULES_DIRECTORY = "opsdroid-modules"
9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
11 DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
12 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
13 DEFAULT_MODULE_BRANCH = "master"
14 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
15 "configuration/example_configuration.yaml")
16
[end of opsdroid/const.py]
[start of opsdroid/__main__.py]
1 """Starts opsdroid."""
2
3 import sys
4 import logging
5 import argparse
6
7 from opsdroid.core import OpsDroid
8 from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE
9 from opsdroid.web import Web
10
11
12 _LOGGER = logging.getLogger("opsdroid")
13
14
15 def configure_logging(config):
16 """Configure the root logger based on user config."""
17 rootlogger = logging.getLogger()
18 while rootlogger.handlers:
19 rootlogger.handlers.pop()
20
21 try:
22 logfile_path = config["logging"]["path"]
23 except KeyError:
24 logfile_path = LOG_FILENAME
25
26 try:
27 log_level = get_logging_level(
28 config["logging"]["level"])
29 except KeyError:
30 log_level = logging.INFO
31
32 rootlogger.setLevel(log_level)
33 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')
34
35 console_handler = logging.StreamHandler()
36 console_handler.setLevel(log_level)
37 console_handler.setFormatter(formatter)
38 rootlogger.addHandler(console_handler)
39
40 try:
41 if not config["logging"]["console"]:
42 console_handler.setLevel(logging.CRITICAL)
43 except KeyError:
44 pass
45
46 if logfile_path:
47 file_handler = logging.FileHandler(logfile_path)
48 file_handler.setLevel(log_level)
49 file_handler.setFormatter(formatter)
50 rootlogger.addHandler(file_handler)
51
52 _LOGGER.info("="*40)
53 _LOGGER.info("Stated application")
54
55
56 def get_logging_level(logging_level):
57 """Get the logger level based on the user configuration."""
58 if logging_level == 'critical':
59 return logging.CRITICAL
60 elif logging_level == 'error':
61 return logging.ERROR
62 elif logging_level == 'warning':
63 return logging.WARNING
64 elif logging_level == 'debug':
65 return logging.DEBUG
66
67 return logging.INFO
68
69
70 def parse_args(args):
71 """Parse command line arguments."""
72 parser = argparse.ArgumentParser(description='Run opsdroid.')
73 parser.add_argument('--gen-config', action="store_true",
74 help='prints out an example configuration file')
75 return parser.parse_args(args)
76
77
78 def check_dependencies():
79 """Check for system dependencies required by opsdroid."""
80 if sys.version_info[0] < 3 or sys.version_info[1] < 5:
81 logging.critical("Whoops! opsdroid requires python 3.5 or above.")
82 sys.exit(1)
83
84
85 def main():
86 """Enter the application here."""
87 args = parse_args(sys.argv[1:])
88
89 if args.gen_config:
90 with open(EXAMPLE_CONFIG_FILE, 'r') as conf:
91 print(conf.read())
92 sys.exit(0)
93
94 check_dependencies()
95
96 restart = True
97
98 while restart:
99 with OpsDroid() as opsdroid:
100 opsdroid.load()
101 configure_logging(opsdroid.config)
102 opsdroid.web_server = Web(opsdroid)
103 opsdroid.start_loop()
104 restart = opsdroid.should_restart
105
106
107 if __name__ == "__main__":
108 main()
109
[end of opsdroid/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -1,11 +1,12 @@
"""Starts opsdroid."""
+import os
import sys
import logging
import argparse
from opsdroid.core import OpsDroid
-from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE
+from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE
from opsdroid.web import Web
@@ -19,9 +20,12 @@
rootlogger.handlers.pop()
try:
- logfile_path = config["logging"]["path"]
+ if config["logging"]["path"]:
+ logfile_path = os.path.expanduser(config["logging"]["path"])
+ else:
+ logfile_path = config["logging"]["path"]
except KeyError:
- logfile_path = LOG_FILENAME
+ logfile_path = DEFAULT_LOG_FILENAME
try:
log_level = get_logging_level(
diff --git a/opsdroid/const.py b/opsdroid/const.py
--- a/opsdroid/const.py
+++ b/opsdroid/const.py
@@ -3,10 +3,10 @@
__version__ = "0.8.1"
-LOG_FILENAME = 'output.log'
DEFAULT_GIT_URL = "https://github.com/opsdroid/"
MODULES_DIRECTORY = "opsdroid-modules"
-DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
+DEFAULT_ROOT_PATH = os.path.expanduser("~/.opsdroid")
+DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')
DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -1,11 +1,12 @@\n \"\"\"Starts opsdroid.\"\"\"\n \n+import os\n import sys\n import logging\n import argparse\n \n from opsdroid.core import OpsDroid\n-from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\n+from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\n from opsdroid.web import Web\n \n \n@@ -19,9 +20,12 @@\n rootlogger.handlers.pop()\n \n try:\n- logfile_path = config[\"logging\"][\"path\"]\n+ if config[\"logging\"][\"path\"]:\n+ logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n+ else:\n+ logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n- logfile_path = LOG_FILENAME\n+ logfile_path = DEFAULT_LOG_FILENAME\n \n try:\n log_level = get_logging_level(\ndiff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -3,10 +3,10 @@\n \n __version__ = \"0.8.1\"\n \n-LOG_FILENAME = 'output.log'\n DEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\n MODULES_DIRECTORY = \"opsdroid-modules\"\n-DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n+DEFAULT_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\n+DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n", "issue": "Change default log location\nLogs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command.\r\n\r\nLogs should either be written to `/var/log/opsdroid.log` or as that may not be writeable by all users maybe `~/.opsdroid/opsdroid.log`.\n", "before_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.1\"\n\nLOG_FILENAME = 'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n 
rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]} | 1,653 | 423 |
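The golden_diff above moves the default log file from the current working directory to `~/.opsdroid/output.log` and expands `~` in a user-configured path. A small sketch of that resolution logic in isolation (the `resolve_logfile` helper is a hypothetical extraction; in the real patch the logic sits inside `configure_logging`):

```
import os

DEFAULT_ROOT_PATH = os.path.expanduser("~/.opsdroid")
DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, "output.log")

def resolve_logfile(config):
    # Mirror the patched behaviour: a configured path is expanded so "~" works,
    # a missing setting falls back to ~/.opsdroid/output.log, and an explicitly
    # falsy value is passed through so file logging stays disabled.
    try:
        configured = config["logging"]["path"]
    except KeyError:
        return DEFAULT_LOG_FILENAME
    return os.path.expanduser(configured) if configured else configured

print(resolve_logfile({}))                                 # ~/.opsdroid/output.log
print(resolve_logfile({"logging": {"path": "~/op.log"}}))  # "~" expanded
print(resolve_logfile({"logging": {"path": None}}))        # None -> no file handler
```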
problem_id: gh_patches_debug_65370
source: rasdani/github-patches
task_type: git_diff
in_source_id: fossasia__open-event-server-5151

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
For the edit action button in admin/users the super admin should be allowed to make the users as an admin.
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
The patch for the users is working but is_admin is a readonly property even for the super user. Used the patch request for users and changed the value for is_admin from true to false. But there is no change after the request to the value of is_admin.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to v1/users/user_id
2. Send a patch request
3. Change the value of is_admin for some user from false to true.
4. See error

After the request

</issue>
<code>
[start of app/api/users.py]
1 import base64
2
3 from flask import Blueprint, request, jsonify, abort, make_response
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6
7 from app import get_settings
8 from app.api.bootstrap import api
9 from app.api.helpers.db import safe_query, get_count
10 from app.api.helpers.exceptions import ConflictException
11 from app.api.helpers.exceptions import ForbiddenException
12 from app.api.helpers.files import create_save_image_sizes, make_frontend_url
13 from app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action
14 from app.api.helpers.permission_manager import has_access
15 from app.api.helpers.permissions import is_user_itself
16 from app.api.helpers.utilities import get_serializer, str_generator
17 from app.api.schema.users import UserSchema, UserSchemaPublic
18 from app.models import db
19 from app.models.access_code import AccessCode
20 from app.models.discount_code import DiscountCode
21 from app.models.email_notification import EmailNotification
22 from app.models.event_invoice import EventInvoice
23 from app.models.feedback import Feedback
24 from app.models.mail import USER_REGISTER_WITH_PASSWORD
25 from app.models.notification import Notification
26 from app.models.session import Session
27 from app.models.speaker import Speaker
28 from app.models.ticket_holder import TicketHolder
29 from app.models.user import User
30 from app.models.users_events_role import UsersEventsRoles
31
32 user_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')
33
34
35 class UserList(ResourceList):
36 """
37 List and create Users
38 """
39 def before_create_object(self, data, view_kwargs):
40 """
41 method to check if there is an existing user with same email which is received in data to create a new user
42 :param data:
43 :param view_kwargs:
44 :return:
45 """
46 if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:
47 raise ConflictException({'pointer': '/data/attributes/email'}, "Email already exists")
48
49 def after_create_object(self, user, data, view_kwargs):
50 """
51 method to send-
52 email notification
53 mail link for register verification
54 add image urls
55 :param user:
56 :param data:
57 :param view_kwargs:
58 :return:
59 """
60 s = get_serializer()
61 hash = str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')
62 link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})
63 send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],
64 email=user.email)
65 send_email_confirmation(user.email, link)
66
67 if data.get('original_image_url'):
68 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
69 uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']
70 del uploaded_images['large_image_url']
71 self.session.query(User).filter_by(id=user.id).update(uploaded_images)
72
73 decorators = (api.has_permission('is_admin', methods="GET"),)
74 schema = UserSchema
75 data_layer = {'session': db.session,
76 'model': User,
77 'methods': {
78 'before_create_object': before_create_object,
79 'after_create_object': after_create_object
80 }}
81
82
83 class UserDetail(ResourceDetail):
84 """
85 User detail by id
86 """
87 def before_get(self, args, kwargs):
88
89 if current_user.is_admin or current_user.is_super_admin or current_user:
90 self.schema = UserSchema
91 else:
92 self.schema = UserSchemaPublic
93
94 def before_get_object(self, view_kwargs):
95 """
96 before get method for user object
97 :param view_kwargs:
98 :return:
99 """
100 if view_kwargs.get('notification_id') is not None:
101 notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')
102 if notification.user_id is not None:
103 view_kwargs['id'] = notification.user_id
104 else:
105 view_kwargs['id'] = None
106
107 if view_kwargs.get('feedback_id') is not None:
108 print(view_kwargs['feedback_id'])
109 feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')
110 if feedback.user_id is not None:
111 view_kwargs['id'] = feedback.user_id
112 else:
113 view_kwargs['id'] = None
114
115 if view_kwargs.get('attendee_id') is not None:
116 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
117 if attendee.user is not None:
118 if (not has_access('is_user_itself',
119 user_id=attendee.user.id) or not has_access('is_coorganizer',
120 event_id=attendee.event_id)):
121 raise ForbiddenException({'source': ''}, 'Access Forbidden')
122 view_kwargs['id'] = attendee.user.id
123 else:
124 view_kwargs['id'] = None
125
126 if view_kwargs.get('event_invoice_id') is not None:
127 event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')
128 if event_invoice.user_id is not None:
129 view_kwargs['id'] = event_invoice.user_id
130 else:
131 view_kwargs['id'] = None
132
133 if view_kwargs.get('users_events_role_id') is not None:
134 users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],
135 'users_events_role_id')
136 if users_events_role.user_id is not None:
137 view_kwargs['id'] = users_events_role.user_id
138
139 if view_kwargs.get('speaker_id') is not None:
140 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
141 if speaker.user_id is not None:
142 view_kwargs['id'] = speaker.user_id
143 else:
144 view_kwargs['id'] = None
145
146 if view_kwargs.get('session_id') is not None:
147 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
148 if session.creator_id is not None:
149 view_kwargs['id'] = session.creator_id
150 else:
151 view_kwargs['id'] = None
152
153 if view_kwargs.get('access_code_id') is not None:
154 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
155 if access_code.marketer_id is not None:
156 view_kwargs['id'] = access_code.marketer_id
157 else:
158 view_kwargs['id'] = None
159
160 if view_kwargs.get('discount_code_id') is not None:
161 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
162 if discount_code.marketer_id is not None:
163 view_kwargs['id'] = discount_code.marketer_id
164 else:
165 view_kwargs['id'] = None
166
167 if view_kwargs.get('email_notification_id') is not None:
168 email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],
169 'email_notification_id')
170 if email_notification.user_id is not None:
171 view_kwargs['id'] = email_notification.user_id
172 else:
173 view_kwargs['id'] = None
174
175 def before_update_object(self, user, data, view_kwargs):
176 if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:
177 uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)
178 data['original_image_url'] = uploaded_images['original_image_url']
179 data['small_image_url'] = uploaded_images['thumbnail_image_url']
180 data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']
181 data['icon_image_url'] = uploaded_images['icon_image_url']
182
183 if data.get('email') and data['email'] != user.email:
184 view_kwargs['email_changed'] = user.email
185
186 if data.get('is_admin') != user.is_admin:
187 user.is_admin = not user.is_admin
188
189 def after_update_object(self, user, data, view_kwargs):
190 """
191 method to mail user about email change
192 :param user:
193 :param data:
194 :param view_kwargs:
195 :return:
196 """
197 if view_kwargs.get('email_changed'):
198 send_email_change_user_email(user, view_kwargs.get('email_changed'))
199
200 decorators = (api.has_permission('is_user_itself', fetch="user_id,id", fetch_as="user_id",
201 model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,
202 DiscountCode, EmailNotification, Speaker, User],
203 fetch_key_url="notification_id, feedback_id, users_events_role_id, session_id, \
204 event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id",
205 leave_if=lambda a: a.get('attendee_id')), )
206 schema = UserSchema
207 data_layer = {'session': db.session,
208 'model': User,
209 'methods': {
210 'before_get_object': before_get_object,
211 'before_update_object': before_update_object,
212 'after_update_object': after_update_object
213 }}
214
215
216 class UserRelationship(ResourceRelationship):
217 """
218 User Relationship
219 """
220 decorators = (is_user_itself, )
221 schema = UserSchema
222 data_layer = {'session': db.session,
223 'model': User}
224
225
226 @user_misc_routes.route('/users/checkEmail', methods=['POST'])
227 def is_email_available():
228 email = request.json.get('email', None)
229 if email:
230 if get_count(db.session.query(User).filter_by(email=email)):
231 return jsonify(
232 result="False"
233 )
234 else:
235 return jsonify(
236 result="True"
237 )
238 else:
239 abort(
240 make_response(jsonify(error="Email field missing"), 422)
241 )
242
[end of app/api/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/app/api/users.py b/app/api/users.py
--- a/app/api/users.py
+++ b/app/api/users.py
@@ -183,7 +183,7 @@
if data.get('email') and data['email'] != user.email:
view_kwargs['email_changed'] = user.email
- if data.get('is_admin') != user.is_admin:
+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:
user.is_admin = not user.is_admin
def after_update_object(self, user, data, view_kwargs):
| {"golden_diff": "diff --git a/app/api/users.py b/app/api/users.py\n--- a/app/api/users.py\n+++ b/app/api/users.py\n@@ -183,7 +183,7 @@\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n \n- if data.get('is_admin') != user.is_admin:\n+ if has_access('is_super_admin') and data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n \n def after_update_object(self, user, data, view_kwargs):\n", "issue": "For the edit action button in admin/users the super admin should be allowed to make the users as an admin.\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe patch for the users is working but is_admin is a readonly property even for the super user. Used the patch request for users and changed the value for is_admin from true to false. But there is no change after the request to the value of is_admin. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to v1/users/user_id\r\n2. Send a patch request\r\n3. Change the value of is_admin for some user from false to true.\r\n4. See error\r\n\r\n\r\n\r\nAfter the request\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app import get_settings\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.files import create_save_image_sizes, make_frontend_url\nfrom app.api.helpers.mail import send_email_confirmation, send_email_change_user_email, send_email_with_action\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import is_user_itself\nfrom app.api.helpers.utilities import get_serializer, str_generator\nfrom app.api.schema.users import UserSchema, UserSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.email_notification import EmailNotification\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.feedback import Feedback\nfrom app.models.mail import USER_REGISTER_WITH_PASSWORD\nfrom app.models.notification import Notification\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\nfrom app.models.users_events_role import UsersEventsRoles\n\nuser_misc_routes = Blueprint('user_misc', __name__, url_prefix='/v1')\n\n\nclass UserList(ResourceList):\n \"\"\"\n List and create Users\n \"\"\"\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n method to check if there is an existing user with same email which is received in data to create a new user\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if db.session.query(User.id).filter_by(email=data['email'], deleted_at=None).scalar() is not None:\n raise ConflictException({'pointer': '/data/attributes/email'}, \"Email already exists\")\n\n def after_create_object(self, user, data, view_kwargs):\n \"\"\"\n method to send-\n email notification\n mail link for register verification\n add image urls\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n s = get_serializer()\n hash 
= str(base64.b64encode(str(s.dumps([user.email, str_generator()])).encode()), 'utf-8')\n link = make_frontend_url('/email/verify'.format(id=user.id), {'token': hash})\n send_email_with_action(user, USER_REGISTER_WITH_PASSWORD, app_name=get_settings()['app_name'],\n email=user.email)\n send_email_confirmation(user.email, link)\n\n if data.get('original_image_url'):\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n uploaded_images['small_image_url'] = uploaded_images['thumbnail_image_url']\n del uploaded_images['large_image_url']\n self.session.query(User).filter_by(id=user.id).update(uploaded_images)\n\n decorators = (api.has_permission('is_admin', methods=\"GET\"),)\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass UserDetail(ResourceDetail):\n \"\"\"\n User detail by id\n \"\"\"\n def before_get(self, args, kwargs):\n\n if current_user.is_admin or current_user.is_super_admin or current_user:\n self.schema = UserSchema\n else:\n self.schema = UserSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method for user object\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('notification_id') is not None:\n notification = safe_query(self, Notification, 'id', view_kwargs['notification_id'], 'notification_id')\n if notification.user_id is not None:\n view_kwargs['id'] = notification.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('feedback_id') is not None:\n print(view_kwargs['feedback_id'])\n feedback = safe_query(self, Feedback, 'id', view_kwargs['feedback_id'], 'feedback_id')\n if feedback.user_id is not None:\n view_kwargs['id'] = feedback.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.user is not None:\n if (not has_access('is_user_itself',\n user_id=attendee.user.id) or not has_access('is_coorganizer',\n event_id=attendee.event_id)):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n view_kwargs['id'] = attendee.user.id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('event_invoice_id') is not None:\n event_invoice = safe_query(self, EventInvoice, 'id', view_kwargs['event_invoice_id'], 'event_invoice_id')\n if event_invoice.user_id is not None:\n view_kwargs['id'] = event_invoice.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_role_id') is not None:\n users_events_role = safe_query(self, UsersEventsRoles, 'id', view_kwargs['users_events_role_id'],\n 'users_events_role_id')\n if users_events_role.user_id is not None:\n view_kwargs['id'] = users_events_role.user_id\n\n if view_kwargs.get('speaker_id') is not None:\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n if speaker.user_id is not None:\n view_kwargs['id'] = speaker.user_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('session_id') is not None:\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if session.creator_id is not None:\n view_kwargs['id'] = session.creator_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('access_code_id') is not None:\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n if access_code.marketer_id is not 
None:\n view_kwargs['id'] = access_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('discount_code_id') is not None:\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n if discount_code.marketer_id is not None:\n view_kwargs['id'] = discount_code.marketer_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('email_notification_id') is not None:\n email_notification = safe_query(self, EmailNotification, 'id', view_kwargs['email_notification_id'],\n 'email_notification_id')\n if email_notification.user_id is not None:\n view_kwargs['id'] = email_notification.user_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, user, data, view_kwargs):\n if data.get('original_image_url') and data['original_image_url'] != user.original_image_url:\n uploaded_images = create_save_image_sizes(data['original_image_url'], 'user', user.id)\n data['original_image_url'] = uploaded_images['original_image_url']\n data['small_image_url'] = uploaded_images['thumbnail_image_url']\n data['thumbnail_image_url'] = uploaded_images['thumbnail_image_url']\n data['icon_image_url'] = uploaded_images['icon_image_url']\n\n if data.get('email') and data['email'] != user.email:\n view_kwargs['email_changed'] = user.email\n\n if data.get('is_admin') != user.is_admin:\n user.is_admin = not user.is_admin\n\n def after_update_object(self, user, data, view_kwargs):\n \"\"\"\n method to mail user about email change\n :param user:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('email_changed'):\n send_email_change_user_email(user, view_kwargs.get('email_changed'))\n\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id,id\", fetch_as=\"user_id\",\n model=[Notification, Feedback, UsersEventsRoles, Session, EventInvoice, AccessCode,\n DiscountCode, EmailNotification, Speaker, User],\n fetch_key_url=\"notification_id, feedback_id, users_events_role_id, session_id, \\\n event_invoice_id, access_code_id, discount_code_id, email_notification_id, speaker_id, id\",\n leave_if=lambda a: a.get('attendee_id')), )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass UserRelationship(ResourceRelationship):\n \"\"\"\n User Relationship\n \"\"\"\n decorators = (is_user_itself, )\n schema = UserSchema\n data_layer = {'session': db.session,\n 'model': User}\n\n\n@user_misc_routes.route('/users/checkEmail', methods=['POST'])\ndef is_email_available():\n email = request.json.get('email', None)\n if email:\n if get_count(db.session.query(User).filter_by(email=email)):\n return jsonify(\n result=\"False\"\n )\n else:\n return jsonify(\n result=\"True\"\n )\n else:\n abort(\n make_response(jsonify(error=\"Email field missing\"), 422)\n )\n", "path": "app/api/users.py"}]} | 3,556 | 129 |
gh_patches_debug_25413 | rasdani/github-patches | git_diff | getsentry__sentry-27105 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists
### Environment
self-hosted (`onpremise` deployment)
### Version
21.6.1 upgrade to 21.6.2
### Steps to Reproduce
1. git fetch tags/21.6.2
2. ./install.sh
### Expected Result
Migration to succeed.
### Actual Result
```
django.db.utils.ProgrammingError: ProgrammingError('relation "sentry_groupedmessage_project_id_id_515aaa7e_uniq" already exists\n',)
SQL: ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE ("project_id", "id")
```
</issue>
<code>
[start of src/sentry/migrations/0216_cdc_setup_replication_index.py]
1 # Generated by Django 1.11.29 on 2021-06-30 18:51
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7 # This flag is used to mark that a migration shouldn't be automatically run in
8 # production. We set this to True for operations that we think are risky and want
9 # someone from ops to run manually and monitor.
10 # General advice is that if in doubt, mark your migration as `is_dangerous`.
11 # Some things you should always mark as dangerous:
12 # - Large data migrations. Typically we want these to be run manually by ops so that
13 # they can be monitored. Since data migrations will now hold a transaction open
14 # this is even more important.
15 # - Adding columns to highly active tables, even ones that are NULL.
16 is_dangerous = True
17
18 # This flag is used to decide whether to run this migration in a transaction or not.
19 # By default we prefer to run in a transaction, but for migrations where you want
20 # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
21 # want to create an index concurrently when adding one to an existing table.
22 # You'll also usually want to set this to `False` if you're writing a data
23 # migration, since we don't want the entire migration to run in one long-running
24 # transaction.
25 atomic = False
26
27 dependencies = [
28 ("sentry", "0215_fix_state"),
29 ]
30
31 operations = [
32 migrations.AlterUniqueTogether(
33 name="group",
34 unique_together={("project", "id"), ("project", "short_id")},
35 ),
36 migrations.RunSQL(
37 sql="""
38 ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX
39 sentry_groupasignee_project_id_group_id_fbf4364e_uniq
40 """,
41 reverse_sql="""
42 ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT
43 """,
44 hints={"tables": ["sentry_groupasignee"]},
45 ),
46 migrations.RunSQL(
47 sql="""
48 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX
49 sentry_groupedmessage_project_id_id_515aaa7e_uniq
50 """,
51 reverse_sql="""
52 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT
53 """,
54 hints={"tables": ["sentry_groupedmessage"]},
55 ),
56 ]
57
[end of src/sentry/migrations/0216_cdc_setup_replication_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py
--- a/src/sentry/migrations/0216_cdc_setup_replication_index.py
+++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py
@@ -29,9 +29,33 @@
]
operations = [
- migrations.AlterUniqueTogether(
- name="group",
- unique_together={("project", "id"), ("project", "short_id")},
+ migrations.SeparateDatabaseAndState(
+ database_operations=[
+ migrations.RunSQL(
+ """
+ CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq" ON "sentry_groupedmessage" ("project_id", "id");
+ """,
+ reverse_sql="""
+ DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;
+ """,
+ hints={"tables": ["sentry_groupedmessage"]},
+ ),
+ migrations.RunSQL(
+ """
+ ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE USING INDEX "sentry_groupedmessage_project_id_id_515aaa7e_uniq";
+ """,
+ reverse_sql="""
+ ALTER TABLE "sentry_groupedmessage" DROP CONSTRAINT IF EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq";
+ """,
+ hints={"tables": ["sentry_groupedmessage"]},
+ ),
+ ],
+ state_operations=[
+ migrations.AlterUniqueTogether(
+ name="group",
+ unique_together={("project", "id"), ("project", "short_id")},
+ ),
+ ],
),
migrations.RunSQL(
sql="""
| {"golden_diff": "diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n--- a/src/sentry/migrations/0216_cdc_setup_replication_index.py\n+++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n@@ -29,9 +29,33 @@\n ]\n \n operations = [\n- migrations.AlterUniqueTogether(\n- name=\"group\",\n- unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ migrations.SeparateDatabaseAndState(\n+ database_operations=[\n+ migrations.RunSQL(\n+ \"\"\"\n+ CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" ON \"sentry_groupedmessage\" (\"project_id\", \"id\");\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ migrations.RunSQL(\n+ \"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE USING INDEX \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" DROP CONSTRAINT IF EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ ],\n+ state_operations=[\n+ migrations.AlterUniqueTogether(\n+ name=\"group\",\n+ unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ ),\n+ ],\n ),\n migrations.RunSQL(\n sql=\"\"\"\n", "issue": "Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists\n### Environment\n\nself-hosted (`onpremise` deployment)\n\n### Version\n\n21.6.1 upgrade to 21.6.1\n\n### Steps to Reproduce\n\n1. git fetch tags/21.6.2\r\n2. ./install.sh\n\n### Expected Result\n\nMigration to succeed.\n\n### Actual Result\n\n```\r\ndjango.db.utils.ProgrammingError: ProgrammingError('relation \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" already exists\\n',)\r\nSQL: ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE (\"project_id\", \"id\")\r\n```\n", "before_files": [{"content": "# Generated by Django 1.11.29 on 2021-06-30 18:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n # This flag is used to decide whether to run this migration in a transaction or not.\n # By default we prefer to run in a transaction, but for migrations where you want\n # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. 
Typically you'll\n # want to create an index concurrently when adding one to an existing table.\n # You'll also usually want to set this to `False` if you're writing a data\n # migration, since we don't want the entire migration to run in one long-running\n # transaction.\n atomic = False\n\n dependencies = [\n (\"sentry\", \"0215_fix_state\"),\n ]\n\n operations = [\n migrations.AlterUniqueTogether(\n name=\"group\",\n unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX\n sentry_groupasignee_project_id_group_id_fbf4364e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupasignee\"]},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX\n sentry_groupedmessage_project_id_id_515aaa7e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ]\n", "path": "src/sentry/migrations/0216_cdc_setup_replication_index.py"}]} | 1,388 | 444 |
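The failure in the record above comes from re-running a migration whose unique index already exists in the database. The fix creates the index concurrently with `IF NOT EXISTS`, promotes it to a constraint via `ADD CONSTRAINT … UNIQUE USING INDEX`, and records only the `unique_together` change in Django's model state. A minimal sketch of that pattern for a hypothetical `myapp_item` table (PostgreSQL assumed; the app, table, and column names here are illustrative, not Sentry's schema):

```python
from django.db import migrations


class Migration(migrations.Migration):
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction.
    atomic = False

    dependencies = [("myapp", "0001_initial")]

    operations = [
        migrations.SeparateDatabaseAndState(
            # Raw SQL that tolerates a pre-existing index on re-runs...
            database_operations=[
                migrations.RunSQL(
                    sql="""
                    CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS "myapp_item_project_id_key_uniq"
                    ON "myapp_item" ("project_id", "key");
                    """,
                    reverse_sql="""
                    DROP INDEX CONCURRENTLY IF EXISTS "myapp_item_project_id_key_uniq";
                    """,
                ),
                migrations.RunSQL(
                    sql="""
                    ALTER TABLE "myapp_item" ADD CONSTRAINT "myapp_item_project_id_key_uniq"
                    UNIQUE USING INDEX "myapp_item_project_id_key_uniq";
                    """,
                    reverse_sql="""
                    ALTER TABLE "myapp_item" DROP CONSTRAINT IF EXISTS "myapp_item_project_id_key_uniq";
                    """,
                ),
            ],
            # ...while the ORM state only records the unique_together change.
            state_operations=[
                migrations.AlterUniqueTogether(
                    name="item",
                    unique_together={("project", "key")},
                ),
            ],
        ),
    ]
```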
gh_patches_debug_14886 | rasdani/github-patches | git_diff | DDMAL__CantusDB-582 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On Century Detail pages, sometimes unpublished sources are listed
When visiting century/3863 while not logged in, there is a link to source/672452. Clicking it returns a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed.
This bug occurs on both staging and production.
</issue>
<code>
[start of django/cantusdb_project/main_app/views/century.py]
1 from django.views.generic import DetailView
2 from main_app.models import Century
3
4 class CenturyDetailView(DetailView):
5 model = Century
6 context_object_name = "century"
7 template_name = "century_detail.html"
8
[end of django/cantusdb_project/main_app/views/century.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py
--- a/django/cantusdb_project/main_app/views/century.py
+++ b/django/cantusdb_project/main_app/views/century.py
@@ -1,7 +1,20 @@
from django.views.generic import DetailView
-from main_app.models import Century
+from main_app.models import Century, Source
+from typing import Any
class CenturyDetailView(DetailView):
model = Century
context_object_name = "century"
template_name = "century_detail.html"
+
+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:
+ context = super().get_context_data(**kwargs)
+ century = self.get_object()
+ user = self.request.user
+ display_unpublished = user.is_authenticated
+ sources = Source.objects.filter(century=century)
+ if not display_unpublished:
+ sources = sources.filter(published=True)
+ sources=sources.only("title", "id")
+ context["sources"] = sources
+ return context
\ No newline at end of file
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py\n--- a/django/cantusdb_project/main_app/views/century.py\n+++ b/django/cantusdb_project/main_app/views/century.py\n@@ -1,7 +1,20 @@\n from django.views.generic import DetailView\n-from main_app.models import Century\n+from main_app.models import Century, Source\n+from typing import Any\n \n class CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n+\n+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n+ context = super().get_context_data(**kwargs)\n+ century = self.get_object()\n+ user = self.request.user\n+ display_unpublished = user.is_authenticated\n+ sources = Source.objects.filter(century=century)\n+ if not display_unpublished:\n+ sources = sources.filter(published=True)\n+ sources=sources.only(\"title\", \"id\")\n+ context[\"sources\"] = sources\n+ return context\n\\ No newline at end of file\n", "issue": "On Century Detail pages, sometimes unpublished sources are listed\nVisiting century/3863 while not logged in, there's a link to source/672452. When I click on it, I get a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed.\r\n\r\nThis bug occurs on both staging and production.\n", "before_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Century\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n", "path": "django/cantusdb_project/main_app/views/century.py"}]} | 688 | 265 |
gh_patches_debug_12512 | rasdani/github-patches | git_diff | PaddlePaddle__models-2482 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deeplabv3+ eval reports a dimension error
On the paddle 1.5 branch, deeplabv3+ evaluation produces the following error:
Traceback (most recent call last):
File "./eval.py", line 131, in <module>
fetch_list=[pred, miou, out_wrong, out_correct])
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py", line 650, in run
use_program_cache=use_program_cache)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py", line 748, in _run
exe.run(program.desc, scope, 0, True, True, fetch_var_name)
paddle.fluid.core_avx.EnforceNotMet: Invoke operator bilinear_interp error.
Python Callstacks:
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/framework.py", line 1699, in append_op
attrs=kwargs.get("attrs", None))
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layer_helper.py", line 43, in append_op
return self.main_program.current_block().append_op(*args, **kwargs)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py", line 7637, in image_resize
attrs=attrs)
File "/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py", line 7742, in resize_bilinear
align_corners, align_mode)
File "./eval.py", line 84, in <module>
img = fluid.layers.resize_bilinear(img, image_shape)
C++ Callstacks:
Enforce failed. Expected dim_x.size() == 4, but received dim_x.size():3 != 4:4.
X's dimension must be 4 at [/ssd1/xiege/paddle_ce/Paddle/paddle/fluid/operators/interpolate_op.cc:40]
</issue>
<code>
[start of PaddleCV/deeplabv3+/eval.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 import os
5 if 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:
6 os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'
7 os.environ['FLAGS_enable_parallel_graph'] = '1'
8
9 import paddle
10 import paddle.fluid as fluid
11 import numpy as np
12 import argparse
13 from reader import CityscapeDataset
14 import reader
15 import models
16 import sys
17 import utility
18
19 parser = argparse.ArgumentParser()
20 add_arg = lambda *args: utility.add_arguments(*args, argparser=parser)
21
22 # yapf: disable
23 add_arg('total_step', int, -1, "Number of the step to be evaluated, -1 for full evaluation.")
24 add_arg('init_weights_path', str, None, "Path of the weights to evaluate.")
25 add_arg('dataset_path', str, None, "Cityscape dataset path.")
26 add_arg('use_gpu', bool, True, "Whether use GPU or CPU.")
27 add_arg('num_classes', int, 19, "Number of classes.")
28 add_arg('use_py_reader', bool, True, "Use py_reader.")
29 add_arg('norm_type', str, 'bn', "Normalization type, should be 'bn' or 'gn'.")
30 #yapf: enable
31
32
33 def mean_iou(pred, label):
34 label = fluid.layers.elementwise_min(
35 label, fluid.layers.assign(np.array(
36 [num_classes], dtype=np.int32)))
37 label_ignore = (label == num_classes).astype('int32')
38 label_nignore = (label != num_classes).astype('int32')
39
40 pred = pred * label_nignore + label_ignore * num_classes
41
42 miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes + 1)
43 return miou, wrong, correct
44
45
46 def load_model():
47 if os.path.isdir(args.init_weights_path):
48 fluid.io.load_params(
49 exe, dirname=args.init_weights_path, main_program=tp)
50 else:
51 fluid.io.load_params(
52 exe, dirname="", filename=args.init_weights_path, main_program=tp)
53
54
55 CityscapeDataset = reader.CityscapeDataset
56
57 args = parser.parse_args()
58
59 models.clean()
60 models.is_train = False
61 models.default_norm_type = args.norm_type
62 deeplabv3p = models.deeplabv3p
63
64 image_shape = [1025, 2049]
65 eval_shape = [1024, 2048]
66
67 sp = fluid.Program()
68 tp = fluid.Program()
69 batch_size = 1
70 reader.default_config['crop_size'] = -1
71 reader.default_config['shuffle'] = False
72 num_classes = args.num_classes
73
74 with fluid.program_guard(tp, sp):
75 if args.use_py_reader:
76 py_reader = fluid.layers.py_reader(capacity=64,
77 shapes=[[1, 3, 0, 0], [1] + eval_shape],
78 dtypes=['float32', 'int32'])
79 img, label = fluid.layers.read_file(py_reader)
80 else:
81 img = fluid.layers.data(name='img', shape=[3, 0, 0], dtype='float32')
82 label = fluid.layers.data(name='label', shape=eval_shape, dtype='int32')
83
84 img = fluid.layers.resize_bilinear(img, image_shape)
85 logit = deeplabv3p(img)
86 logit = fluid.layers.resize_bilinear(logit, eval_shape)
87 pred = fluid.layers.argmax(logit, axis=1).astype('int32')
88 miou, out_wrong, out_correct = mean_iou(pred, label)
89
90 tp = tp.clone(True)
91 fluid.memory_optimize(
92 tp,
93 print_log=False,
94 skip_opt_set=set([pred.name, miou, out_wrong, out_correct]),
95 level=1)
96
97 place = fluid.CPUPlace()
98 if args.use_gpu:
99 place = fluid.CUDAPlace(0)
100 exe = fluid.Executor(place)
101 exe.run(sp)
102
103 if args.init_weights_path:
104 print("load from:", args.init_weights_path)
105 load_model()
106
107 dataset = CityscapeDataset(args.dataset_path, 'val')
108 if args.total_step == -1:
109 total_step = len(dataset.label_files)
110 else:
111 total_step = args.total_step
112
113 batches = dataset.get_batch_generator(batch_size, total_step)
114 if args.use_py_reader:
115 py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])
116 py_reader.start()
117
118 sum_iou = 0
119 all_correct = np.array([0], dtype=np.int64)
120 all_wrong = np.array([0], dtype=np.int64)
121
122 for i in range(total_step):
123 if not args.use_py_reader:
124 _, imgs, labels, names = next(batches)
125 result = exe.run(tp,
126 feed={'img': imgs,
127 'label': labels},
128 fetch_list=[pred, miou, out_wrong, out_correct])
129 else:
130 result = exe.run(tp,
131 fetch_list=[pred, miou, out_wrong, out_correct])
132
133 wrong = result[2][:-1] + all_wrong
134 right = result[3][:-1] + all_correct
135 all_wrong = wrong.copy()
136 all_correct = right.copy()
137 mp = (wrong + right) != 0
138 miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))
139 print('step: %s, mIoU: %s' % (i + 1, miou2))
140
[end of PaddleCV/deeplabv3+/eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PaddleCV/deeplabv3+/eval.py b/PaddleCV/deeplabv3+/eval.py
--- a/PaddleCV/deeplabv3+/eval.py
+++ b/PaddleCV/deeplabv3+/eval.py
@@ -112,7 +112,7 @@
batches = dataset.get_batch_generator(batch_size, total_step)
if args.use_py_reader:
- py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])
+ py_reader.decorate_tensor_provider(lambda :[ (yield b[0],b[1]) for b in batches])
py_reader.start()
sum_iou = 0
@@ -137,3 +137,5 @@
mp = (wrong + right) != 0
miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))
print('step: %s, mIoU: %s' % (i + 1, miou2))
+
+print('eval done!')
| {"golden_diff": "diff --git a/PaddleCV/deeplabv3+/eval.py b/PaddleCV/deeplabv3+/eval.py\n--- a/PaddleCV/deeplabv3+/eval.py\n+++ b/PaddleCV/deeplabv3+/eval.py\n@@ -112,7 +112,7 @@\n \n batches = dataset.get_batch_generator(batch_size, total_step)\n if args.use_py_reader:\n- py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])\n+ py_reader.decorate_tensor_provider(lambda :[ (yield b[0],b[1]) for b in batches])\n py_reader.start()\n \n sum_iou = 0\n@@ -137,3 +137,5 @@\n mp = (wrong + right) != 0\n miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))\n print('step: %s, mIoU: %s' % (i + 1, miou2))\n+\n+print('eval done!')\n", "issue": "deeplabv3+ eval \u62a5\u7ef4\u5ea6\u9519\u8bef\n\u5728paddle1.5\u5206\u652f\u4e0b\uff0cdeeplabv3+ \u7684\u8bc4\u4f30\u6709\u5982\u4e0b\u62a5\u9519\uff1a\r\nTraceback (most recent call last):\r\n File \"./eval.py\", line 131, in <module>\r\n fetch_list=[pred, miou, out_wrong, out_correct])\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py\", line 650, in run\r\n use_program_cache=use_program_cache)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/executor.py\", line 748, in _run\r\n exe.run(program.desc, scope, 0, True, True, fetch_var_name)\r\npaddle.fluid.core_avx.EnforceNotMet: Invoke operator bilinear_interp error.\r\nPython Callstacks:\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/framework.py\", line 1699, in append_op\r\n attrs=kwargs.get(\"attrs\", None))\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layer_helper.py\", line 43, in append_op\r\n return self.main_program.current_block().append_op(*args, **kwargs)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py\", line 7637, in image_resize\r\n attrs=attrs)\r\n File \"/opt/_internal/cpython-3.6.0/lib/python3.6/site-packages/paddle/fluid/layers/nn.py\", line 7742, in resize_bilinear\r\n align_corners, align_mode)\r\n File \"./eval.py\", line 84, in <module>\r\n img = fluid.layers.resize_bilinear(img, image_shape)\r\nC++ Callstacks:\r\nEnforce failed. 
Expected dim_x.size() == 4, but received dim_x.size():3 != 4:4.\r\nX's dimension must be 4 at [/ssd1/xiege/paddle_ce/Paddle/paddle/fluid/operators/interpolate_op.cc:40]\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\nos.environ['FLAGS_enable_parallel_graph'] = '1'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport sys\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('total_step', int, -1, \"Number of the step to be evaluated, -1 for full evaluation.\")\nadd_arg('init_weights_path', str, None, \"Path of the weights to evaluate.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('use_py_reader', bool, True, \"Use py_reader.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\n#yapf: enable\n\n\ndef mean_iou(pred, label):\n label = fluid.layers.elementwise_min(\n label, fluid.layers.assign(np.array(\n [num_classes], dtype=np.int32)))\n label_ignore = (label == num_classes).astype('int32')\n label_nignore = (label != num_classes).astype('int32')\n\n pred = pred * label_nignore + label_ignore * num_classes\n\n miou, wrong, correct = fluid.layers.mean_iou(pred, label, num_classes + 1)\n return miou, wrong, correct\n\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_params(\n exe, dirname=\"\", filename=args.init_weights_path, main_program=tp)\n\n\nCityscapeDataset = reader.CityscapeDataset\n\nargs = parser.parse_args()\n\nmodels.clean()\nmodels.is_train = False\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nimage_shape = [1025, 2049]\neval_shape = [1024, 2048]\n\nsp = fluid.Program()\ntp = fluid.Program()\nbatch_size = 1\nreader.default_config['crop_size'] = -1\nreader.default_config['shuffle'] = False\nnum_classes = args.num_classes\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[1, 3, 0, 0], [1] + eval_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(name='img', shape=[3, 0, 0], dtype='float32')\n label = fluid.layers.data(name='label', shape=eval_shape, dtype='int32')\n\n img = fluid.layers.resize_bilinear(img, image_shape)\n logit = deeplabv3p(img)\n logit = fluid.layers.resize_bilinear(logit, eval_shape)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n miou, out_wrong, out_correct = mean_iou(pred, label)\n\ntp = tp.clone(True)\nfluid.memory_optimize(\n tp,\n print_log=False,\n skip_opt_set=set([pred.name, miou, out_wrong, out_correct]),\n level=1)\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = CityscapeDataset(args.dataset_path, 'val')\nif args.total_step == -1:\n total_step = 
len(dataset.label_files)\nelse:\n total_step = args.total_step\n\nbatches = dataset.get_batch_generator(batch_size, total_step)\nif args.use_py_reader:\n py_reader.decorate_tensor_provider(lambda :[ (yield b[1],b[2]) for b in batches])\n py_reader.start()\n\nsum_iou = 0\nall_correct = np.array([0], dtype=np.int64)\nall_wrong = np.array([0], dtype=np.int64)\n\nfor i in range(total_step):\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n result = exe.run(tp,\n feed={'img': imgs,\n 'label': labels},\n fetch_list=[pred, miou, out_wrong, out_correct])\n else:\n result = exe.run(tp,\n fetch_list=[pred, miou, out_wrong, out_correct])\n\n wrong = result[2][:-1] + all_wrong\n right = result[3][:-1] + all_correct\n all_wrong = wrong.copy()\n all_correct = right.copy()\n mp = (wrong + right) != 0\n miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp])))\n print('step: %s, mIoU: %s' % (i + 1, miou2))\n", "path": "PaddleCV/deeplabv3+/eval.py"}]} | 2,596 | 243 |
gh_patches_debug_17941 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default InputTransform for video classification provides no normalization
## 🐛 Bug
Normalization does not work.
### To Reproduce
```bash
tensor([[[[[157., 157., 158., ..., 168., 171., 173.],
[157., 157., 158., ..., 192., 189., 187.],
[158., 158., 159., ..., 201., 214., 217.],
...,
[ 81., 81., 82., ..., 90., 91., 93.],
[ 81., 81., 82., ..., 88., 87., 87.],
[ 81., 81., 82., ..., 87., 86., 86.]],
[[158., 157., 154., ..., 177., 177., 177.],
[159., 158., 156., ..., 177., 177., 178.],
[159., 158., 157., ..., 178., 178., 180.],
...,
[ 83., 83., 83., ..., 114., 112., 107.],
[ 83., 83., 83., ..., 113., 98., 93.],
[ 83., 83., 83., ..., 232., 211., 207.]],
[[160., 160., 159., ..., 112., 102., 86.],
[166., 166., 166., ..., 116., 103., 86.],
[175., 176., 175., ..., 114., 105., 86.],
...,
[ 81., 81., 83., ..., 93., 92., 93.],
[ 82., 83., 83., ..., 108., 104., 103.],
[ 82., 82., 82., ..., 99., 97., 97.]],
...,
```
#### Code sample
```python
import os
from argparse import ArgumentParser
from torch.utils.data.sampler import RandomSampler
import flash
from flash.core.finetuning import NoFreeze
from flash.core.data.utils import download_data
from flash.video import VideoClassificationData, VideoClassifier
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--backbone', type=str, default="x3d_xs")
parser.add_argument('--download', type=bool, default=True)
parser.add_argument('--train_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/train"))
parser.add_argument('--val_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/val"))
parser.add_argument('--predict_folder', type=str, default=os.path.join(os.getcwd(),
"./data/kinetics/predict"))
parser.add_argument('--max_epochs', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--gpus', type=int, default=None)
parser.add_argument('--fast_dev_run', type=int, default=False)
args = parser.parse_args()
if args.download:
# Dataset Credit:Download a video clip dataset.
# Find more datasets at https://pytorchvideo.readthedocs.io/en/latest/data.html
download_data("https://pl-flash-data.s3.amazonaws.com/kinetics.zip",
os.path.join(os.getcwd(), "data/"))
datamodule = VideoClassificationData.from_folders(
train_folder=args.train_folder,
val_folder=args.val_folder,
predict_folder=args.predict_folder,
batch_size=8,
clip_sampler="uniform",
clip_duration=2,
video_sampler=RandomSampler,
decode_audio=False,
num_workers=2,
)
for batch in datamodule.train_dataloader():
print(batch['video'])
break
model = VideoClassifier(backbone=args.backbone, num_classes=datamodule.num_classes, pretrained=False)
trainer = flash.Trainer(max_epochs=args.max_epochs, gpus=args.gpus, strategy='ddp', fast_dev_run=args.fast_dev_run)
trainer.finetune(model, datamodule=datamodule, strategy=NoFreeze())
```
### Expected behavior
The input values to the model should be in range [0-1].
### Environment
- OS (e.g., Linux): Linux
- Python version: 3.8
- PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7):
- GPU models and configuration:
- Any other relevant information:
### Additional context
By default [VideoClassInputTransform](https://github.com/PyTorchLightning/lightning-flash/blob/master/flash/video/classification/input_transform.py#L35) performs the necessary preprocessing steps. One step that is missing is dividing by 255, i.e. adding a `Lambda(lambda x: x/255.)` function.
I can create a PR with a quick fix.
</issue>
<code>
[start of flash/video/classification/input_transform.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from dataclasses import dataclass
15 from typing import Callable
16
17 import torch
18
19 from flash.core.data.io.input_transform import InputTransform
20 from flash.core.data.transforms import ApplyToKeys
21 from flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires
22
23 if _KORNIA_AVAILABLE:
24 import kornia.augmentation as K
25
26 if _PYTORCHVIDEO_AVAILABLE:
27 from pytorchvideo.transforms import UniformTemporalSubsample
28 from torchvision.transforms import CenterCrop, Compose, RandomCrop
29 else:
30 ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
31
32
33 @requires("video")
34 @dataclass
35 class VideoClassificationInputTransform(InputTransform):
36
37 image_size: int = 244
38 temporal_sub_sample: int = 8
39 mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])
40 std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])
41 data_format: str = "BCTHW"
42 same_on_frame: bool = False
43
44 def per_sample_transform(self) -> Callable:
45 if self.training:
46 per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]
47 else:
48 per_sample_transform = [CenterCrop(self.image_size)]
49
50 return ApplyToKeys(
51 "video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
52 )
53
54 def per_batch_transform_on_device(self) -> Callable:
55 return ApplyToKeys(
56 "video",
57 K.VideoSequential(
58 K.Normalize(self.mean, self.std),
59 data_format=self.data_format,
60 same_on_frame=self.same_on_frame,
61 ),
62 )
63
[end of flash/video/classification/input_transform.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/video/classification/input_transform.py b/flash/video/classification/input_transform.py
--- a/flash/video/classification/input_transform.py
+++ b/flash/video/classification/input_transform.py
@@ -30,6 +30,10 @@
ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
+def normalize(x: torch.Tensor) -> torch.Tensor:
+ return x / 255.0
+
+
@requires("video")
@dataclass
class VideoClassificationInputTransform(InputTransform):
@@ -48,7 +52,8 @@
per_sample_transform = [CenterCrop(self.image_size)]
return ApplyToKeys(
- "video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
+ "video",
+ Compose([UniformTemporalSubsample(self.temporal_sub_sample), normalize] + per_sample_transform),
)
def per_batch_transform_on_device(self) -> Callable:
| {"golden_diff": "diff --git a/flash/video/classification/input_transform.py b/flash/video/classification/input_transform.py\n--- a/flash/video/classification/input_transform.py\n+++ b/flash/video/classification/input_transform.py\n@@ -30,6 +30,10 @@\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n \n \n+def normalize(x: torch.Tensor) -> torch.Tensor:\n+ return x / 255.0\n+\n+\n @requires(\"video\")\n @dataclass\n class VideoClassificationInputTransform(InputTransform):\n@@ -48,7 +52,8 @@\n per_sample_transform = [CenterCrop(self.image_size)]\n \n return ApplyToKeys(\n- \"video\", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)\n+ \"video\",\n+ Compose([UniformTemporalSubsample(self.temporal_sub_sample), normalize] + per_sample_transform),\n )\n \n def per_batch_transform_on_device(self) -> Callable:\n", "issue": "Default InputTransform for video classification provide no normalization\n## \ud83d\udc1b Bug\r\n\r\nNormalization does not work.\r\n\r\n### To Reproduce\r\n\r\n```bash\r\ntensor([[[[[157., 157., 158., ..., 168., 171., 173.],\r\n [157., 157., 158., ..., 192., 189., 187.],\r\n [158., 158., 159., ..., 201., 214., 217.],\r\n ...,\r\n [ 81., 81., 82., ..., 90., 91., 93.],\r\n [ 81., 81., 82., ..., 88., 87., 87.],\r\n [ 81., 81., 82., ..., 87., 86., 86.]],\r\n\r\n [[158., 157., 154., ..., 177., 177., 177.],\r\n [159., 158., 156., ..., 177., 177., 178.],\r\n [159., 158., 157., ..., 178., 178., 180.],\r\n ...,\r\n [ 83., 83., 83., ..., 114., 112., 107.],\r\n [ 83., 83., 83., ..., 113., 98., 93.],\r\n [ 83., 83., 83., ..., 232., 211., 207.]],\r\n\r\n [[160., 160., 159., ..., 112., 102., 86.],\r\n [166., 166., 166., ..., 116., 103., 86.],\r\n [175., 176., 175., ..., 114., 105., 86.],\r\n ...,\r\n [ 81., 81., 83., ..., 93., 92., 93.],\r\n [ 82., 83., 83., ..., 108., 104., 103.],\r\n [ 82., 82., 82., ..., 99., 97., 97.]],\r\n\r\n ...,\r\n\r\n```\r\n\r\n\r\n#### Code sample\r\n\r\n```python\r\nimport os\r\nfrom argparse import ArgumentParser\r\n\r\nfrom torch.utils.data.sampler import RandomSampler\r\n\r\nimport flash\r\nfrom flash.core.finetuning import NoFreeze\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.video import VideoClassificationData, VideoClassifier\r\n\r\nif __name__ == '__main__':\r\n parser = ArgumentParser()\r\n parser.add_argument('--seed', type=int, default=1234)\r\n parser.add_argument('--backbone', type=str, default=\"x3d_xs\")\r\n parser.add_argument('--download', type=bool, default=True)\r\n parser.add_argument('--train_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/train\"))\r\n parser.add_argument('--val_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/val\"))\r\n parser.add_argument('--predict_folder', type=str, default=os.path.join(os.getcwd(),\r\n \"./data/kinetics/predict\"))\r\n parser.add_argument('--max_epochs', type=int, default=1)\r\n parser.add_argument('--learning_rate', type=float, default=1e-3)\r\n parser.add_argument('--gpus', type=int, default=None)\r\n parser.add_argument('--fast_dev_run', type=int, default=False)\r\n args = parser.parse_args()\r\n\r\n\r\n if args.download:\r\n # Dataset Credit:Download a video clip dataset.\r\n # Find more datasets at https://pytorchvideo.readthedocs.io/en/latest/data.html\r\n download_data(\"https://pl-flash-data.s3.amazonaws.com/kinetics.zip\",\r\n os.path.join(os.getcwd(), \"data/\"))\r\n\r\n datamodule = VideoClassificationData.from_folders(\r\n 
train_folder=args.train_folder,\r\n val_folder=args.val_folder,\r\n predict_folder=args.predict_folder,\r\n batch_size=8,\r\n clip_sampler=\"uniform\",\r\n clip_duration=2,\r\n video_sampler=RandomSampler,\r\n decode_audio=False,\r\n num_workers=2,\r\n )\r\n\r\n for batch in datamodule.train_dataloader():\r\n print(batch['video'])\r\n break\r\n \r\n model = VideoClassifier(backbone=args.backbone, num_classes=datamodule.num_classes, pretrained=False)\r\n\r\n trainer = flash.Trainer(max_epochs=args.max_epochs, gpus=args.gpus, strategy='ddp', fast_dev_run=args.fast_dev_run)\r\n trainer.finetune(model, datamodule=datamodule, strategy=NoFreeze())\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe input values to the model should be in range [0-1].\r\n\r\n### Environment\r\n \r\n - OS (e.g., Linux): Linux\r\n - Python version: 3.8\r\n - PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): \r\n - GPU models and configuration: \r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nBy default [VideoClassInputTransform](https://github.com/PyTorchLightning/lightning-flash/blob/master/flash/video/classification/input_transform.py#L35) performs necessary preprocessing step. One step that is missing is dividing by 255. i.e. adding `Lambda(lambda x: x/255.)` function.\r\n\r\nI can create a PR with a quick fix.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom typing import Callable\n\nimport torch\n\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.transforms import UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n\n@requires(\"video\")\n@dataclass\nclass VideoClassificationInputTransform(InputTransform):\n\n image_size: int = 244\n temporal_sub_sample: int = 8\n mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])\n std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])\n data_format: str = \"BCTHW\"\n same_on_frame: bool = False\n\n def per_sample_transform(self) -> Callable:\n if self.training:\n per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]\n else:\n per_sample_transform = [CenterCrop(self.image_size)]\n\n return ApplyToKeys(\n \"video\", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)\n )\n\n def per_batch_transform_on_device(self) -> Callable:\n return ApplyToKeys(\n \"video\",\n K.VideoSequential(\n K.Normalize(self.mean, self.std),\n data_format=self.data_format,\n same_on_frame=self.same_on_frame,\n ),\n )\n", "path": "flash/video/classification/input_transform.py"}]} | 2,517 | 223 |
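The tensors quoted in the bug report are raw 0–255 pixel values reaching `K.Normalize`, whose default mean/std of 0.45/0.225 only make sense for inputs scaled to [0, 1]. A standalone PyTorch illustration of the difference — this mirrors the default statistics quoted above but is a sketch, not the Flash transform itself:

```python
import torch


def scale_to_unit_range(frames: torch.Tensor) -> torch.Tensor:
    """Map raw 0-255 pixel values to floats in [0, 1]."""
    return frames.float() / 255.0


frames = torch.randint(0, 256, (3, 8, 32, 32))             # C x T x H x W clip of raw pixels
mean = torch.tensor([0.45, 0.45, 0.45]).view(3, 1, 1, 1)
std = torch.tensor([0.225, 0.225, 0.225]).view(3, 1, 1, 1)

scaled = (scale_to_unit_range(frames) - mean) / std
print(scaled.min().item(), scaled.max().item())            # roughly -2.0 to 2.4

# Skipping the /255 step feeds the same statistics values ~255x too large:
unscaled = (frames.float() - mean) / std
print(unscaled.min().item(), unscaled.max().item())        # roughly -2.0 to 1131
```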
gh_patches_debug_36629 | rasdani/github-patches | git_diff | svthalia__concrexit-3382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Facedetection admin resubmit action
Sometimes the facedetection lambda can fail randomly. Photos are resubmitted nightly if that happens, but it would be nice to have a way to trigger resubmission sooner, without SSHing into the server.
</issue>
<code>
[start of website/facedetection/admin.py]
1 from django.contrib import admin
2 from django.db.models.query import Prefetch
3 from django.urls import reverse
4 from django.utils.html import format_html
5 from django.utils.safestring import mark_safe
6
7 from .models import (
8 FaceDetectionPhoto,
9 PhotoFaceEncoding,
10 ReferenceFace,
11 ReferenceFaceEncoding,
12 )
13
14
15 class ReferenceFaceEncodingInline(admin.TabularInline):
16 model = ReferenceFaceEncoding
17 readonly_fields = ["num_matches"]
18 fields = ["num_matches"]
19 can_delete = False
20 extra = 0
21
22 def has_add_permission(self, request, obj=None):
23 return False # Encodings should not be created manually.
24
25 def get_queryset(self, request):
26 return super().get_queryset(request).only("reference")
27
28
29 @admin.register(ReferenceFace)
30 class ReferenceFaceAdmin(admin.ModelAdmin):
31 list_display = [
32 "user",
33 "status",
34 "created_at",
35 "marked_for_deletion_at",
36 ]
37
38 search_fields = [
39 "user__username",
40 "user__first_name",
41 "user__last_name",
42 ]
43
44 list_filter = ["status", "marked_for_deletion_at"]
45 inlines = [ReferenceFaceEncodingInline]
46
47 def get_readonly_fields(self, request, obj=None):
48 if obj is None:
49 return ["created_at", "submitted_at", "status"]
50 return ["file", "user", "created_at", "submitted_at", "status"]
51
52
53 class PhotoFaceEncodingInline(admin.TabularInline):
54 model = PhotoFaceEncoding
55 readonly_fields = ["view_matches"]
56 fields = ["view_matches"]
57 can_delete = False
58 extra = 0
59
60 @admin.display(description="Matches")
61 def view_matches(self, obj):
62 reference_faces = [match.reference for match in obj.matches.all()]
63 if not reference_faces:
64 return "-"
65
66 links = [
67 format_html(
68 '<a href="{url}">{text}</a>',
69 url=reverse(
70 "admin:facedetection_referenceface_change",
71 kwargs={"object_id": rf.pk},
72 ),
73 text=str(rf),
74 )
75 for rf in reference_faces
76 ]
77 return mark_safe(", ".join(links))
78
79 def has_add_permission(self, request, obj=None):
80 return False # Encodings should not be created manually.
81
82 def get_queryset(self, request):
83 return (
84 super()
85 .get_queryset(request)
86 .only("photo") # Don't select the 128 encoding fields.
87 .prefetch_related(
88 "photo__photo__album",
89 Prefetch(
90 "matches",
91 queryset=ReferenceFaceEncoding.objects.select_related(
92 "reference", "reference__user"
93 ).only("reference"),
94 ),
95 )
96 )
97
98
99 @admin.register(FaceDetectionPhoto)
100 class FaceDetectionPhotoAdmin(admin.ModelAdmin):
101 list_display = [
102 "__str__",
103 "status",
104 "submitted_at",
105 "num_faces",
106 ]
107
108 readonly_fields = [
109 "photo",
110 "submitted_at",
111 "status",
112 ]
113
114 search_fields = [
115 "photo__album__title",
116 "photo__album__date",
117 "photo__file",
118 ]
119
120 list_filter = ["status", "submitted_at"]
121 inlines = [PhotoFaceEncodingInline]
122
123 def get_queryset(self, request):
124 return (
125 super()
126 .get_queryset(request)
127 .select_related("photo")
128 .prefetch_related("photo__album")
129 .select_properties("num_faces")
130 )
131
132 def has_add_permission(self, request):
133 return False
134
[end of website/facedetection/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py
--- a/website/facedetection/admin.py
+++ b/website/facedetection/admin.py
@@ -1,9 +1,11 @@
-from django.contrib import admin
+from django.contrib import admin, messages
from django.db.models.query import Prefetch
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
+from facedetection.services import trigger_facedetection_lambda
+
from .models import (
FaceDetectionPhoto,
PhotoFaceEncoding,
@@ -44,11 +46,25 @@
list_filter = ["status", "marked_for_deletion_at"]
inlines = [ReferenceFaceEncodingInline]
+ actions = ["resubmit_reference_faces"]
+
def get_readonly_fields(self, request, obj=None):
if obj is None:
return ["created_at", "submitted_at", "status"]
return ["file", "user", "created_at", "submitted_at", "status"]
+ @admin.action(description="Resubmit reference faces for analysis.")
+ def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:
+ querylist = list(
+ queryset.filter(
+ status=FaceDetectionPhoto.Status.PROCESSING,
+ )
+ )
+ if querylist:
+ trigger_facedetection_lambda(querylist)
+ messages.success(request, "Resubmit successful.")
+ return querylist
+
class PhotoFaceEncodingInline(admin.TabularInline):
model = PhotoFaceEncoding
@@ -120,6 +136,8 @@
list_filter = ["status", "submitted_at"]
inlines = [PhotoFaceEncodingInline]
+ actions = ["resubmit_face_detection_photos"]
+
def get_queryset(self, request):
return (
super()
@@ -131,3 +149,17 @@
def has_add_permission(self, request):
return False
+
+ @admin.action(description="Resubmits face detection photos for analysis.")
+ def resubmit_face_detection_photos(
+ self, request, queryset
+ ) -> list[FaceDetectionPhoto]:
+ querylist = list(
+ queryset.filter(
+ status=FaceDetectionPhoto.Status.PROCESSING,
+ )
+ )
+ if querylist:
+ trigger_facedetection_lambda(querylist)
+ messages.success(request, "Resubmit successful.")
+ return querylist
| {"golden_diff": "diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py\n--- a/website/facedetection/admin.py\n+++ b/website/facedetection/admin.py\n@@ -1,9 +1,11 @@\n-from django.contrib import admin\n+from django.contrib import admin, messages\n from django.db.models.query import Prefetch\n from django.urls import reverse\n from django.utils.html import format_html\n from django.utils.safestring import mark_safe\n \n+from facedetection.services import trigger_facedetection_lambda\n+\n from .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n@@ -44,11 +46,25 @@\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n \n+ actions = [\"resubmit_reference_faces\"]\n+\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n \n+ @admin.action(description=\"Resubmit reference faces for analysis.\")\n+ def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n+\n \n class PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n@@ -120,6 +136,8 @@\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n \n+ actions = [\"resubmit_face_detection_photos\"]\n+\n def get_queryset(self, request):\n return (\n super()\n@@ -131,3 +149,17 @@\n \n def has_add_permission(self, request):\n return False\n+\n+ @admin.action(description=\"Resubmits face detection photos for analysis.\")\n+ def resubmit_face_detection_photos(\n+ self, request, queryset\n+ ) -> list[FaceDetectionPhoto]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n", "issue": "Facedetection admin resubmit action\nSometimes the facedetection lambda can fail randomly. 
Photos are resubmitted nightly if that happens, but it may be nice to have an alternative for that to do it sooner, without SSHing into the server.\r\n\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.db.models.query import Prefetch\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n ReferenceFace,\n ReferenceFaceEncoding,\n)\n\n\nclass ReferenceFaceEncodingInline(admin.TabularInline):\n model = ReferenceFaceEncoding\n readonly_fields = [\"num_matches\"]\n fields = [\"num_matches\"]\n can_delete = False\n extra = 0\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return super().get_queryset(request).only(\"reference\")\n\n\[email protected](ReferenceFace)\nclass ReferenceFaceAdmin(admin.ModelAdmin):\n list_display = [\n \"user\",\n \"status\",\n \"created_at\",\n \"marked_for_deletion_at\",\n ]\n\n search_fields = [\n \"user__username\",\n \"user__first_name\",\n \"user__last_name\",\n ]\n\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n\n\nclass PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n readonly_fields = [\"view_matches\"]\n fields = [\"view_matches\"]\n can_delete = False\n extra = 0\n\n @admin.display(description=\"Matches\")\n def view_matches(self, obj):\n reference_faces = [match.reference for match in obj.matches.all()]\n if not reference_faces:\n return \"-\"\n\n links = [\n format_html(\n '<a href=\"{url}\">{text}</a>',\n url=reverse(\n \"admin:facedetection_referenceface_change\",\n kwargs={\"object_id\": rf.pk},\n ),\n text=str(rf),\n )\n for rf in reference_faces\n ]\n return mark_safe(\", \".join(links))\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .only(\"photo\") # Don't select the 128 encoding fields.\n .prefetch_related(\n \"photo__photo__album\",\n Prefetch(\n \"matches\",\n queryset=ReferenceFaceEncoding.objects.select_related(\n \"reference\", \"reference__user\"\n ).only(\"reference\"),\n ),\n )\n )\n\n\[email protected](FaceDetectionPhoto)\nclass FaceDetectionPhotoAdmin(admin.ModelAdmin):\n list_display = [\n \"__str__\",\n \"status\",\n \"submitted_at\",\n \"num_faces\",\n ]\n\n readonly_fields = [\n \"photo\",\n \"submitted_at\",\n \"status\",\n ]\n\n search_fields = [\n \"photo__album__title\",\n \"photo__album__date\",\n \"photo__file\",\n ]\n\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"photo\")\n .prefetch_related(\"photo__album\")\n .select_properties(\"num_faces\")\n )\n\n def has_add_permission(self, request):\n return False\n", "path": "website/facedetection/admin.py"}]} | 1,648 | 549 |
gh_patches_debug_8484 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1789 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
recursive submodule support for `language: golang`
I added this hook to `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and then when I try to use this hook as:
```
repos:
- repo: https://github.com/google/go-jsonnet
rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa
hooks:
- id: jsonnet-format
```
then I see the following error.
```
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 378, in main
return run(args.config, store, args)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 403, in run
install_hook_envs(hooks, store)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 224, in install_hook_envs
_hook_install(hook)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 82, in _hook_install
lang.install_environment(
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py", line 81, in install_environment
cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')
return code: 2
expected return code: 0
stdout: (none)
stderr:
go: downloading github.com/sergi/go-diff v1.1.0
go: downloading github.com/fatih/color v1.9.0
go: downloading github.com/mattn/go-colorable v0.1.4
go: downloading github.com/mattn/go-isatty v0.0.11
go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
# github.com/google/go-jsonnet/c-bindings
libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found
```
Any idea? Thanks.
_Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_
</issue>
<code>
[start of pre_commit/languages/golang.py]
1 import contextlib
2 import os.path
3 import sys
4 from typing import Generator
5 from typing import Sequence
6 from typing import Tuple
7
8 import pre_commit.constants as C
9 from pre_commit import git
10 from pre_commit.envcontext import envcontext
11 from pre_commit.envcontext import PatchesT
12 from pre_commit.envcontext import Var
13 from pre_commit.hook import Hook
14 from pre_commit.languages import helpers
15 from pre_commit.prefix import Prefix
16 from pre_commit.util import clean_path_on_failure
17 from pre_commit.util import cmd_output
18 from pre_commit.util import cmd_output_b
19 from pre_commit.util import rmtree
20
21 ENVIRONMENT_DIR = 'golangenv'
22 get_default_version = helpers.basic_get_default_version
23 healthy = helpers.basic_healthy
24
25
26 def get_env_patch(venv: str) -> PatchesT:
27 return (
28 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
29 )
30
31
32 @contextlib.contextmanager
33 def in_env(prefix: Prefix) -> Generator[None, None, None]:
34 envdir = prefix.path(
35 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
36 )
37 with envcontext(get_env_patch(envdir)):
38 yield
39
40
41 def guess_go_dir(remote_url: str) -> str:
42 if remote_url.endswith('.git'):
43 remote_url = remote_url[:-1 * len('.git')]
44 looks_like_url = (
45 not remote_url.startswith('file://') and
46 ('//' in remote_url or '@' in remote_url)
47 )
48 remote_url = remote_url.replace(':', '/')
49 if looks_like_url:
50 _, _, remote_url = remote_url.rpartition('//')
51 _, _, remote_url = remote_url.rpartition('@')
52 return remote_url
53 else:
54 return 'unknown_src_dir'
55
56
57 def install_environment(
58 prefix: Prefix,
59 version: str,
60 additional_dependencies: Sequence[str],
61 ) -> None:
62 helpers.assert_version_default('golang', version)
63 directory = prefix.path(
64 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
65 )
66
67 with clean_path_on_failure(directory):
68 remote = git.get_remote_url(prefix.prefix_dir)
69 repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))
70
71 # Clone into the goenv we'll create
72 helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))
73
74 if sys.platform == 'cygwin': # pragma: no cover
75 _, gopath, _ = cmd_output('cygpath', '-w', directory)
76 gopath = gopath.strip()
77 else:
78 gopath = directory
79 env = dict(os.environ, GOPATH=gopath)
80 env.pop('GOBIN', None)
81 cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
82 for dependency in additional_dependencies:
83 cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)
84 # Same some disk space, we don't need these after installation
85 rmtree(prefix.path(directory, 'src'))
86 pkgdir = prefix.path(directory, 'pkg')
87 if os.path.exists(pkgdir): # pragma: no cover (go<1.10)
88 rmtree(pkgdir)
89
90
91 def run_hook(
92 hook: Hook,
93 file_args: Sequence[str],
94 color: bool,
95 ) -> Tuple[int, bytes]:
96 with in_env(hook.prefix):
97 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
98
[end of pre_commit/languages/golang.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py
--- a/pre_commit/languages/golang.py
+++ b/pre_commit/languages/golang.py
@@ -69,7 +69,8 @@
repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))
# Clone into the goenv we'll create
- helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))
+ cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)
+ helpers.run_setup_cmd(prefix, cmd)
if sys.platform == 'cygwin': # pragma: no cover
_, gopath, _ = cmd_output('cygpath', '-w', directory)
| {"golden_diff": "diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py\n--- a/pre_commit/languages/golang.py\n+++ b/pre_commit/languages/golang.py\n@@ -69,7 +69,8 @@\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n \n # Clone into the goenv we'll create\n- helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n+ cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)\n+ helpers.run_setup_cmd(prefix, cmd)\n \n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n", "issue": "recursive submodule support for `language: golang`\nI added this hook in `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and then when I try to use this hook as:\r\n```\r\nrepos:\r\n - repo: https://github.com/google/go-jsonnet\r\n rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa\r\n hooks:\r\n - id: jsonnet-format\r\n```\r\nthen I see following error.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py\", line 65, in error_handler\r\n yield\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py\", line 378, in main\r\n return run(args.config, store, args)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py\", line 403, in run\r\n install_hook_envs(hooks, store)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 224, in install_hook_envs\r\n _hook_install(hook)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 82, in _hook_install\r\n lang.install_environment(\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py\", line 81, in install_environment\r\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')\r\nreturn code: 2\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n go: downloading github.com/sergi/go-diff v1.1.0\r\n go: downloading github.com/fatih/color v1.9.0\r\n go: downloading github.com/mattn/go-colorable v0.1.4\r\n go: downloading github.com/mattn/go-isatty v0.0.11\r\n go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037\r\n # github.com/google/go-jsonnet/c-bindings\r\n libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found\r\n\r\n```\r\nAny idea? 
Thanks.\r\n\r\n_Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_\n", "before_files": [{"content": "import contextlib\nimport os.path\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'golangenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef guess_go_dir(remote_url: str) -> str:\n if remote_url.endswith('.git'):\n remote_url = remote_url[:-1 * len('.git')]\n looks_like_url = (\n not remote_url.startswith('file://') and\n ('//' in remote_url or '@' in remote_url)\n )\n remote_url = remote_url.replace(':', '/')\n if looks_like_url:\n _, _, remote_url = remote_url.rpartition('//')\n _, _, remote_url = remote_url.rpartition('@')\n return remote_url\n else:\n return 'unknown_src_dir'\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('golang', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n with clean_path_on_failure(directory):\n remote = git.get_remote_url(prefix.prefix_dir)\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n\n # Clone into the goenv we'll create\n helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n\n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n gopath = gopath.strip()\n else:\n gopath = directory\n env = dict(os.environ, GOPATH=gopath)\n env.pop('GOBIN', None)\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\n for dependency in additional_dependencies:\n cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)\n # Same some disk space, we don't need these after installation\n rmtree(prefix.path(directory, 'src'))\n pkgdir = prefix.path(directory, 'pkg')\n if os.path.exists(pkgdir): # pragma: no cover (go<1.10)\n rmtree(pkgdir)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/golang.py"}]} | 2,216 | 166 |
gh_patches_debug_4737 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename main storage/datastore/etc. docs since captions now have the name
We end up just repeating the names. See http://googlecloudplatform.github.io/gcloud-python/latest/

and http://gcloud-python.readthedocs.org/en/latest/

And notice that "Datastore" is both the heading name and the name of the first document in the heading.
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # gcloud documentation build configuration file, created by
4 # sphinx-quickstart on Tue Jan 21 22:24:47 2014.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 from email import message_from_string
15 import os
16 from pkg_resources import get_distribution
17 import sys
18 import urllib
19
20 import sphinx_rtd_theme
21
22
23 ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
24 LOCAL_READ_THE_DOCS = os.environ.get('LOCAL_RTD', None) == 'True'
25
26 # If extensions (or modules to document with autodoc) are in another directory,
27 # add these directories to sys.path here. If the directory is relative to the
28 # documentation root, use os.path.abspath to make it absolute, like shown here.
29 sys.path.insert(0, os.path.abspath('..'))
30
31 # -- General configuration -----------------------------------------------------
32
33 # If your documentation needs a minimal Sphinx version, state it here.
34 #needs_sphinx = '1.0'
35
36 # Add any Sphinx extension module names here, as strings. They can be extensions
37 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
38 extensions = [
39 'sphinx.ext.autodoc',
40 'sphinx.ext.autosummary',
41 'sphinx.ext.doctest',
42 'sphinx.ext.todo',
43 'sphinx.ext.viewcode',
44 ]
45
46 # Add any paths that contain templates here, relative to this directory.
47 if ON_READ_THE_DOCS or LOCAL_READ_THE_DOCS:
48 templates_path = []
49 else:
50 templates_path = ['_templates']
51
52 # The suffix of source filenames.
53 source_suffix = '.rst'
54
55 # The encoding of source files.
56 #source_encoding = 'utf-8-sig'
57
58 # The master toctree document.
59 master_doc = 'index'
60
61 # General information about the project.
62 project = u'gcloud'
63 copyright = u'2014, Google'
64
65 # The version info for the project you're documenting, acts as replacement for
66 # |version| and |release|, also used in various other places throughout the
67 # built documents.
68 #
69 # The short X.Y version.
70 distro = get_distribution('gcloud')
71 release = os.getenv('SPHINX_RELEASE', distro.version)
72
73 # The language for content autogenerated by Sphinx. Refer to documentation
74 # for a list of supported languages.
75 #language = None
76
77 # There are two options for replacing |today|: either, you set today to some
78 # non-false value, then it is used:
79 #today = ''
80 # Else, today_fmt is used as the format for a strftime call.
81 #today_fmt = '%B %d, %Y'
82
83 # List of patterns, relative to source directory, that match files and
84 # directories to ignore when looking for source files.
85 exclude_patterns = ['_build', '_components/*']
86
87 # The reST default role (used for this markup: `text`) to use for all documents.
88 #default_role = None
89
90 # If true, '()' will be appended to :func: etc. cross-reference text.
91 #add_function_parentheses = True
92
93 # If true, the current module name will be prepended to all description
94 # unit titles (such as .. function::).
95 #add_module_names = True
96
97 # If true, sectionauthor and moduleauthor directives will be shown in the
98 # output. They are ignored by default.
99 #show_authors = False
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = 'sphinx'
103
104 # A list of ignored prefixes for module index sorting.
105 #modindex_common_prefix = []
106
107
108 # -- Options for HTML output ---------------------------------------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112
113 if LOCAL_READ_THE_DOCS:
114 html_theme = 'sphinx_rtd_theme'
115 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
116 elif not ON_READ_THE_DOCS:
117 html_style = 'css/main.css'
118
119 # Theme options are theme-specific and customize the look and feel of a theme
120 # further. For a list of options available for each theme, see the
121 # documentation.
122 #html_theme_options = {}
123
124 # Add any paths that contain custom themes here, relative to this directory.
125 #html_theme_path = []
126
127 # The name for this set of Sphinx documents. If None, it defaults to
128 # "<project> v<release> documentation".
129 #html_title = None
130
131 # A shorter title for the navigation bar. Default is the same as html_title.
132 #html_short_title = None
133
134 # The name of an image file (relative to this directory) to place at the top
135 # of the sidebar.
136 #html_logo = None
137
138 # The name of an image file (within the static path) to use as favicon of the
139 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
140 # pixels large.
141 html_favicon = '_static/images/favicon.ico'
142
143 # Add any paths that contain custom static files (such as style sheets) here,
144 # relative to this directory. They are copied after the builtin static files,
145 # so a file named "default.css" will overwrite the builtin "default.css".
146 html_static_path = ['_static']
147
148 html_add_permalinks = '#'
149
150 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
151 # using the given strftime format.
152 #html_last_updated_fmt = '%b %d, %Y'
153
154 # If true, SmartyPants will be used to convert quotes and dashes to
155 # typographically correct entities.
156 #html_use_smartypants = True
157
158 # Custom sidebar templates, maps document names to template names.
159 #html_sidebars = {}
160
161 # Additional templates that should be rendered to pages, maps page names to
162 # template names.
163 #html_additional_pages = {}
164
165 # If false, no module index is generated.
166 #html_domain_indices = True
167
168 # If false, no index is generated.
169 #html_use_index = True
170
171 # If true, the index is split into individual pages for each letter.
172 #html_split_index = False
173
174 # If true, links to the reST sources are added to the pages.
175 #html_show_sourcelink = True
176
177 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
178 #html_show_sphinx = True
179
180 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
181 #html_show_copyright = True
182
183 # If true, an OpenSearch description file will be output, and all pages will
184 # contain a <link> tag referring to it. The value of this option must be the
185 # base URL from which the finished HTML is served.
186 #html_use_opensearch = ''
187
188 # This is the file name suffix for HTML files (e.g. ".xhtml").
189 #html_file_suffix = None
190
191 # Output file base name for HTML help builder.
192 htmlhelp_basename = 'gclouddoc'
193
194 html_context = {}
195
196
197 # -- Options for LaTeX output --------------------------------------------------
198
199 latex_elements = {
200 # The paper size ('letterpaper' or 'a4paper').
201 #'papersize': 'letterpaper',
202
203 # The font size ('10pt', '11pt' or '12pt').
204 #'pointsize': '10pt',
205
206 # Additional stuff for the LaTeX preamble.
207 #'preamble': '',
208 }
209
210 metadata = distro.get_metadata(distro.PKG_INFO)
211 author = message_from_string(metadata).get('Author')
212 # Grouping the document tree into LaTeX files. List of tuples
213 # (source start file, target name, title, author, documentclass [howto/manual]).
214 latex_documents = [
215 ('index', 'gcloud.tex', u'gCloud Documentation',
216 author, 'manual'),
217 ]
218
219 # The name of an image file (relative to this directory) to place at the top of
220 # the title page.
221 #latex_logo = None
222
223 # For "manual" documents, if this is true, then toplevel headings are parts,
224 # not chapters.
225 #latex_use_parts = False
226
227 # If true, show page references after internal links.
228 #latex_show_pagerefs = False
229
230 # If true, show URL addresses after external links.
231 #latex_show_urls = False
232
233 # Documents to append as an appendix to all manuals.
234 #latex_appendices = []
235
236 # If false, no module index is generated.
237 #latex_domain_indices = True
238
239
240 # -- Options for manual page output --------------------------------------------
241
242 # One entry per manual page. List of tuples
243 # (source start file, name, description, authors, manual section).
244 man_pages = [
245 ('index', 'gcloud', u'gCloud Documentation',
246 [author], 1)
247 ]
248
249 # If true, show URL addresses after external links.
250 #man_show_urls = False
251
252
253 # -- Options for Texinfo output ------------------------------------------------
254
255 # Grouping the document tree into Texinfo files. List of tuples
256 # (source start file, target name, title, author,
257 # dir menu entry, description, category)
258 texinfo_documents = [
259 ('index', 'gcloud', u'gCloud Documentation',
260 author, 'gcloud', 'Python API for Google Cloud.',
261 'Miscellaneous'),
262 ]
263
264 # Documents to append as an appendix to all manuals.
265 #texinfo_appendices = []
266
267 # If false, no module index is generated.
268 #texinfo_domain_indices = True
269
270 # How to display URL addresses: 'footnote', 'no', or 'inline'.
271 #texinfo_show_urls = 'footnote'
272
273 # This pulls class descriptions from the class docstring,
274 # and parameter definitions from the __init__ docstring.
275 autoclass_content = 'both'
276
277 issue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'
278 'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))
279 issue_uri_template = (
280 issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +
281 urllib.quote('\nRelease: ') + '{1}')
282
283 html_context.update(
284 issue_uri=issue_uri,
285 issue_uri_template=issue_uri_template,
286 )
287
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -82,7 +82,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['_build', '_components/*']
+exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -82,7 +82,7 @@\n \n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n-exclude_patterns = ['_build', '_components/*']\n+exclude_patterns = ['_build']\n \n # The reST default role (used for this markup: `text`) to use for all documents.\n #default_role = None\n", "issue": "Rename main storage/datastore/etc. docs since captions now have the name\nWe end up just repeating the names. See http://googlecloudplatform.github.io/gcloud-python/latest/\n\n\n\nand http://gcloud-python.readthedocs.org/en/latest/\n\n\n\nAnd notice that \"Datastore\" is both the heading name and the name of the first document in the heading.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# gcloud documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 21 22:24:47 2014.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom email import message_from_string\nimport os\nfrom pkg_resources import get_distribution\nimport sys\nimport urllib\n\nimport sphinx_rtd_theme\n\n\nON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'\nLOCAL_READ_THE_DOCS = os.environ.get('LOCAL_RTD', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\nif ON_READ_THE_DOCS or LOCAL_READ_THE_DOCS:\n templates_path = []\nelse:\n templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'gcloud'\ncopyright = u'2014, Google'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ndistro = get_distribution('gcloud')\nrelease = os.getenv('SPHINX_RELEASE', distro.version)\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_components/*']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\nif LOCAL_READ_THE_DOCS:\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nelif not ON_READ_THE_DOCS:\n html_style = 'css/main.css'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/images/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_add_permalinks = '#'\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gclouddoc'\n\nhtml_context = {}\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\nmetadata = distro.get_metadata(distro.PKG_INFO)\nauthor = message_from_string(metadata).get('Author')\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'gcloud.tex', u'gCloud Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'gcloud', u'gCloud Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'gcloud', u'gCloud Documentation',\n author, 'gcloud', 'Python API for Google Cloud.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# This pulls class descriptions from the class docstring,\n# and parameter definitions from the __init__ docstring.\nautoclass_content = 'both'\n\nissue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'\n 'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))\nissue_uri_template = (\n issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +\n urllib.quote('\\nRelease: ') + '{1}')\n\nhtml_context.update(\n issue_uri=issue_uri,\n issue_uri_template=issue_uri_template,\n)\n", "path": "docs/conf.py"}]} | 3,747 | 110 |
gh_patches_debug_31338 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON files created by provision are not human readable
</issue>
<code>
[start of nvflare/lighter/impl/static_file.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import json
17 import os
18
19 import yaml
20
21 from nvflare.lighter.spec import Builder
22 from nvflare.lighter.utils import sh_replace
23
24
25 class StaticFileBuilder(Builder):
26 def __init__(
27 self,
28 enable_byoc=False,
29 config_folder="",
30 app_validator="",
31 docker_image="",
32 snapshot_persistor="",
33 overseer_agent="",
34 ):
35 """Build all static files from template.
36
37 Uses the information from project.yml through study to go through the participants and write the contents of
38 each file with the template, and replacing with the appropriate values from project.yml.
39
40 Usually, two main categories of files are created in all FL participants, static and dynamic. Static files
41 have similar contents among different participants, with small differences. For example, the differences in
42 sub_start.sh are client name and python module. Those are basically static files. This builder uses template
43 file and string replacement to generate those static files for each participant.
44
45 Args:
46 enable_byoc: for each participant, true to enable loading of code in the custom folder of applications
47 config_folder: usually "config"
48 app_validator: optional path to an app validator to verify that uploaded app has the expected structure
49 docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin
50 """
51 self.enable_byoc = enable_byoc
52 self.config_folder = config_folder
53 self.docker_image = docker_image
54 self.app_validator = app_validator
55 self.overseer_agent = overseer_agent
56 self.snapshot_persistor = snapshot_persistor
57
58 def _write(self, file_full_path, content, mode, exe=False):
59 mode = mode + "w"
60 with open(file_full_path, mode) as f:
61 f.write(content)
62 if exe:
63 os.chmod(file_full_path, 0o755)
64
65 def _build_overseer(self, overseer, ctx):
66 dest_dir = self.get_kit_dir(overseer, ctx)
67 self._write(
68 os.path.join(dest_dir, "start.sh"),
69 self.template["start_svr_sh"],
70 "t",
71 exe=True,
72 )
73 protocol = overseer.props.get("protocol", "http")
74 api_root = overseer.props.get("api_root", "/api/v1/")
75 default_port = "443" if protocol == "https" else "80"
76 port = overseer.props.get("port", default_port)
77 replacement_dict = {"port": port}
78 admins = self.study.get_participants_by_type("admin", first_only=False)
79 privilege_dict = dict()
80 for admin in admins:
81 for role in admin.props.get("roles", {}):
82 if role in privilege_dict:
83 privilege_dict[role].append(admin.subject)
84 else:
85 privilege_dict[role] = [admin.subject]
86 self._write(
87 os.path.join(dest_dir, "privilege.yml"),
88 yaml.dump(privilege_dict, Dumper=yaml.Dumper),
89 "t",
90 exe=False,
91 )
92
93 if self.docker_image:
94 self._write(
95 os.path.join(dest_dir, "docker.sh"),
96 sh_replace(self.template["docker_svr_sh"], replacement_dict),
97 "t",
98 exe=True,
99 )
100 self._write(
101 os.path.join(dest_dir, "gunicorn.conf.py"),
102 sh_replace(self.template["gunicorn_conf_py"], replacement_dict),
103 "t",
104 exe=False,
105 )
106 self._write(
107 os.path.join(dest_dir, "start.sh"),
108 self.template["start_ovsr_sh"],
109 "t",
110 exe=True,
111 )
112 if port:
113 ctx["overseer_end_point"] = f"{protocol}://{overseer.name}:{port}{api_root}"
114 else:
115 ctx["overseer_end_point"] = f"{protocol}://{overseer.name}{api_root}"
116
117 def _build_server(self, server, ctx):
118 config = json.loads(self.template["fed_server"])
119 dest_dir = self.get_kit_dir(server, ctx)
120 server_0 = config["servers"][0]
121 server_0["name"] = self.study_name
122 admin_port = server.props.get("admin_port", 8003)
123 ctx["admin_port"] = admin_port
124 fed_learn_port = server.props.get("fed_learn_port", 8002)
125 ctx["fed_learn_port"] = fed_learn_port
126 ctx["server_name"] = server.name
127 server_0["service"]["target"] = f"{server.name}:{fed_learn_port}"
128 server_0["admin_host"] = server.name
129 server_0["admin_port"] = admin_port
130 config["enable_byoc"] = server.enable_byoc
131 if self.app_validator:
132 config["app_validator"] = {"path": self.app_validator}
133 if self.overseer_agent:
134 overseer_agent = copy.deepcopy(self.overseer_agent)
135 if overseer_agent.get("overseer_exists", True):
136 overseer_agent["args"] = {
137 "role": "server",
138 "overseer_end_point": ctx.get("overseer_end_point", ""),
139 "project": self.study_name,
140 "name": server.name,
141 "fl_port": str(fed_learn_port),
142 "admin_port": str(admin_port),
143 }
144 overseer_agent.pop("overseer_exists", None)
145 config["overseer_agent"] = overseer_agent
146 if self.snapshot_persistor:
147 config["snapshot_persistor"] = self.snapshot_persistor
148 self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config), "t")
149 replacement_dict = {
150 "admin_port": admin_port,
151 "fed_learn_port": fed_learn_port,
152 "config_folder": self.config_folder,
153 "docker_image": self.docker_image,
154 }
155 if self.docker_image:
156 self._write(
157 os.path.join(dest_dir, "docker.sh"),
158 sh_replace(self.template["docker_svr_sh"], replacement_dict),
159 "t",
160 exe=True,
161 )
162 self._write(
163 os.path.join(dest_dir, "start.sh"),
164 self.template["start_svr_sh"],
165 "t",
166 exe=True,
167 )
168 self._write(
169 os.path.join(dest_dir, "sub_start.sh"),
170 sh_replace(self.template["sub_start_svr_sh"], replacement_dict),
171 "t",
172 exe=True,
173 )
174 self._write(
175 os.path.join(dest_dir, "log.config"),
176 self.template["log_config"],
177 "t",
178 )
179 self._write(
180 os.path.join(dest_dir, "readme.txt"),
181 self.template["readme_fs"],
182 "t",
183 )
184 self._write(
185 os.path.join(dest_dir, "stop_fl.sh"),
186 self.template["stop_fl_sh"],
187 "t",
188 exe=True,
189 )
190
191 def _build_client(self, client, ctx):
192 config = json.loads(self.template["fed_client"])
193 dest_dir = self.get_kit_dir(client, ctx)
194 fed_learn_port = ctx.get("fed_learn_port")
195 server_name = ctx.get("server_name")
196 # config["servers"][0]["service"]["target"] = f"{server_name}:{fed_learn_port}"
197 config["servers"][0]["name"] = self.study_name
198 config["enable_byoc"] = client.enable_byoc
199 replacement_dict = {
200 "client_name": f"{client.subject}",
201 "config_folder": self.config_folder,
202 "docker_image": self.docker_image,
203 }
204 if self.overseer_agent:
205 overseer_agent = copy.deepcopy(self.overseer_agent)
206 if overseer_agent.get("overseer_exists", True):
207 overseer_agent["args"] = {
208 "role": "client",
209 "overseer_end_point": ctx.get("overseer_end_point", ""),
210 "project": self.study_name,
211 "name": client.subject,
212 }
213 overseer_agent.pop("overseer_exists", None)
214 config["overseer_agent"] = overseer_agent
215
216 self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config), "t")
217 if self.docker_image:
218 self._write(
219 os.path.join(dest_dir, "docker.sh"),
220 sh_replace(self.template["docker_cln_sh"], replacement_dict),
221 "t",
222 exe=True,
223 )
224 self._write(
225 os.path.join(dest_dir, "start.sh"),
226 self.template["start_cln_sh"],
227 "t",
228 exe=True,
229 )
230 self._write(
231 os.path.join(dest_dir, "sub_start.sh"),
232 sh_replace(self.template["sub_start_cln_sh"], replacement_dict),
233 "t",
234 exe=True,
235 )
236 self._write(
237 os.path.join(dest_dir, "log.config"),
238 self.template["log_config"],
239 "t",
240 )
241 self._write(
242 os.path.join(dest_dir, "readme.txt"),
243 self.template["readme_fc"],
244 "t",
245 )
246 self._write(
247 os.path.join(dest_dir, "stop_fl.sh"),
248 self.template["stop_fl_sh"],
249 "t",
250 exe=True,
251 )
252
253 def _build_admin(self, admin, ctx):
254 config = json.loads(self.template["fed_admin"])
255 dest_dir = self.get_kit_dir(admin, ctx)
256 admin_port = ctx.get("admin_port")
257 server_name = ctx.get("server_name")
258
259 replacement_dict = {
260 "cn": f"{server_name}",
261 "admin_port": f"{admin_port}",
262 "docker_image": self.docker_image,
263 }
264 agent_config = dict()
265 if self.overseer_agent:
266 overseer_agent = copy.deepcopy(self.overseer_agent)
267 if overseer_agent.get("overseer_exists", True):
268 overseer_agent["args"] = {
269 "role": "admin",
270 "overseer_end_point": ctx.get("overseer_end_point", ""),
271 "project": self.study_name,
272 "name": admin.subject,
273 }
274 overseer_agent.pop("overseer_exists", None)
275 agent_config["overseer_agent"] = overseer_agent
276 config["admin"].update(agent_config)
277 self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config), "t")
278 if self.docker_image:
279 self._write(
280 os.path.join(dest_dir, "docker.sh"),
281 sh_replace(self.template["docker_adm_sh"], replacement_dict),
282 "t",
283 exe=True,
284 )
285 self._write(
286 os.path.join(dest_dir, "fl_admin.sh"),
287 sh_replace(self.template["fl_admin_sh"], replacement_dict),
288 "t",
289 exe=True,
290 )
291 self._write(
292 os.path.join(dest_dir, "readme.txt"),
293 self.template["readme_am"],
294 "t",
295 )
296
297 def build(self, study, ctx):
298 self.template = ctx.get("template")
299 self.study_name = study.name
300 self.study = study
301 overseer = study.get_participants_by_type("overseer")
302 self._build_overseer(overseer, ctx)
303 servers = study.get_participants_by_type("server", first_only=False)
304 for server in servers:
305 self._build_server(server, ctx)
306
307 for client in study.get_participants_by_type("client", first_only=False):
308 self._build_client(client, ctx)
309
310 for admin in study.get_participants_by_type("admin", first_only=False):
311 self._build_admin(admin, ctx)
312
[end of nvflare/lighter/impl/static_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/lighter/impl/static_file.py b/nvflare/lighter/impl/static_file.py
--- a/nvflare/lighter/impl/static_file.py
+++ b/nvflare/lighter/impl/static_file.py
@@ -145,7 +145,7 @@
config["overseer_agent"] = overseer_agent
if self.snapshot_persistor:
config["snapshot_persistor"] = self.snapshot_persistor
- self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_server.json"), json.dumps(config, sort_keys=True, indent=2), "t")
replacement_dict = {
"admin_port": admin_port,
"fed_learn_port": fed_learn_port,
@@ -213,7 +213,7 @@
overseer_agent.pop("overseer_exists", None)
config["overseer_agent"] = overseer_agent
- self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_client.json"), json.dumps(config, sort_keys=True, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
@@ -274,7 +274,7 @@
overseer_agent.pop("overseer_exists", None)
agent_config["overseer_agent"] = overseer_agent
config["admin"].update(agent_config)
- self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config), "t")
+ self._write(os.path.join(dest_dir, "fed_admin.json"), json.dumps(config, sort_keys=True, indent=2), "t")
if self.docker_image:
self._write(
os.path.join(dest_dir, "docker.sh"),
| {"golden_diff": "diff --git a/nvflare/lighter/impl/static_file.py b/nvflare/lighter/impl/static_file.py\n--- a/nvflare/lighter/impl/static_file.py\n+++ b/nvflare/lighter/impl/static_file.py\n@@ -145,7 +145,7 @@\n config[\"overseer_agent\"] = overseer_agent\n if self.snapshot_persistor:\n config[\"snapshot_persistor\"] = self.snapshot_persistor\n- self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n replacement_dict = {\n \"admin_port\": admin_port,\n \"fed_learn_port\": fed_learn_port,\n@@ -213,7 +213,7 @@\n overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n \n- self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n@@ -274,7 +274,7 @@\n overseer_agent.pop(\"overseer_exists\", None)\n agent_config[\"overseer_agent\"] = overseer_agent\n config[\"admin\"].update(agent_config)\n- self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config), \"t\")\n+ self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config, sort_keys=True, indent=2), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n", "issue": "json files created by provision are not human readable\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport json\nimport os\n\nimport yaml\n\nfrom nvflare.lighter.spec import Builder\nfrom nvflare.lighter.utils import sh_replace\n\n\nclass StaticFileBuilder(Builder):\n def __init__(\n self,\n enable_byoc=False,\n config_folder=\"\",\n app_validator=\"\",\n docker_image=\"\",\n snapshot_persistor=\"\",\n overseer_agent=\"\",\n ):\n \"\"\"Build all static files from template.\n\n Uses the information from project.yml through study to go through the participants and write the contents of\n each file with the template, and replacing with the appropriate values from project.yml.\n\n Usually, two main categories of files are created in all FL participants, static and dynamic. Static files\n have similar contents among different participants, with small differences. For example, the differences in\n sub_start.sh are client name and python module. Those are basically static files. 
This builder uses template\n file and string replacement to generate those static files for each participant.\n\n Args:\n enable_byoc: for each participant, true to enable loading of code in the custom folder of applications\n config_folder: usually \"config\"\n app_validator: optional path to an app validator to verify that uploaded app has the expected structure\n docker_image: when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin\n \"\"\"\n self.enable_byoc = enable_byoc\n self.config_folder = config_folder\n self.docker_image = docker_image\n self.app_validator = app_validator\n self.overseer_agent = overseer_agent\n self.snapshot_persistor = snapshot_persistor\n\n def _write(self, file_full_path, content, mode, exe=False):\n mode = mode + \"w\"\n with open(file_full_path, mode) as f:\n f.write(content)\n if exe:\n os.chmod(file_full_path, 0o755)\n\n def _build_overseer(self, overseer, ctx):\n dest_dir = self.get_kit_dir(overseer, ctx)\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n protocol = overseer.props.get(\"protocol\", \"http\")\n api_root = overseer.props.get(\"api_root\", \"/api/v1/\")\n default_port = \"443\" if protocol == \"https\" else \"80\"\n port = overseer.props.get(\"port\", default_port)\n replacement_dict = {\"port\": port}\n admins = self.study.get_participants_by_type(\"admin\", first_only=False)\n privilege_dict = dict()\n for admin in admins:\n for role in admin.props.get(\"roles\", {}):\n if role in privilege_dict:\n privilege_dict[role].append(admin.subject)\n else:\n privilege_dict[role] = [admin.subject]\n self._write(\n os.path.join(dest_dir, \"privilege.yml\"),\n yaml.dump(privilege_dict, Dumper=yaml.Dumper),\n \"t\",\n exe=False,\n )\n\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"gunicorn.conf.py\"),\n sh_replace(self.template[\"gunicorn_conf_py\"], replacement_dict),\n \"t\",\n exe=False,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_ovsr_sh\"],\n \"t\",\n exe=True,\n )\n if port:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}:{port}{api_root}\"\n else:\n ctx[\"overseer_end_point\"] = f\"{protocol}://{overseer.name}{api_root}\"\n\n def _build_server(self, server, ctx):\n config = json.loads(self.template[\"fed_server\"])\n dest_dir = self.get_kit_dir(server, ctx)\n server_0 = config[\"servers\"][0]\n server_0[\"name\"] = self.study_name\n admin_port = server.props.get(\"admin_port\", 8003)\n ctx[\"admin_port\"] = admin_port\n fed_learn_port = server.props.get(\"fed_learn_port\", 8002)\n ctx[\"fed_learn_port\"] = fed_learn_port\n ctx[\"server_name\"] = server.name\n server_0[\"service\"][\"target\"] = f\"{server.name}:{fed_learn_port}\"\n server_0[\"admin_host\"] = server.name\n server_0[\"admin_port\"] = admin_port\n config[\"enable_byoc\"] = server.enable_byoc\n if self.app_validator:\n config[\"app_validator\"] = {\"path\": self.app_validator}\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"server\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": server.name,\n \"fl_port\": str(fed_learn_port),\n \"admin_port\": str(admin_port),\n }\n 
overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n if self.snapshot_persistor:\n config[\"snapshot_persistor\"] = self.snapshot_persistor\n self._write(os.path.join(dest_dir, \"fed_server.json\"), json.dumps(config), \"t\")\n replacement_dict = {\n \"admin_port\": admin_port,\n \"fed_learn_port\": fed_learn_port,\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_svr_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_svr_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fs\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_client(self, client, ctx):\n config = json.loads(self.template[\"fed_client\"])\n dest_dir = self.get_kit_dir(client, ctx)\n fed_learn_port = ctx.get(\"fed_learn_port\")\n server_name = ctx.get(\"server_name\")\n # config[\"servers\"][0][\"service\"][\"target\"] = f\"{server_name}:{fed_learn_port}\"\n config[\"servers\"][0][\"name\"] = self.study_name\n config[\"enable_byoc\"] = client.enable_byoc\n replacement_dict = {\n \"client_name\": f\"{client.subject}\",\n \"config_folder\": self.config_folder,\n \"docker_image\": self.docker_image,\n }\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] = {\n \"role\": \"client\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": client.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n config[\"overseer_agent\"] = overseer_agent\n\n self._write(os.path.join(dest_dir, \"fed_client.json\"), json.dumps(config), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"start.sh\"),\n self.template[\"start_cln_sh\"],\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"sub_start.sh\"),\n sh_replace(self.template[\"sub_start_cln_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"log.config\"),\n self.template[\"log_config\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_fc\"],\n \"t\",\n )\n self._write(\n os.path.join(dest_dir, \"stop_fl.sh\"),\n self.template[\"stop_fl_sh\"],\n \"t\",\n exe=True,\n )\n\n def _build_admin(self, admin, ctx):\n config = json.loads(self.template[\"fed_admin\"])\n dest_dir = self.get_kit_dir(admin, ctx)\n admin_port = ctx.get(\"admin_port\")\n server_name = ctx.get(\"server_name\")\n\n replacement_dict = {\n \"cn\": f\"{server_name}\",\n \"admin_port\": f\"{admin_port}\",\n \"docker_image\": self.docker_image,\n }\n agent_config = dict()\n if self.overseer_agent:\n overseer_agent = copy.deepcopy(self.overseer_agent)\n if overseer_agent.get(\"overseer_exists\", True):\n overseer_agent[\"args\"] 
= {\n \"role\": \"admin\",\n \"overseer_end_point\": ctx.get(\"overseer_end_point\", \"\"),\n \"project\": self.study_name,\n \"name\": admin.subject,\n }\n overseer_agent.pop(\"overseer_exists\", None)\n agent_config[\"overseer_agent\"] = overseer_agent\n config[\"admin\"].update(agent_config)\n self._write(os.path.join(dest_dir, \"fed_admin.json\"), json.dumps(config), \"t\")\n if self.docker_image:\n self._write(\n os.path.join(dest_dir, \"docker.sh\"),\n sh_replace(self.template[\"docker_adm_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"fl_admin.sh\"),\n sh_replace(self.template[\"fl_admin_sh\"], replacement_dict),\n \"t\",\n exe=True,\n )\n self._write(\n os.path.join(dest_dir, \"readme.txt\"),\n self.template[\"readme_am\"],\n \"t\",\n )\n\n def build(self, study, ctx):\n self.template = ctx.get(\"template\")\n self.study_name = study.name\n self.study = study\n overseer = study.get_participants_by_type(\"overseer\")\n self._build_overseer(overseer, ctx)\n servers = study.get_participants_by_type(\"server\", first_only=False)\n for server in servers:\n self._build_server(server, ctx)\n\n for client in study.get_participants_by_type(\"client\", first_only=False):\n self._build_client(client, ctx)\n\n for admin in study.get_participants_by_type(\"admin\", first_only=False):\n self._build_admin(admin, ctx)\n", "path": "nvflare/lighter/impl/static_file.py"}]} | 4,012 | 421 |
gh_patches_debug_23810 | rasdani/github-patches | git_diff | translate__pootle-5915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dates not getting localised in browse tables
It seems the dates are not getting localised as they should be.
</issue>
<code>
[start of pootle/apps/pootle_app/panels.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import re
10
11 from django.utils.safestring import mark_safe
12
13 from pootle.core.browser import get_table_headings
14 from pootle.core.decorators import persistent_property
15 from pootle.core.views.panels import TablePanel
16
17 from pootle.i18n.dates import timesince
18
19
20 class ChildrenPanel(TablePanel):
21 panel_name = "children"
22 _table_fields = (
23 'name', 'progress', 'activity',
24 'total', 'need-translation',
25 'suggestions', 'critical')
26
27 @property
28 def table_fields(self):
29 fields = (
30 ("name", "total")
31 if self.view.is_templates_context
32 else self._table_fields)
33 if self.view.has_admin_access:
34 fields += ('last-updated', )
35 return fields
36
37 @property
38 def children(self):
39 return self.view.object_children
40
41 @property
42 def table(self):
43 if self.view.object_children:
44 return {
45 'id': self.view.view_name,
46 'fields': self.table_fields,
47 'headings': get_table_headings(self.table_fields),
48 'rows': self.view.object_children}
49
50 @persistent_property
51 def _content(self):
52 return self.render()
53
54 @property
55 def child_update_times(self):
56 _times = {}
57 for child in self.children:
58 if not child.get("stats"):
59 continue
60 last_created_unit = (
61 timesince(child["stats"]["last_created_unit"]["creation_time"])
62 if child["stats"].get("last_created_unit")
63 else None)
64 last_submission = (
65 timesince(child["stats"]["last_submission"]["mtime"])
66 if child["stats"].get("last_submission")
67 else None)
68 _times[child["code"]] = (last_submission, last_created_unit)
69 return _times
70
71 @property
72 def content(self):
73 return self.update_times(self._content)
74
75 def get_context_data(self):
76 return dict(
77 table=self.table,
78 can_translate=self.view.can_translate)
79
80 def update_times(self, content):
81 times = {}
82 update_times = self.child_update_times.items()
83 for name, (last_submission, last_created_unit) in update_times:
84 if last_submission:
85 times[
86 "_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_"
87 % name] = last_submission
88 if last_created_unit:
89 times[
90 "_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_"
91 % name] = last_created_unit
92 if times:
93 regex = re.compile("(%s)" % "|".join(map(re.escape, times.keys())))
94 return mark_safe(
95 regex.sub(
96 lambda match: times[match.string[match.start():match.end()]],
97 content))
98 return content
99
[end of pootle/apps/pootle_app/panels.py]
[start of pootle/i18n/dates.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import locale as system_locale
10 import os
11 from datetime import datetime
12
13 from babel.dates import format_timedelta
14
15 from django.utils import translation
16
17
18 class LocalDate(object):
19
20 def __init__(self):
21 if not self.locale_code and not os.name == "nt":
22 self.set_locale()
23
24 @property
25 def default_locale(self):
26 return translation.to_locale(translation.get_language())
27
28 def set_locale(self):
29 system_locale.setlocale(
30 system_locale.LC_ALL,
31 (self.default_locale, 'UTF-8'))
32
33 @property
34 def locale_code(self):
35 return system_locale.getlocale()[0]
36
37 def format_timesince(self, timestamp, locale=None):
38 return format_timedelta(
39 datetime.now()
40 - datetime.fromtimestamp(
41 timestamp),
42 locale=(
43 locale
44 or self.locale_code
45 or self.default_locale))
46
47
48 localdate = LocalDate()
49
50
51 def timesince(timestamp, locale=None):
52 return localdate.format_timesince(timestamp, locale=locale)
53
[end of pootle/i18n/dates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py
--- a/pootle/apps/pootle_app/panels.py
+++ b/pootle/apps/pootle_app/panels.py
@@ -58,11 +58,15 @@
if not child.get("stats"):
continue
last_created_unit = (
- timesince(child["stats"]["last_created_unit"]["creation_time"])
+ timesince(
+ child["stats"]["last_created_unit"]["creation_time"],
+ locale=self.view.request_lang)
if child["stats"].get("last_created_unit")
else None)
last_submission = (
- timesince(child["stats"]["last_submission"]["mtime"])
+ timesince(
+ child["stats"]["last_submission"]["mtime"],
+ locale=self.view.request_lang)
if child["stats"].get("last_submission")
else None)
_times[child["code"]] = (last_submission, last_created_unit)
diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py
--- a/pootle/i18n/dates.py
+++ b/pootle/i18n/dates.py
@@ -49,4 +49,6 @@
def timesince(timestamp, locale=None):
+ if locale:
+ locale = translation.to_locale(locale)
return localdate.format_timesince(timestamp, locale=locale)
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py\n--- a/pootle/apps/pootle_app/panels.py\n+++ b/pootle/apps/pootle_app/panels.py\n@@ -58,11 +58,15 @@\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n- timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n+ timesince(\n+ child[\"stats\"][\"last_created_unit\"][\"creation_time\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n- timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n+ timesince(\n+ child[\"stats\"][\"last_submission\"][\"mtime\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\ndiff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py\n--- a/pootle/i18n/dates.py\n+++ b/pootle/i18n/dates.py\n@@ -49,4 +49,6 @@\n \n \n def timesince(timestamp, locale=None):\n+ if locale:\n+ locale = translation.to_locale(locale)\n return localdate.format_timesince(timestamp, locale=locale)\n", "issue": "Dates not getting localised in browse tables\nseems like the dates are not getting localised as they should\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.views.panels import TablePanel\n\nfrom pootle.i18n.dates import timesince\n\n\nclass ChildrenPanel(TablePanel):\n panel_name = \"children\"\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n @property\n def table_fields(self):\n fields = (\n (\"name\", \"total\")\n if self.view.is_templates_context\n else self._table_fields)\n if self.view.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @property\n def children(self):\n return self.view.object_children\n\n @property\n def table(self):\n if self.view.object_children:\n return {\n 'id': self.view.view_name,\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.view.object_children}\n\n @persistent_property\n def _content(self):\n return self.render()\n\n @property\n def child_update_times(self):\n _times = {}\n for child in self.children:\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\n return _times\n\n @property\n def content(self):\n return self.update_times(self._content)\n\n def get_context_data(self):\n return dict(\n table=self.table,\n can_translate=self.view.can_translate)\n\n def update_times(self, content):\n times = {}\n update_times = self.child_update_times.items()\n for name, (last_submission, last_created_unit) in update_times:\n if last_submission:\n times[\n \"_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_\"\n % name] = 
last_submission\n if last_created_unit:\n times[\n \"_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_\"\n % name] = last_created_unit\n if times:\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, times.keys())))\n return mark_safe(\n regex.sub(\n lambda match: times[match.string[match.start():match.end()]],\n content))\n return content\n", "path": "pootle/apps/pootle_app/panels.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale as system_locale\nimport os\nfrom datetime import datetime\n\nfrom babel.dates import format_timedelta\n\nfrom django.utils import translation\n\n\nclass LocalDate(object):\n\n def __init__(self):\n if not self.locale_code and not os.name == \"nt\":\n self.set_locale()\n\n @property\n def default_locale(self):\n return translation.to_locale(translation.get_language())\n\n def set_locale(self):\n system_locale.setlocale(\n system_locale.LC_ALL,\n (self.default_locale, 'UTF-8'))\n\n @property\n def locale_code(self):\n return system_locale.getlocale()[0]\n\n def format_timesince(self, timestamp, locale=None):\n return format_timedelta(\n datetime.now()\n - datetime.fromtimestamp(\n timestamp),\n locale=(\n locale\n or self.locale_code\n or self.default_locale))\n\n\nlocaldate = LocalDate()\n\n\ndef timesince(timestamp, locale=None):\n return localdate.format_timesince(timestamp, locale=locale)\n", "path": "pootle/i18n/dates.py"}]} | 1,826 | 317 |
gh_patches_debug_3513 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookiecutter needs to always use utf-8 for writing files
I get this on Windows (pity me):
```
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\generate.py", line 318, in generate_files
run_hook('post_gen_project', project_dir, context)
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 107, in run_hook
return run_script_with_context(script, project_dir, context)
File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 90, in run_script_with_context
temp.write(Template(contents).render(**context))
File "c:\program files\python 3.5\lib\tempfile.py", line 482, in func_wrapper
return func(*args, **kwargs)
File "c:\program files\python 3.5\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u0103' in position 1626: character maps to <undefined>
```
cookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile`
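For illustration, a minimal self-contained sketch of that suggestion (the file name and payload here are made up for the demo; this is not necessarily the project's final fix):

```python
import tempfile

# Open the temp file in text mode with an explicit UTF-8 encoding instead of
# relying on the platform default (cp1252 in the traceback above).
with tempfile.NamedTemporaryFile(delete=False, mode='w', encoding='utf-8', suffix='.py') as temp:
    temp.write('print("\u0103")  # a non-cp1252 character now encodes without error')
print(temp.name)  # the temporary file that was written
```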
</issue>
<code>
[start of cookiecutter/hooks.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.hooks
6 ------------------
7
8 Functions for discovering and executing various cookiecutter hooks.
9 """
10
11 import io
12 import logging
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 from jinja2 import Template
19
20 from cookiecutter import utils
21 from .exceptions import FailedHookException
22
23
24 _HOOKS = [
25 'pre_gen_project',
26 'post_gen_project',
27 # TODO: other hooks should be listed here
28 ]
29 EXIT_SUCCESS = 0
30
31
32 def find_hooks():
33 """
34 Must be called with the project template as the current working directory.
35 Returns a dict of all hook scripts provided.
36 Dict's key will be the hook/script's name, without extension, while
37 values will be the absolute path to the script.
38 Missing scripts will not be included in the returned dict.
39 """
40 hooks_dir = 'hooks'
41 r = {}
42 logging.debug('hooks_dir is {0}'.format(hooks_dir))
43 if not os.path.isdir(hooks_dir):
44 logging.debug('No hooks/ dir in template_dir')
45 return r
46 for f in os.listdir(hooks_dir):
47 basename = os.path.splitext(os.path.basename(f))[0]
48 if basename in _HOOKS:
49 r[basename] = os.path.abspath(os.path.join(hooks_dir, f))
50 return r
51
52
53 def run_script(script_path, cwd='.'):
54 """
55 Executes a script from a working directory.
56
57 :param script_path: Absolute path to the script to run.
58 :param cwd: The directory to run the script from.
59 """
60 run_thru_shell = sys.platform.startswith('win')
61 if script_path.endswith('.py'):
62 script_command = [sys.executable, script_path]
63 else:
64 script_command = [script_path]
65
66 utils.make_executable(script_path)
67
68 proc = subprocess.Popen(
69 script_command,
70 shell=run_thru_shell,
71 cwd=cwd
72 )
73 exit_status = proc.wait()
74 if exit_status != EXIT_SUCCESS:
75 raise FailedHookException(
76 "Hook script failed (exit status: %d)" % exit_status)
77
78
79 def run_script_with_context(script_path, cwd, context):
80 """
81 Executes a script after rendering with it Jinja.
82
83 :param script_path: Absolute path to the script to run.
84 :param cwd: The directory to run the script from.
85 :param context: Cookiecutter project template context.
86 """
87 _, extension = os.path.splitext(script_path)
88
89 contents = io.open(script_path, 'r', encoding='utf-8').read()
90
91 with tempfile.NamedTemporaryFile(
92 delete=False,
93 mode='w',
94 suffix=extension
95 ) as temp:
96 temp.write(Template(contents).render(**context))
97
98 run_script(temp.name, cwd)
99
100
101 def run_hook(hook_name, project_dir, context):
102 """
103 Try to find and execute a hook from the specified project directory.
104
105 :param hook_name: The hook to execute.
106 :param project_dir: The directory to execute the script from.
107 :param context: Cookiecutter project context.
108 """
109 script = find_hooks().get(hook_name)
110 if script is None:
111 logging.debug('No hooks found')
112 return
113 run_script_with_context(script, project_dir, context)
114
[end of cookiecutter/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py
--- a/cookiecutter/hooks.py
+++ b/cookiecutter/hooks.py
@@ -90,10 +90,11 @@
with tempfile.NamedTemporaryFile(
delete=False,
- mode='w',
+ mode='wb',
suffix=extension
) as temp:
- temp.write(Template(contents).render(**context))
+ output = Template(contents).render(**context)
+ temp.write(output.encode('utf-8'))
run_script(temp.name, cwd)
| {"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -90,10 +90,11 @@\n \n with tempfile.NamedTemporaryFile(\n delete=False,\n- mode='w',\n+ mode='wb',\n suffix=extension\n ) as temp:\n- temp.write(Template(contents).render(**context))\n+ output = Template(contents).render(**context)\n+ temp.write(output.encode('utf-8'))\n \n run_script(temp.name, cwd)\n", "issue": "Cookiecutter needs to always use utf-8 for writing files\nI get this on Windows (pitty me):\n\n```\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\generate.py\", line 318, in generate_files\n run_hook('post_gen_project', project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 107, in run_hook\n return run_script_with_context(script, project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 90, in run_script_with_context\n temp.write(Template(contents).render(**context))\n File \"c:\\program files\\python 3.5\\lib\\tempfile.py\", line 482, in func_wrapper\n return func(*args, **kwargs)\n File \"c:\\program files\\python 3.5\\lib\\encodings\\cp1252.py\", line 19, in encode\n return codecs.charmap_encode(input,self.errors,encoding_table)[0]\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u0103' in position 1626: character maps to <undefined>\n```\n\ncookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering 
with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='w',\n suffix=extension\n ) as temp:\n temp.write(Template(contents).render(**context))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]} | 1,804 | 127 |
gh_patches_debug_27938 | rasdani/github-patches | git_diff | sanic-org__sanic-2627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uvicorn ignores listeners errors
**Describe the bug**
When an exception happens in a listener, the error is not reported and the server doesn't crash when using Uvicorn.
**Code snippet**
```python
from sanic import Sanic
from sanic.response import text
app = Sanic("MyHelloWorldApp")
@app.listener("before_server_start")
async def raises(app, loop):
print("Trying to run faster")
raise Exception("oh noes")
@app.get("/")
async def hello_world(request):
return text("Hello, world.")
```
```
~ uvicorn app:app
INFO: Started server process [49837]
INFO: Waiting for application startup.
[2022-01-25 14:57:34 +0100] [49837] [INFO]
┌────────────────────────────────────────────────────────────────┐
│ Sanic v21.12.1 │
│ │
├───────────────────────┬────────────────────────────────────────┤
│ │ mode: production, ASGI │
│ ▄███ █████ ██ │ server: sanic │
│ ██ │ python: 3.9.10 │
│ ▀███████ ███▄ │ platform: macOS-12.1-x86_64-i386-64bit │
│ ██ │ packages: sanic-routing==0.7.2 │
│ ████ ████████▀ │ │
│ │ │
│ Build Fast. Run Fast. │ │
└───────────────────────┴────────────────────────────────────────┘
/Users/andre.ericson/projects/sanic-uvicorn/.venv/lib/python3.9/site-packages/sanic/asgi.py:27: UserWarning: You have set a listener for "before_server_start" in ASGI mode. It will be executed as early as possible, but not before the ASGI server is started.
warnings.warn(
Trying to run faster
INFO: ASGI 'lifespan' protocol appears unsupported.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
```
**Expected behavior**
The server should crash, or at least log an error.
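For reference, a simplified sketch of how an ASGI lifespan handler can surface such failures (the listener runner below is a stand-in, not Sanic's actual code; the message shape follows the ASGI lifespan protocol):

```python
async def run_listeners():  # stand-in for the "before_server_start" listeners
    raise Exception("oh noes")

async def lifespan_handler(scope, receive, send):
    message = await receive()
    if message["type"] == "lifespan.startup":
        try:
            await run_listeners()
        except Exception as e:
            # tells the ASGI server (e.g. uvicorn) that startup failed, so it can log and exit
            await send({"type": "lifespan.startup.failed", "message": str(e)})
            return
        await send({"type": "lifespan.startup.complete"})
```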
**Environment (please complete the following information):**
- MacOS
- python 3.9.10
```
pip list
Package Version
------------- ------------
aiofiles 0.8.0
asgiref 3.5.0
click 8.0.3
h11 0.13.0
httptools 0.3.0
multidict 5.2.0
pip 21.3.1
sanic 21.12.1
sanic-routing 0.7.2
setuptools 58.1.0
ujson 5.1.0
uvicorn 0.17.0.post1
uvloop 0.16.0
websockets 10.1
```
**Additional context**
Works as expected with `sanic app.app` and also with GunicornWorker.
</issue>
<code>
[start of sanic/asgi.py]
1 from __future__ import annotations
2
3 import warnings
4
5 from typing import TYPE_CHECKING, Optional
6 from urllib.parse import quote
7
8 from sanic.compat import Header
9 from sanic.exceptions import ServerError
10 from sanic.helpers import Default
11 from sanic.http import Stage
12 from sanic.log import logger
13 from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
14 from sanic.request import Request
15 from sanic.response import BaseHTTPResponse
16 from sanic.server import ConnInfo
17 from sanic.server.websockets.connection import WebSocketConnection
18
19
20 if TYPE_CHECKING:
21 from sanic import Sanic
22
23
24 class Lifespan:
25 def __init__(self, asgi_app: ASGIApp) -> None:
26 self.asgi_app = asgi_app
27
28 if (
29 "server.init.before"
30 in self.asgi_app.sanic_app.signal_router.name_index
31 ):
32 logger.debug(
33 'You have set a listener for "before_server_start" '
34 "in ASGI mode. "
35 "It will be executed as early as possible, but not before "
36 "the ASGI server is started.",
37 extra={"verbosity": 1},
38 )
39 if (
40 "server.shutdown.after"
41 in self.asgi_app.sanic_app.signal_router.name_index
42 ):
43 logger.debug(
44 'You have set a listener for "after_server_stop" '
45 "in ASGI mode. "
46 "It will be executed as late as possible, but not after "
47 "the ASGI server is stopped.",
48 extra={"verbosity": 1},
49 )
50
51 async def startup(self) -> None:
52 """
53 Gather the listeners to fire on server start.
54 Because we are using a third-party server and not Sanic server, we do
55 not have access to fire anything BEFORE the server starts.
56 Therefore, we fire before_server_start and after_server_start
57 in sequence since the ASGI lifespan protocol only supports a single
58 startup event.
59 """
60 await self.asgi_app.sanic_app._startup()
61 await self.asgi_app.sanic_app._server_event("init", "before")
62 await self.asgi_app.sanic_app._server_event("init", "after")
63
64 if not isinstance(self.asgi_app.sanic_app.config.USE_UVLOOP, Default):
65 warnings.warn(
66 "You have set the USE_UVLOOP configuration option, but Sanic "
67 "cannot control the event loop when running in ASGI mode."
68 "This option will be ignored."
69 )
70
71 async def shutdown(self) -> None:
72 """
73 Gather the listeners to fire on server stop.
74 Because we are using a third-party server and not Sanic server, we do
75 not have access to fire anything AFTER the server stops.
76 Therefore, we fire before_server_stop and after_server_stop
77 in sequence since the ASGI lifespan protocol only supports a single
78 shutdown event.
79 """
80 await self.asgi_app.sanic_app._server_event("shutdown", "before")
81 await self.asgi_app.sanic_app._server_event("shutdown", "after")
82
83 async def __call__(
84 self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
85 ) -> None:
86 message = await receive()
87 if message["type"] == "lifespan.startup":
88 await self.startup()
89 await send({"type": "lifespan.startup.complete"})
90
91 message = await receive()
92 if message["type"] == "lifespan.shutdown":
93 await self.shutdown()
94 await send({"type": "lifespan.shutdown.complete"})
95
96
97 class ASGIApp:
98 sanic_app: Sanic
99 request: Request
100 transport: MockTransport
101 lifespan: Lifespan
102 ws: Optional[WebSocketConnection]
103 stage: Stage
104 response: Optional[BaseHTTPResponse]
105
106 def __init__(self) -> None:
107 self.ws = None
108
109 @classmethod
110 async def create(
111 cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
112 ) -> "ASGIApp":
113 instance = cls()
114 instance.sanic_app = sanic_app
115 instance.transport = MockTransport(scope, receive, send)
116 instance.transport.loop = sanic_app.loop
117 instance.stage = Stage.IDLE
118 instance.response = None
119 setattr(instance.transport, "add_task", sanic_app.loop.create_task)
120
121 headers = Header(
122 [
123 (key.decode("latin-1"), value.decode("latin-1"))
124 for key, value in scope.get("headers", [])
125 ]
126 )
127 instance.lifespan = Lifespan(instance)
128
129 if scope["type"] == "lifespan":
130 await instance.lifespan(scope, receive, send)
131 else:
132 path = (
133 scope["path"][1:]
134 if scope["path"].startswith("/")
135 else scope["path"]
136 )
137 url = "/".join([scope.get("root_path", ""), quote(path)])
138 url_bytes = url.encode("latin-1")
139 url_bytes += b"?" + scope["query_string"]
140
141 if scope["type"] == "http":
142 version = scope["http_version"]
143 method = scope["method"]
144 elif scope["type"] == "websocket":
145 version = "1.1"
146 method = "GET"
147
148 instance.ws = instance.transport.create_websocket_connection(
149 send, receive
150 )
151 else:
152 raise ServerError("Received unknown ASGI scope")
153
154 request_class = sanic_app.request_class or Request
155 instance.request = request_class(
156 url_bytes,
157 headers,
158 version,
159 method,
160 instance.transport,
161 sanic_app,
162 )
163 instance.request.stream = instance
164 instance.request_body = True
165 instance.request.conn_info = ConnInfo(instance.transport)
166
167 await sanic_app.dispatch(
168 "http.lifecycle.request",
169 inline=True,
170 context={"request": instance.request},
171 fail_not_found=False,
172 )
173
174 return instance
175
176 async def read(self) -> Optional[bytes]:
177 """
178 Read and stream the body in chunks from an incoming ASGI message.
179 """
180 if self.stage is Stage.IDLE:
181 self.stage = Stage.REQUEST
182 message = await self.transport.receive()
183 body = message.get("body", b"")
184 if not message.get("more_body", False):
185 self.request_body = False
186 if not body:
187 return None
188 return body
189
190 async def __aiter__(self):
191 while self.request_body:
192 data = await self.read()
193 if data:
194 yield data
195
196 def respond(self, response: BaseHTTPResponse):
197 if self.stage is not Stage.HANDLER:
198 self.stage = Stage.FAILED
199 raise RuntimeError("Response already started")
200 if self.response is not None:
201 self.response.stream = None
202 response.stream, self.response = self, response
203 return response
204
205 async def send(self, data, end_stream):
206 self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
207 if self.response:
208 response, self.response = self.response, None
209 await self.transport.send(
210 {
211 "type": "http.response.start",
212 "status": response.status,
213 "headers": response.processed_headers,
214 }
215 )
216 response_body = getattr(response, "body", None)
217 if response_body:
218 data = response_body + data if data else response_body
219 await self.transport.send(
220 {
221 "type": "http.response.body",
222 "body": data.encode() if hasattr(data, "encode") else data,
223 "more_body": not end_stream,
224 }
225 )
226
227 _asgi_single_callable = True # We conform to ASGI 3.0 single-callable
228
229 async def __call__(self) -> None:
230 """
231 Handle the incoming request.
232 """
233 try:
234 self.stage = Stage.HANDLER
235 await self.sanic_app.handle_request(self.request)
236 except Exception as e:
237 try:
238 await self.sanic_app.handle_exception(self.request, e)
239 except Exception as exc:
240 await self.sanic_app.handle_exception(self.request, exc, False)
241
[end of sanic/asgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -9,7 +9,7 @@
from sanic.exceptions import ServerError
from sanic.helpers import Default
from sanic.http import Stage
-from sanic.log import logger
+from sanic.log import error_logger, logger
from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
from sanic.request import Request
from sanic.response import BaseHTTPResponse
@@ -85,13 +85,27 @@
) -> None:
message = await receive()
if message["type"] == "lifespan.startup":
- await self.startup()
- await send({"type": "lifespan.startup.complete"})
+ try:
+ await self.startup()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.startup.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.startup.complete"})
message = await receive()
if message["type"] == "lifespan.shutdown":
- await self.shutdown()
- await send({"type": "lifespan.shutdown.complete"})
+ try:
+ await self.shutdown()
+ except Exception as e:
+ error_logger.exception(e)
+ await send(
+ {"type": "lifespan.shutdown.failed", "message": str(e)}
+ )
+ else:
+ await send({"type": "lifespan.shutdown.complete"})
class ASGIApp:
| {"golden_diff": "diff --git a/sanic/asgi.py b/sanic/asgi.py\n--- a/sanic/asgi.py\n+++ b/sanic/asgi.py\n@@ -9,7 +9,7 @@\n from sanic.exceptions import ServerError\n from sanic.helpers import Default\n from sanic.http import Stage\n-from sanic.log import logger\n+from sanic.log import error_logger, logger\n from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\n from sanic.request import Request\n from sanic.response import BaseHTTPResponse\n@@ -85,13 +85,27 @@\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n- await self.startup()\n- await send({\"type\": \"lifespan.startup.complete\"})\n+ try:\n+ await self.startup()\n+ except Exception as e:\n+ error_logger.exception(e)\n+ await send(\n+ {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n+ )\n+ else:\n+ await send({\"type\": \"lifespan.startup.complete\"})\n \n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n- await self.shutdown()\n- await send({\"type\": \"lifespan.shutdown.complete\"})\n+ try:\n+ await self.shutdown()\n+ except Exception as e:\n+ error_logger.exception(e)\n+ await send(\n+ {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n+ )\n+ else:\n+ await send({\"type\": \"lifespan.shutdown.complete\"})\n \n \n class ASGIApp:\n", "issue": "Uvicorn ignores listeners errors\n**Describe the bug**\r\nWhen an exception happens on a listener the error is not reported and the server doesn't crash when using Uvicorn.\r\n\r\n\r\n**Code snippet**\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.response import text\r\n\r\napp = Sanic(\"MyHelloWorldApp\")\r\n\r\[email protected](\"before_server_start\")\r\nasync def raises(app, loop):\r\n print(\"Trying to run faster\")\r\n raise Exception(\"oh noes\")\r\n\r\[email protected](\"/\")\r\nasync def hello_world(request):\r\n return text(\"Hello, world.\")\r\n```\r\n\r\n```\r\n~ uvicorn app:app\r\nINFO: Started server process [49837]\r\nINFO: Waiting for application startup.\r\n[2022-01-25 14:57:34 +0100] [49837] [INFO]\r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n \u2502 Sanic v21.12.1 \u2502\r\n \u2502 \u2502\r\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n \u2502 \u2502 mode: production, ASGI \u2502\r\n \u2502 \u2584\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588 \u2588\u2588 \u2502 server: sanic \u2502\r\n \u2502 \u2588\u2588 \u2502 python: 3.9.10 \u2502\r\n \u2502 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2584 \u2502 platform: macOS-12.1-x86_64-i386-64bit \u2502\r\n \u2502 \u2588\u2588 \u2502 packages: sanic-routing==0.7.2 \u2502\r\n \u2502 \u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580 \u2502 \u2502\r\n \u2502 \u2502 \u2502\r\n \u2502 Build Fast. Run Fast. 
\u2502 \u2502\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n\r\n/Users/andre.ericson/projects/sanic-uvicorn/.venv/lib/python3.9/site-packages/sanic/asgi.py:27: UserWarning: You have set a listener for \"before_server_start\" in ASGI mode. It will be executed as early as possible, but not before the ASGI server is started.\r\n warnings.warn(\r\nTrying to run faster\r\nINFO: ASGI 'lifespan' protocol appears unsupported.\r\nINFO: Application startup complete.\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\n``` \r\n**Expected behavior**\r\nThe server should crash, or at least log an error.\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - MacOS\r\n - python 3.9.10\r\n ```\r\n pip list\r\nPackage Version\r\n------------- ------------\r\naiofiles 0.8.0\r\nasgiref 3.5.0\r\nclick 8.0.3\r\nh11 0.13.0\r\nhttptools 0.3.0\r\nmultidict 5.2.0\r\npip 21.3.1\r\nsanic 21.12.1\r\nsanic-routing 0.7.2\r\nsetuptools 58.1.0\r\nujson 5.1.0\r\nuvicorn 0.17.0.post1\r\nuvloop 0.16.0\r\nwebsockets 10.1\r\n ```\r\n\r\n\r\n**Additional context**\r\nWorks as expected with `sanic app.app` and also with GunicornWorker.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(self, asgi_app: ASGIApp) -> None:\n self.asgi_app = asgi_app\n\n if (\n \"server.init.before\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if (\n \"server.shutdown.after\"\n in self.asgi_app.sanic_app.signal_router.name_index\n ):\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.asgi_app.sanic_app._startup()\n await self.asgi_app.sanic_app._server_event(\"init\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.asgi_app.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"before\")\n await self.asgi_app.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(\n self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n message = await receive()\n if message[\"type\"] == \"lifespan.startup\":\n await self.startup()\n await send({\"type\": \"lifespan.startup.complete\"})\n\n message = await receive()\n if message[\"type\"] == \"lifespan.shutdown\":\n await self.shutdown()\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n def __init__(self) -> None:\n self.ws = None\n\n @classmethod\n async def create(\n cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> \"ASGIApp\":\n instance = cls()\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n instance.lifespan = Lifespan(instance)\n\n if scope[\"type\"] == \"lifespan\":\n await instance.lifespan(scope, receive, send)\n else:\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n 
instance.transport,\n sanic_app,\n )\n instance.request.stream = instance\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}]} | 3,662 | 366 |
gh_patches_debug_27382 | rasdani/github-patches | git_diff | ultralytics__yolov5-5141 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DDP wandb utils not running check_file() on --data
## 🐛 Bug
@AyushExel I found a bit of a bug today on starting a DDP training with `--data coco.yaml`. All our main python files (train, test, detect) are set up so that files passed to the argparser are searched locally inside the yolov5 repo using `file = check_file(file)`. This allows for more concise commands like --cfg yolov5s.yaml rather than --cfg models/yolov5s.yaml, but it looks like opt.data got passed to the wandb utils before it had a chance to pass through check_file() first. Should be an easy fix, I think I can do this tomorrow.
<img width="1214" alt="Screenshot 2021-05-05 at 01 46 55" src="https://user-images.githubusercontent.com/26833433/117083255-94c12780-ad44-11eb-85c2-48bef8943717.png">
## To Reproduce (REQUIRED)
Start DDP training with `--data coco.yaml`
```
python -m torch.distributed.launch --nproc_per_node 2 --master_port 1 train.py --batch 64 --cfg yolov5x.yaml --weights '' --data coco.yaml
```
Question about calculating mAP at test time
## ❔Question
Thank you for your work, I really enjoyed running the code.
I was trying to understand the way we calculate mAP by reading the [test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)
At line [197](https://github.com/ultralytics/yolov5/blob/master/test.py#L197), as I understand it, the IoU values (`ious`) are not sorted before the further processing (lines [199-211](https://github.com/ultralytics/yolov5/blob/master/test.py#L199-L211)).
Therefore, I think this doesn't guarantee that we find the best IoU (i.e. the best-matching detected box) for a ground-truth box.
For example, a detected box is considered correct if its IoU with a ground-truth box is >= 0.5. However, it is possible that we detect multiple boxes with different IoU values for the same ground truth. In this case, I think we should assign the box with the highest IoU as the correctly detected box.
Will the code affect the result of calculating mAP?
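As a toy illustration of the concern (hypothetical code, not taken from test.py): when several detections overlap the same ground-truth box, matching in descending-IoU order guarantees the ground truth is credited to its best detection.

```python
import torch

ious = torch.tensor([0.55, 0.92, 0.48])       # IoUs of three detections vs. one GT box
order = torch.argsort(ious, descending=True)  # tensor([1, 0, 2])
matched_gt = set()
for j in order.tolist():
    if ious[j] >= 0.5 and 0 not in matched_gt:  # single GT box, index 0
        matched_gt.add(0)
        print(f"detection {j} matched at IoU {ious[j]:.2f}")  # detection 1 at IoU 0.92
```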
## Additional context
The comments in lines [191-192](https://github.com/ultralytics/yolov5/blob/master/test.py#L191-L192) (`# prediction indices`, `# target indices`) should be swapped, shouldn't they?
</issue>
<code>
[start of hubconf.py]
1 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 """
3 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
4
5 Usage:
6 import torch
7 model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
8 """
9
10 import torch
11
12
13 def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
14 """Creates a specified YOLOv5 model
15
16 Arguments:
17 name (str): name of model, i.e. 'yolov5s'
18 pretrained (bool): load pretrained weights into the model
19 channels (int): number of input channels
20 classes (int): number of model classes
21 autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
22 verbose (bool): print all information to screen
23 device (str, torch.device, None): device to use for model parameters
24
25 Returns:
26 YOLOv5 pytorch model
27 """
28 from pathlib import Path
29
30 from models.yolo import Model
31 from models.experimental import attempt_load
32 from utils.general import check_requirements, set_logging
33 from utils.downloads import attempt_download
34 from utils.torch_utils import select_device
35
36 file = Path(__file__).resolve()
37 check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
38 set_logging(verbose=verbose)
39
40 save_dir = Path('') if str(name).endswith('.pt') else file.parent
41 path = (save_dir / name).with_suffix('.pt') # checkpoint path
42 try:
43 device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
44
45 if pretrained and channels == 3 and classes == 80:
46 model = attempt_load(path, map_location=device) # download/load FP32 model
47 else:
48 cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path
49 model = Model(cfg, channels, classes) # create model
50 if pretrained:
51 ckpt = torch.load(attempt_download(path), map_location=device) # load
52 msd = model.state_dict() # model state_dict
53 csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
54 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
55 model.load_state_dict(csd, strict=False) # load
56 if len(ckpt['model'].names) == classes:
57 model.names = ckpt['model'].names # set class names attribute
58 if autoshape:
59 model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
60 return model.to(device)
61
62 except Exception as e:
63 help_url = 'https://github.com/ultralytics/yolov5/issues/36'
64 s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
65 raise Exception(s) from e
66
67
68 def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
69 # YOLOv5 custom or local model
70 return _create(path, autoshape=autoshape, verbose=verbose, device=device)
71
72
73 def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
74 # YOLOv5-small model https://github.com/ultralytics/yolov5
75 return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
76
77
78 def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
79 # YOLOv5-medium model https://github.com/ultralytics/yolov5
80 return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
81
82
83 def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
84 # YOLOv5-large model https://github.com/ultralytics/yolov5
85 return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
86
87
88 def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
89 # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
90 return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
91
92
93 def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
94 # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
95 return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
96
97
98 def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
99 # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
100 return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
101
102
103 def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
104 # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
105 return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
106
107
108 def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
109 # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
110 return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
111
112
113 if __name__ == '__main__':
114 model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
115 # model = custom(path='path/to/model.pt') # custom
116
117 # Verify inference
118 import cv2
119 import numpy as np
120 from PIL import Image
121 from pathlib import Path
122
123 imgs = ['data/images/zidane.jpg', # filename
124 Path('data/images/zidane.jpg'), # Path
125 'https://ultralytics.com/images/zidane.jpg', # URI
126 cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
127 Image.open('data/images/bus.jpg'), # PIL
128 np.zeros((320, 640, 3))] # numpy
129
130 results = model(imgs) # batched inference
131 results.print()
132 results.save()
133
[end of hubconf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hubconf.py b/hubconf.py
--- a/hubconf.py
+++ b/hubconf.py
@@ -70,6 +70,11 @@
return _create(path, autoshape=autoshape, verbose=verbose, device=device)
+def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-nano model https://github.com/ultralytics/yolov5
+ return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)
+
+
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small model https://github.com/ultralytics/yolov5
return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
@@ -90,6 +95,11 @@
return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
+def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+ # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
+ return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)
+
+
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
| {"golden_diff": "diff --git a/hubconf.py b/hubconf.py\n--- a/hubconf.py\n+++ b/hubconf.py\n@@ -70,6 +70,11 @@\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n \n \n+def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n+ # YOLOv5-nano model https://github.com/ultralytics/yolov5\n+ return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)\n+\n+\n def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n@@ -90,6 +95,11 @@\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n \n \n+def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n+ # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5\n+ return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)\n+\n+\n def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n", "issue": "DDP wandb utils not running check_file() on --data\n## \ud83d\udc1b Bug\r\n\r\n@AyushExel I found a bit of a bug today on starting a DDP training with `--data coco.yaml`. All our main python files (train, test, detect) are set up so that files passed to the argparser are searched locally inside the yolov5 repo using `file = check_file(file)`. This allows for more concise commands like --cfg yolov5s.yaml rather than --cfg models/yolov5s.yaml, but it looks like opt.data got passed to the wandb utils before it had a chance to pass through check_file() first. Should be an easy fix, I think I can do this tomorrow.\r\n\r\n<img width=\"1214\" alt=\"Screenshot 2021-05-05 at 01 46 55\" src=\"https://user-images.githubusercontent.com/26833433/117083255-94c12780-ad44-11eb-85c2-48bef8943717.png\">\r\n\r\n## To Reproduce (REQUIRED)\r\n\r\nStart DDP training with `--data coco.yaml`\r\n```\r\n python -m torch.distributed.launch --nproc_per_node 2 --master_port 1 train.py --batch 64 --cfg yolov5x.yaml --weights '' --data coco.yaml\r\n```\r\n\nQuestion about calculating mAP at test time\n## \u2754Question\r\nThank you for your work, I really enjoyed running the codes.\r\n\r\nI was trying to understand the way we calculate mAP by reading the [test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)\r\nAt line [197](https://github.com/ultralytics/yolov5/blob/master/test.py#L197), as I understood, the IoU values (`ious`) wasn't sorted before the further process (lines [199-211](https://github.com/ultralytics/yolov5/blob/master/test.py#L199-L211)).\r\nTherefore, I think it doesn't guarantee that we find the best IoU (or a detected box) for a ground-truth box.\r\nFor example, a detected box is considered correct if the IoU with a ground-truth box is >= 0.5. However, there are possibilities that we detected multiple boxes with different IoU values. 
In this case, I think we should assign the box with the highest IoU as the correctly detected box.\r\n\r\nWill the code affect the result of calculating mAP?\r\n\r\n## Additional context\r\nThe comments in lines [191-192](https://github.com/ultralytics/yolov5/blob/master/test.py#L191-L192) (`# prediction indices`, `# target indices`) should be swapped, shouldn't they?\n", "before_files": [{"content": "# YOLOv5 \ud83d\ude80 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/\n\nUsage:\n import torch\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\"\"\"\n\nimport torch\n\n\ndef _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n \"\"\"Creates a specified YOLOv5 model\n\n Arguments:\n name (str): name of model, i.e. 'yolov5s'\n pretrained (bool): load pretrained weights into the model\n channels (int): number of input channels\n classes (int): number of model classes\n autoshape (bool): apply YOLOv5 .autoshape() wrapper to model\n verbose (bool): print all information to screen\n device (str, torch.device, None): device to use for model parameters\n\n Returns:\n YOLOv5 pytorch model\n \"\"\"\n from pathlib import Path\n\n from models.yolo import Model\n from models.experimental import attempt_load\n from utils.general import check_requirements, set_logging\n from utils.downloads import attempt_download\n from utils.torch_utils import select_device\n\n file = Path(__file__).resolve()\n check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))\n set_logging(verbose=verbose)\n\n save_dir = Path('') if str(name).endswith('.pt') else file.parent\n path = (save_dir / name).with_suffix('.pt') # checkpoint path\n try:\n device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)\n\n if pretrained and channels == 3 and classes == 80:\n model = attempt_load(path, map_location=device) # download/load FP32 model\n else:\n cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path\n model = Model(cfg, channels, classes) # create model\n if pretrained:\n ckpt = torch.load(attempt_download(path), map_location=device) # load\n msd = model.state_dict() # model state_dict\n csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32\n csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter\n model.load_state_dict(csd, strict=False) # load\n if len(ckpt['model'].names) == classes:\n model.names = ckpt['model'].names # set class names attribute\n if autoshape:\n model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS\n return model.to(device)\n\n except Exception as e:\n help_url = 'https://github.com/ultralytics/yolov5/issues/36'\n s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' 
% help_url\n raise Exception(s) from e\n\n\ndef custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):\n # YOLOv5 custom or local model\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n\n\ndef yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium model https://github.com/ultralytics/yolov5\n return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large model https://github.com/ultralytics/yolov5\n return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge model https://github.com/ultralytics/yolov5\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)\n\n\nif __name__ == '__main__':\n model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained\n # model = custom(path='path/to/model.pt') # custom\n\n # Verify inference\n import cv2\n import numpy as np\n from PIL import Image\n from pathlib import Path\n\n imgs = ['data/images/zidane.jpg', # filename\n Path('data/images/zidane.jpg'), # Path\n 'https://ultralytics.com/images/zidane.jpg', # URI\n cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV\n Image.open('data/images/bus.jpg'), # PIL\n np.zeros((320, 640, 3))] # numpy\n\n results = model(imgs) # batched inference\n results.print()\n results.save()\n", "path": "hubconf.py"}]} | 3,076 | 426 |
gh_patches_debug_30392 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E1010 Invalid GetAtt error message could be better
*cfn-lint version: (`cfn-lint --version`)* 0.7.1
Code:
```yaml
ElasticSearchHostname:
Value:
Fn::GetAtt: ElasticSearchDomain.DomainEndpoint
```
(I mixed short and long form)
Current:
```
E1010 Invalid GetAtt E.l.a.s.t.i.c.S.e.a.r.c.h.D.o.m.a.i.n...D.o.m.a.i.n.E.n.d.p.o.i.n.t for resource ElasticSearchHostname
cfn/x.cfn.yaml:342:7
```
Better:
```
E1010 GetAtt expects an array of length 2, not String for resource ElasticSearchHostname
```
(nb, this is also an error in an *output*, not *resource*, but I didn't even notice that until filing the bug report. I guess my eyes skip over everything except the line number)
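For reference, the dotted letters in the current message come from indexing the scalar long form as if it were a list (see lines 47-48 of `GetAtt.py` below); a minimal sketch with the value from the template above:
```python
# The long form `Fn::GetAtt: ElasticSearchDomain.DomainEndpoint` reaches the
# rule as a plain string rather than a two-element list.
value = "ElasticSearchDomain.DomainEndpoint"

resname = value[0]             # 'E' -- indexing a str yields single characters
restype = ".".join(value[1:])  # 'l.a.s.t.i.c.S...' -- hence the garbled message
```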
</issue>
<code>
[start of src/cfnlint/rules/functions/GetAtt.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19 import cfnlint.helpers
20
21
22 class GetAtt(CloudFormationLintRule):
23 """Check if GetAtt values are correct"""
24 id = 'E1010'
25 shortdesc = 'GetAtt validation of parameters'
26 description = 'Validates that GetAtt parameters are to valid resources and properties of those resources'
27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html'
28 tags = ['functions', 'getatt']
29
30 def __init__(self):
31 resourcespecs = cfnlint.helpers.RESOURCE_SPECS['us-east-1']
32 self.resourcetypes = resourcespecs['ResourceTypes']
33 self.propertytypes = resourcespecs['PropertyTypes']
34
35 def match(self, cfn):
36 """Check CloudFormation GetAtt"""
37
38 matches = []
39
40 getatts = cfn.search_deep_keys('Fn::GetAtt')
41 valid_getatts = cfn.get_valid_getatts()
42 for getatt in getatts:
43 if len(getatt[-1]) < 2:
44 message = 'Invalid GetAtt for {0}'
45 matches.append(RuleMatch(getatt, message.format('/'.join(map(str, getatt[:-1])))))
46 continue
47 resname = getatt[-1][0]
48 restype = '.'.join(getatt[-1][1:])
49 if resname in valid_getatts:
50 if restype not in valid_getatts[resname] and '*' not in valid_getatts[resname]:
51 message = 'Invalid GetAtt {0}.{1} for resource {2}'
52 matches.append(RuleMatch(
53 getatt[:-1], message.format(resname, restype, getatt[1])))
54 else:
55 message = 'Invalid GetAtt {0}.{1} for resource {2}'
56 matches.append(RuleMatch(getatt, message.format(resname, restype, getatt[1])))
57
58 return matches
59
[end of src/cfnlint/rules/functions/GetAtt.py]
[start of src/cfnlint/decode/cfn_yaml.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import logging
18 import six
19 from yaml.composer import Composer
20 from yaml.reader import Reader
21 from yaml.scanner import Scanner
22 from yaml.resolver import Resolver
23 from yaml import ScalarNode
24 from yaml import SequenceNode
25 from yaml import MappingNode
26 from yaml.constructor import SafeConstructor
27 from yaml.constructor import ConstructorError
28 import cfnlint
29 from cfnlint.decode.node import str_node, dict_node, list_node
30
31 try:
32 from yaml.cyaml import CParser as Parser # pylint: disable=ungrouped-imports
33 cyaml = True
34 except ImportError:
35 from yaml.parser import Parser # pylint: disable=ungrouped-imports
36 cyaml = False
37
38 UNCONVERTED_SUFFIXES = ['Ref', 'Condition']
39 FN_PREFIX = 'Fn::'
40
41 LOGGER = logging.getLogger(__name__)
42
43
44 class CfnParseError(ConstructorError):
45 """
46 Error thrown when the template contains Cfn Error
47 """
48 def __init__(self, filename, message, line_number, column_number, key=' '):
49
50 # Call the base class constructor with the parameters it needs
51 super(CfnParseError, self).__init__(message)
52
53 # Now for your custom code...
54 self.filename = filename
55 self.line_number = line_number
56 self.column_number = column_number
57 self.message = message
58 self.match = cfnlint.Match(
59 line_number + 1, column_number + 1, line_number + 1,
60 column_number + 1 + len(key), filename, cfnlint.ParseError(), message=message)
61
62
63 class NodeConstructor(SafeConstructor):
64 """
65 Node Constructors for loading different types in Yaml
66 """
67
68 def __init__(self, filename):
69 # Call the base class constructor
70 super(NodeConstructor, self).__init__()
71
72 self.filename = filename
73
74 # To support lazy loading, the original constructors first yield
75 # an empty object, then fill them in when iterated. Due to
76 # laziness we omit this behaviour (and will only do "deep
77 # construction") by first exhausting iterators, then yielding
78 # copies.
79 def construct_yaml_map(self, node):
80
81 # Check for duplicate keys on the current level, this is not desirable
82 # because a dict does not support this. It overwrites it with the last
83 # occurance, which can give unexpected results
84 mapping = {}
85 for key_node, value_node in node.value:
86 key = self.construct_object(key_node, False)
87 value = self.construct_object(value_node, False)
88
89 if key in mapping:
90 raise CfnParseError(
91 self.filename,
92 'Duplicate resource found "{}" (line {})'.format(key, key_node.start_mark.line + 1),
93 key_node.start_mark.line, key_node.start_mark.column, key)
94 mapping[key] = value
95
96 obj, = SafeConstructor.construct_yaml_map(self, node)
97 return dict_node(obj, node.start_mark, node.end_mark)
98
99 def construct_yaml_str(self, node):
100 obj = SafeConstructor.construct_yaml_str(self, node)
101 assert isinstance(obj, (six.string_types))
102 return str_node(obj, node.start_mark, node.end_mark)
103
104 def construct_yaml_seq(self, node):
105 obj, = SafeConstructor.construct_yaml_seq(self, node)
106 assert isinstance(obj, list)
107 return list_node(obj, node.start_mark, node.end_mark)
108
109 def construct_yaml_null_error(self, node):
110 """Throw a null error"""
111 raise CfnParseError(
112 self.filename,
113 'Null value at line {0} column {1}'.format(node.start_mark.line, node.start_mark.column),
114 node.start_mark.line, node.start_mark.column, ' ')
115
116
117 NodeConstructor.add_constructor(
118 u'tag:yaml.org,2002:map',
119 NodeConstructor.construct_yaml_map)
120
121 NodeConstructor.add_constructor(
122 u'tag:yaml.org,2002:str',
123 NodeConstructor.construct_yaml_str)
124
125 NodeConstructor.add_constructor(
126 u'tag:yaml.org,2002:seq',
127 NodeConstructor.construct_yaml_seq)
128
129 NodeConstructor.add_constructor(
130 u'tag:yaml.org,2002:null',
131 NodeConstructor.construct_yaml_null_error)
132
133
134 class MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):
135 """
136 Class for marked loading YAML
137 """
138 # pylint: disable=non-parent-init-called,super-init-not-called
139 def __init__(self, stream, filename):
140 Reader.__init__(self, stream)
141 Scanner.__init__(self)
142 if cyaml:
143 Parser.__init__(self, stream)
144 else:
145 Parser.__init__(self)
146 Composer.__init__(self)
147 SafeConstructor.__init__(self)
148 Resolver.__init__(self)
149 NodeConstructor.__init__(self, filename)
150
151
152 def multi_constructor(loader, tag_suffix, node):
153 """
154 Deal with !Ref style function format
155 """
156
157 if tag_suffix not in UNCONVERTED_SUFFIXES:
158 tag_suffix = '{}{}'.format(FN_PREFIX, tag_suffix)
159
160 constructor = None
161 if tag_suffix == 'Fn::GetAtt':
162 constructor = construct_getatt
163 elif isinstance(node, ScalarNode):
164 constructor = loader.construct_scalar
165 elif isinstance(node, SequenceNode):
166 constructor = loader.construct_sequence
167 elif isinstance(node, MappingNode):
168 constructor = loader.construct_mapping
169 else:
170 raise 'Bad tag: !{}'.format(tag_suffix)
171
172 return dict_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)
173
174
175 def construct_getatt(node):
176 """
177 Reconstruct !GetAtt into a list
178 """
179 if isinstance(node.value, (six.string_types)):
180 return list_node(node.value.split('.'), node.start_mark, node.end_mark)
181 if isinstance(node.value, list):
182 return list_node([s.value for s in node.value], node.start_mark, node.end_mark)
183
184 raise ValueError('Unexpected node type: {}'.format(type(node.value)))
185
186
187 def loads(yaml_string, fname=None):
188 """
189 Load the given YAML string
190 """
191 loader = MarkedLoader(yaml_string, fname)
192 loader.add_multi_constructor('!', multi_constructor)
193 template = loader.get_single_data()
194 # Convert an empty file to an empty dict
195 if template is None:
196 template = {}
197
198 return template
199
200
201 def load(filename):
202 """
203 Load the given YAML file
204 """
205 with open(filename) as fp:
206 return loads(fp.read(), filename)
207
[end of src/cfnlint/decode/cfn_yaml.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py
--- a/src/cfnlint/decode/cfn_yaml.py
+++ b/src/cfnlint/decode/cfn_yaml.py
@@ -176,6 +176,7 @@
"""
Reconstruct !GetAtt into a list
"""
+
if isinstance(node.value, (six.string_types)):
return list_node(node.value.split('.'), node.start_mark, node.end_mark)
if isinstance(node.value, list):
diff --git a/src/cfnlint/rules/functions/GetAtt.py b/src/cfnlint/rules/functions/GetAtt.py
--- a/src/cfnlint/rules/functions/GetAtt.py
+++ b/src/cfnlint/rules/functions/GetAtt.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
import cfnlint.helpers
@@ -44,8 +45,12 @@
message = 'Invalid GetAtt for {0}'
matches.append(RuleMatch(getatt, message.format('/'.join(map(str, getatt[:-1])))))
continue
- resname = getatt[-1][0]
- restype = '.'.join(getatt[-1][1:])
+ if isinstance(getatt[-1], six.string_types):
+ resname, restype = getatt[-1].split('.')
+ else:
+ resname = getatt[-1][0]
+ restype = '.'.join(getatt[-1][1:])
+
if resname in valid_getatts:
if restype not in valid_getatts[resname] and '*' not in valid_getatts[resname]:
message = 'Invalid GetAtt {0}.{1} for resource {2}'
| {"golden_diff": "diff --git a/src/cfnlint/decode/cfn_yaml.py b/src/cfnlint/decode/cfn_yaml.py\n--- a/src/cfnlint/decode/cfn_yaml.py\n+++ b/src/cfnlint/decode/cfn_yaml.py\n@@ -176,6 +176,7 @@\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n+\n if isinstance(node.value, (six.string_types)):\n return list_node(node.value.split('.'), node.start_mark, node.end_mark)\n if isinstance(node.value, list):\ndiff --git a/src/cfnlint/rules/functions/GetAtt.py b/src/cfnlint/rules/functions/GetAtt.py\n--- a/src/cfnlint/rules/functions/GetAtt.py\n+++ b/src/cfnlint/rules/functions/GetAtt.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n import cfnlint.helpers\n@@ -44,8 +45,12 @@\n message = 'Invalid GetAtt for {0}'\n matches.append(RuleMatch(getatt, message.format('/'.join(map(str, getatt[:-1])))))\n continue\n- resname = getatt[-1][0]\n- restype = '.'.join(getatt[-1][1:])\n+ if isinstance(getatt[-1], six.string_types):\n+ resname, restype = getatt[-1].split('.')\n+ else:\n+ resname = getatt[-1][0]\n+ restype = '.'.join(getatt[-1][1:])\n+\n if resname in valid_getatts:\n if restype not in valid_getatts[resname] and '*' not in valid_getatts[resname]:\n message = 'Invalid GetAtt {0}.{1} for resource {2}'\n", "issue": "E1010 Invalid GetAtt error message could be better\n*cfn-lint version: (`cfn-lint --version`)* 0.7.1\r\n\r\nCode:\r\n```yaml\r\n ElasticSearchHostname:\r\n Value:\r\n Fn::GetAtt: ElasticSearchDomain.DomainEndpoint\r\n```\r\n\r\n(I mixed short and long form)\r\n\r\nCurrent:\r\n```\r\nE1010 Invalid GetAtt E.l.a.s.t.i.c.S.e.a.r.c.h.D.o.m.a.i.n...D.o.m.a.i.n.E.n.d.p.o.i.n.t for resource ElasticSearchHostname\r\ncfn/x.cfn.yaml:342:7\r\n```\r\n\r\nBetter:\r\n```\r\nE1010 GetAtt expects an array of length 2, not String for resource ElasticSearchHostname\r\n```\r\n\r\n(nb, this is also an error in an *output*, not *resource*, but I didn't even notice that until filing the bug report. I guess my eyes skip over everything except the line number)\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass GetAtt(CloudFormationLintRule):\n \"\"\"Check if GetAtt values are correct\"\"\"\n id = 'E1010'\n shortdesc = 'GetAtt validation of parameters'\n description = 'Validates that GetAtt parameters are to valid resources and properties of those resources'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html'\n tags = ['functions', 'getatt']\n\n def __init__(self):\n resourcespecs = cfnlint.helpers.RESOURCE_SPECS['us-east-1']\n self.resourcetypes = resourcespecs['ResourceTypes']\n self.propertytypes = resourcespecs['PropertyTypes']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation GetAtt\"\"\"\n\n matches = []\n\n getatts = cfn.search_deep_keys('Fn::GetAtt')\n valid_getatts = cfn.get_valid_getatts()\n for getatt in getatts:\n if len(getatt[-1]) < 2:\n message = 'Invalid GetAtt for {0}'\n matches.append(RuleMatch(getatt, message.format('/'.join(map(str, getatt[:-1])))))\n continue\n resname = getatt[-1][0]\n restype = '.'.join(getatt[-1][1:])\n if resname in valid_getatts:\n if restype not in valid_getatts[resname] and '*' not in valid_getatts[resname]:\n message = 'Invalid GetAtt {0}.{1} for resource {2}'\n matches.append(RuleMatch(\n getatt[:-1], message.format(resname, restype, getatt[1])))\n else:\n message = 'Invalid GetAtt {0}.{1} for resource {2}'\n matches.append(RuleMatch(getatt, message.format(resname, restype, getatt[1])))\n\n return matches\n", "path": "src/cfnlint/rules/functions/GetAtt.py"}, {"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport six\nfrom yaml.composer import Composer\nfrom yaml.reader import Reader\nfrom yaml.scanner import Scanner\nfrom yaml.resolver import Resolver\nfrom yaml import ScalarNode\nfrom yaml import SequenceNode\nfrom yaml import MappingNode\nfrom yaml.constructor import SafeConstructor\nfrom yaml.constructor import ConstructorError\nimport cfnlint\nfrom cfnlint.decode.node import str_node, dict_node, list_node\n\ntry:\n from yaml.cyaml import CParser as Parser # pylint: disable=ungrouped-imports\n cyaml = True\nexcept ImportError:\n from yaml.parser import Parser # pylint: disable=ungrouped-imports\n cyaml = False\n\nUNCONVERTED_SUFFIXES = ['Ref', 'Condition']\nFN_PREFIX = 'Fn::'\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CfnParseError(ConstructorError):\n \"\"\"\n Error thrown when the template contains Cfn Error\n \"\"\"\n def __init__(self, filename, message, line_number, column_number, key=' '):\n\n # Call the base class constructor with the parameters it needs\n super(CfnParseError, self).__init__(message)\n\n # Now for your custom code...\n self.filename = filename\n self.line_number = line_number\n self.column_number = column_number\n self.message = message\n self.match = cfnlint.Match(\n line_number + 1, column_number + 1, line_number + 1,\n column_number + 1 + len(key), filename, cfnlint.ParseError(), message=message)\n\n\nclass NodeConstructor(SafeConstructor):\n \"\"\"\n Node Constructors for loading different types in Yaml\n \"\"\"\n\n def __init__(self, filename):\n # Call the base class constructor\n super(NodeConstructor, self).__init__()\n\n self.filename = filename\n\n # To support lazy loading, the original constructors first yield\n # an empty object, then fill them in when iterated. Due to\n # laziness we omit this behaviour (and will only do \"deep\n # construction\") by first exhausting iterators, then yielding\n # copies.\n def construct_yaml_map(self, node):\n\n # Check for duplicate keys on the current level, this is not desirable\n # because a dict does not support this. 
It overwrites it with the last\n # occurance, which can give unexpected results\n mapping = {}\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, False)\n value = self.construct_object(value_node, False)\n\n if key in mapping:\n raise CfnParseError(\n self.filename,\n 'Duplicate resource found \"{}\" (line {})'.format(key, key_node.start_mark.line + 1),\n key_node.start_mark.line, key_node.start_mark.column, key)\n mapping[key] = value\n\n obj, = SafeConstructor.construct_yaml_map(self, node)\n return dict_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_str(self, node):\n obj = SafeConstructor.construct_yaml_str(self, node)\n assert isinstance(obj, (six.string_types))\n return str_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_seq(self, node):\n obj, = SafeConstructor.construct_yaml_seq(self, node)\n assert isinstance(obj, list)\n return list_node(obj, node.start_mark, node.end_mark)\n\n def construct_yaml_null_error(self, node):\n \"\"\"Throw a null error\"\"\"\n raise CfnParseError(\n self.filename,\n 'Null value at line {0} column {1}'.format(node.start_mark.line, node.start_mark.column),\n node.start_mark.line, node.start_mark.column, ' ')\n\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:map',\n NodeConstructor.construct_yaml_map)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:str',\n NodeConstructor.construct_yaml_str)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:seq',\n NodeConstructor.construct_yaml_seq)\n\nNodeConstructor.add_constructor(\n u'tag:yaml.org,2002:null',\n NodeConstructor.construct_yaml_null_error)\n\n\nclass MarkedLoader(Reader, Scanner, Parser, Composer, NodeConstructor, Resolver):\n \"\"\"\n Class for marked loading YAML\n \"\"\"\n # pylint: disable=non-parent-init-called,super-init-not-called\n def __init__(self, stream, filename):\n Reader.__init__(self, stream)\n Scanner.__init__(self)\n if cyaml:\n Parser.__init__(self, stream)\n else:\n Parser.__init__(self)\n Composer.__init__(self)\n SafeConstructor.__init__(self)\n Resolver.__init__(self)\n NodeConstructor.__init__(self, filename)\n\n\ndef multi_constructor(loader, tag_suffix, node):\n \"\"\"\n Deal with !Ref style function format\n \"\"\"\n\n if tag_suffix not in UNCONVERTED_SUFFIXES:\n tag_suffix = '{}{}'.format(FN_PREFIX, tag_suffix)\n\n constructor = None\n if tag_suffix == 'Fn::GetAtt':\n constructor = construct_getatt\n elif isinstance(node, ScalarNode):\n constructor = loader.construct_scalar\n elif isinstance(node, SequenceNode):\n constructor = loader.construct_sequence\n elif isinstance(node, MappingNode):\n constructor = loader.construct_mapping\n else:\n raise 'Bad tag: !{}'.format(tag_suffix)\n\n return dict_node({tag_suffix: constructor(node)}, node.start_mark, node.end_mark)\n\n\ndef construct_getatt(node):\n \"\"\"\n Reconstruct !GetAtt into a list\n \"\"\"\n if isinstance(node.value, (six.string_types)):\n return list_node(node.value.split('.'), node.start_mark, node.end_mark)\n if isinstance(node.value, list):\n return list_node([s.value for s in node.value], node.start_mark, node.end_mark)\n\n raise ValueError('Unexpected node type: {}'.format(type(node.value)))\n\n\ndef loads(yaml_string, fname=None):\n \"\"\"\n Load the given YAML string\n \"\"\"\n loader = MarkedLoader(yaml_string, fname)\n loader.add_multi_constructor('!', multi_constructor)\n template = loader.get_single_data()\n # Convert an empty file to an empty dict\n if template is None:\n template = {}\n\n return 
template\n\n\ndef load(filename):\n \"\"\"\n Load the given YAML file\n \"\"\"\n with open(filename) as fp:\n return loads(fp.read(), filename)\n", "path": "src/cfnlint/decode/cfn_yaml.py"}]} | 3,639 | 426 |
gh_patches_debug_2185 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-913 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setuptools as dependency is problematic w/ pip-tools
https://github.com/googleapis/google-auth-library-python/commit/908da752d01fef728bd5cb3eb5b13f2b5c335e51 (#322) added `setuptools` as a dependency in this package. However, the [pip-tools](https://github.com/jazzband/pip-tools) package that's commonly used for pinning dependencies considers `setuptools` an unsafe dependency to have in a project at all (as discussed in #492), and as such doesn't save it in the pinned requirements file at all.
Since `google-auth` depends on Setuptools but a version couldn't have been pinned in the requirements, we're seeing
```
Collecting setuptools>=40.3.0 (from google-auth==1.19.1->our-proprietary-package==0.31.1)
Downloading https://files.pythonhosted.org/packages/b0/8b/379494d7dbd3854aa7b85b216cb0af54edcb7fce7d086ba3e35522a713cf/setuptools-50.0.0-py3-none-any.whl (783kB)
```
which wreaks havoc on Ubuntu 16.04 + Python 3.5 machines due to https://github.com/pypa/setuptools/issues/2352 / https://github.com/pypa/setuptools/issues/2350 / https://github.com/pypa/setuptools/issues/2356 ...
The workaround is to add `--allow-unsafe` or manually pin `setuptools`, but is the requirement _actually_ necessary in this package? No other package in the 48-line `requirements.txt` for this particular project would have required a version of `setuptools`.
#### Environment details
- OS: Ubuntu 16.04
- Python version: 3.5
- pip version: irrelevant
- `google-auth` version: 1.19.1
#### Steps to reproduce
1. Install `google-auth` on an Ubuntu 16.04 machine
2. It installs `setuptools==50.0.0`
3. https://github.com/pypa/setuptools/issues/2352 and friends
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 from setuptools import find_packages
19 from setuptools import setup
20
21
22 DEPENDENCIES = (
23 "cachetools>=2.0.0,<5.0",
24 "pyasn1-modules>=0.2.1",
25 # rsa==4.5 is the last version to support 2.7
26 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
27 'rsa<4.6; python_version < "3.6"',
28 'rsa>=3.1.4,<5; python_version >= "3.6"',
29 # install enum34 to support 2.7. enum34 only works up to python version 3.3.
30 'enum34>=1.1.10; python_version < "3.4"',
31 "setuptools>=40.3.0",
32 "six>=1.9.0",
33 )
34
35 extras = {
36 "aiohttp": [
37 "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'",
38 "requests >= 2.20.0, < 3.0.0dev",
39 ],
40 "pyopenssl": "pyopenssl>=20.0.0",
41 "reauth": "pyu2f>=0.1.5",
42 }
43
44 with io.open("README.rst", "r") as fh:
45 long_description = fh.read()
46
47 package_root = os.path.abspath(os.path.dirname(__file__))
48
49 version = {}
50 with open(os.path.join(package_root, "google/auth/version.py")) as fp:
51 exec(fp.read(), version)
52 version = version["__version__"]
53
54 setup(
55 name="google-auth",
56 version=version,
57 author="Google Cloud Platform",
58 author_email="[email protected]",
59 description="Google Authentication Library",
60 long_description=long_description,
61 url="https://github.com/googleapis/google-auth-library-python",
62 packages=find_packages(exclude=("tests*", "system_tests*")),
63 namespace_packages=("google",),
64 install_requires=DEPENDENCIES,
65 extras_require=extras,
66 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
67 license="Apache 2.0",
68 keywords="google auth oauth client",
69 classifiers=[
70 "Programming Language :: Python :: 3",
71 "Programming Language :: Python :: 3.6",
72 "Programming Language :: Python :: 3.7",
73 "Programming Language :: Python :: 3.8",
74 "Programming Language :: Python :: 3.9",
75 "Programming Language :: Python :: 3.10",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: POSIX",
80 "Operating System :: Microsoft :: Windows",
81 "Operating System :: MacOS :: MacOS X",
82 "Operating System :: OS Independent",
83 "Topic :: Internet :: WWW/HTTP",
84 ],
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,6 @@
'rsa>=3.1.4,<5; python_version >= "3.6"',
# install enum34 to support 2.7. enum34 only works up to python version 3.3.
'enum34>=1.1.10; python_version < "3.4"',
- "setuptools>=40.3.0",
"six>=1.9.0",
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,6 @@\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n- \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n )\n", "issue": "Setuptools as dependency is problematic w/ pip-tools\nhttps://github.com/googleapis/google-auth-library-python/commit/908da752d01fef728bd5cb3eb5b13f2b5c335e51 (#322) added `setuptools` as a dependency in this package. However, the [pip-tools](https://github.com/jazzband/pip-tools) package that's commonly used for pinning dependencies considers `setuptools` an unsafe dependency to have in a project at all (as discussed in #492), and as such doesn't save it in the pinned requirements file at all.\r\n\r\nSince `google-auth` depends on Setuptools but a version couldn't have been pinned in the requirements, we're seeing\r\n\r\n```\r\nCollecting setuptools>=40.3.0 (from google-auth==1.19.1->our-proprietary-package==0.31.1)\r\n Downloading https://files.pythonhosted.org/packages/b0/8b/379494d7dbd3854aa7b85b216cb0af54edcb7fce7d086ba3e35522a713cf/setuptools-50.0.0-py3-none-any.whl (783kB)\r\n```\r\n\r\nwhich wreaks havoc on Ubuntu 16.04 + Python 3.5 machines due to https://github.com/pypa/setuptools/issues/2352 / https://github.com/pypa/setuptools/issues/2350 / https://github.com/pypa/setuptools/issues/2356 ...\r\n\r\nThe workaround is to add `--allow-unsafe` or manually pin `setuptools`, but is the requirement _actually_ necessary in this package? No other package in the 48-line `requirements.txt` for this particular project would have required a version of `setuptools`.\r\n\r\n#### Environment details\r\n\r\n - OS: Ubuntu 16.04\r\n - Python version: 3.5\r\n - pip version: irrelevant\r\n - `google-auth` version: 1.19.1\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Install `google-auth` on an Ubuntu 16.04 machine\r\n 2. It installs `setuptools==50.0.0`\r\n 3. https://github.com/pypa/setuptools/issues/2352 and friends\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. 
enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 2,045 | 124 |
gh_patches_debug_15572 | rasdani/github-patches | git_diff | scrapy__scrapy-4042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error 302 redirection with headers location starts with 3 slash
### Description
When a 302 response returns a `Location` header that starts with three slashes, Scrapy redirects to a URL different from the one the browser ends up at.
### Steps to Reproduce
1. scrapy shell https://www.hjenglish.com/new/p1285798/
**Expected behavior:**
redirects to `https://fr.hujiang.com/new/p1285798/`, as the browser (`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36`) does.
**Actual behavior:**
redirects to `https://www.hjenglish.com/fr.hujiang.com/new/p1285798`
**Reproduces how often:**
every time
### Versions
Scrapy : 1.7.3
lxml : 4.3.2.0
libxml2 : 2.9.9
cssselect : 1.1.0
parsel : 1.5.2
w3lib : 1.20.0
Twisted : 19.7.0
Python : 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]
pyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)
cryptography : 2.6.1
Platform : Windows-10-10.0.17134-SP0
### Additional context
I checked the definition of [Location in the RFC](https://tools.ietf.org/html/rfc7231#section-7.1.2) and ended up at [reference resolution](https://tools.ietf.org/html/rfc3986#section-5.3), but I couldn't find out how a Location starting with `///` should be resolved, so I don't know why Chrome behaves this way.
The behavior of Scrapy is determined by [redirect.py#L73](https://github.com/scrapy/scrapy/blob/master/scrapy/downloadermiddlewares/redirect.py#L73), which will truncate `///` to `/`.
I'm wondering about the difference between Scrapy and the browser here...
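A quick way to compare the two resolutions, using the URLs from this report (the second form mirrors a scheme-preserving fix):
```python
from urllib.parse import urljoin, urlparse

base = "https://www.hjenglish.com/new/p1285798/"
location = "///fr.hujiang.com/new/p1285798/"

# Resolution via the standard library (what a naive join does):
print(urljoin(base, location))

# Browser-style handling: treat the leading slashes as a network-path
# reference and re-attach the scheme of the original request.
scheme = urlparse(base).scheme
print(scheme + "://" + location.lstrip("/"))   # https://fr.hujiang.com/new/p1285798/
```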
</issue>
<code>
[start of scrapy/downloadermiddlewares/redirect.py]
1 import logging
2 from six.moves.urllib.parse import urljoin
3
4 from w3lib.url import safe_url_string
5
6 from scrapy.http import HtmlResponse
7 from scrapy.utils.response import get_meta_refresh
8 from scrapy.exceptions import IgnoreRequest, NotConfigured
9
10 logger = logging.getLogger(__name__)
11
12
13 class BaseRedirectMiddleware(object):
14
15 enabled_setting = 'REDIRECT_ENABLED'
16
17 def __init__(self, settings):
18 if not settings.getbool(self.enabled_setting):
19 raise NotConfigured
20
21 self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
22 self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
23
24 @classmethod
25 def from_crawler(cls, crawler):
26 return cls(crawler.settings)
27
28 def _redirect(self, redirected, request, spider, reason):
29 ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
30 redirects = request.meta.get('redirect_times', 0) + 1
31
32 if ttl and redirects <= self.max_redirect_times:
33 redirected.meta['redirect_times'] = redirects
34 redirected.meta['redirect_ttl'] = ttl - 1
35 redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
36 [request.url]
37 redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \
38 [reason]
39 redirected.dont_filter = request.dont_filter
40 redirected.priority = request.priority + self.priority_adjust
41 logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
42 {'reason': reason, 'redirected': redirected, 'request': request},
43 extra={'spider': spider})
44 return redirected
45 else:
46 logger.debug("Discarding %(request)s: max redirections reached",
47 {'request': request}, extra={'spider': spider})
48 raise IgnoreRequest("max redirections reached")
49
50 def _redirect_request_using_get(self, request, redirect_url):
51 redirected = request.replace(url=redirect_url, method='GET', body='')
52 redirected.headers.pop('Content-Type', None)
53 redirected.headers.pop('Content-Length', None)
54 return redirected
55
56
57 class RedirectMiddleware(BaseRedirectMiddleware):
58 """
59 Handle redirection of requests based on response status
60 and meta-refresh html tag.
61 """
62 def process_response(self, request, response, spider):
63 if (request.meta.get('dont_redirect', False) or
64 response.status in getattr(spider, 'handle_httpstatus_list', []) or
65 response.status in request.meta.get('handle_httpstatus_list', []) or
66 request.meta.get('handle_httpstatus_all', False)):
67 return response
68
69 allowed_status = (301, 302, 303, 307, 308)
70 if 'Location' not in response.headers or response.status not in allowed_status:
71 return response
72
73 location = safe_url_string(response.headers['location'])
74
75 redirected_url = urljoin(request.url, location)
76
77 if response.status in (301, 307, 308) or request.method == 'HEAD':
78 redirected = request.replace(url=redirected_url)
79 return self._redirect(redirected, request, spider, response.status)
80
81 redirected = self._redirect_request_using_get(request, redirected_url)
82 return self._redirect(redirected, request, spider, response.status)
83
84
85 class MetaRefreshMiddleware(BaseRedirectMiddleware):
86
87 enabled_setting = 'METAREFRESH_ENABLED'
88
89 def __init__(self, settings):
90 super(MetaRefreshMiddleware, self).__init__(settings)
91 self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')
92 self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
93 settings.getint('METAREFRESH_MAXDELAY'))
94
95 def process_response(self, request, response, spider):
96 if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
97 not isinstance(response, HtmlResponse):
98 return response
99
100 interval, url = get_meta_refresh(response,
101 ignore_tags=self._ignore_tags)
102 if url and interval < self._maxdelay:
103 redirected = self._redirect_request_using_get(request, url)
104 return self._redirect(redirected, request, spider, 'meta refresh')
105
106 return response
107
[end of scrapy/downloadermiddlewares/redirect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py
--- a/scrapy/downloadermiddlewares/redirect.py
+++ b/scrapy/downloadermiddlewares/redirect.py
@@ -1,5 +1,5 @@
import logging
-from six.moves.urllib.parse import urljoin
+from six.moves.urllib.parse import urljoin, urlparse
from w3lib.url import safe_url_string
@@ -70,7 +70,10 @@
if 'Location' not in response.headers or response.status not in allowed_status:
return response
- location = safe_url_string(response.headers['location'])
+ location = safe_url_string(response.headers['Location'])
+ if response.headers['Location'].startswith(b'//'):
+ request_scheme = urlparse(request.url).scheme
+ location = request_scheme + '://' + location.lstrip('/')
redirected_url = urljoin(request.url, location)
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py\n--- a/scrapy/downloadermiddlewares/redirect.py\n+++ b/scrapy/downloadermiddlewares/redirect.py\n@@ -1,5 +1,5 @@\n import logging\n-from six.moves.urllib.parse import urljoin\n+from six.moves.urllib.parse import urljoin, urlparse\n \n from w3lib.url import safe_url_string\n \n@@ -70,7 +70,10 @@\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n \n- location = safe_url_string(response.headers['location'])\n+ location = safe_url_string(response.headers['Location'])\n+ if response.headers['Location'].startswith(b'//'):\n+ request_scheme = urlparse(request.url).scheme\n+ location = request_scheme + '://' + location.lstrip('/')\n \n redirected_url = urljoin(request.url, location)\n", "issue": "Error 302 redirection with headers location starts with 3 slash\n\r\n### Description\r\n\r\nwhen the 302 response return a headers's location startswith 3 slash, the scrapy redirect to a url different from what the browser do.\r\n\r\n### Steps to Reproduce\r\n\r\n1. scrapy shell https://www.hjenglish.com/new/p1285798/\r\n\r\n**Expected behavior:** \r\nredirect to `https://fr.hujiang.com/new/p1285798/` as browser `Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36` do.\r\n\r\n\r\n**Actual behavior:** \r\nredirct to `https://www.hjenglish.com/fr.hujiang.com/new/p1285798`\r\n\r\n**Reproduces how often:** \r\n\r\neverytime\r\n\r\n### Versions\r\nScrapy : 1.7.3\r\nlxml : 4.3.2.0\r\nlibxml2 : 2.9.9\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.20.0\r\nTwisted : 19.7.0\r\nPython : 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)\r\ncryptography : 2.6.1\r\nPlatform : Windows-10-10.0.17134-SP0\r\n\r\n\r\n### Additional context\r\n\r\nI check the defination of [Location in rfc](https://tools.ietf.org/html/rfc7231#section-7.1.2) and end with [reference resolution](https://tools.ietf.org/html/rfc3986#section-5.3). But I fail to findout how to resolve the Location startswith `///`. 
So I don't know why Chrome did so.\r\n\r\nThe behavior of scrapy is determined by [redirect.py#L73](https://github.com/scrapy/scrapy/blob/master/scrapy/downloadermiddlewares/redirect.py#L73), which will truncate `///` to `/`\u3002\r\n\r\nI'm wandering the differents betweent scarpy and browser...\r\n\n", "before_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \\\n [reason]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, 
self).__init__(settings)\n self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response,\n ignore_tags=self._ignore_tags)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}]} | 2,241 | 198 |
gh_patches_debug_19543 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1944 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[V3] Dev commands and discord.ext.commands
Right now, debug/eval (and repl, maybe?) expose `discord.ext.commands` as `commands`. Given that we subclass various things in this module for our use in `redbot.core.commands`, shouldn't we be exposing our version? In either case, the other is still importable, but it seems our default should be to use our version.
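A quick sketch of the distinction (hypothetical check; it assumes, per the statement above, that `redbot.core.commands` re-exports subclassed versions of the discord.py objects):

    import discord.ext.commands as dpy_commands
    from redbot.core import commands as red_commands

    # Red's Command subclasses the discord.ext.commands one, so exposing the Red
    # module in the dev cog's eval/debug environment surfaces the subclasses,
    # while discord.ext.commands stays importable by hand.
    assert issubclass(red_commands.Command, dpy_commands.Command)

Note that `dev_commands.py` below already does `from . import checks, commands`, so the `env` dict passes the Red module; the docstrings are what still describe it as the discord.py commands extension.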
</issue>
<code>
[start of redbot/core/dev_commands.py]
1 import asyncio
2 import inspect
3 import io
4 import textwrap
5 import traceback
6 from contextlib import redirect_stdout
7 from copy import copy
8
9 import discord
10 from . import checks, commands
11 from .i18n import Translator
12 from .utils.chat_formatting import box, pagify
13
14 """
15 Notice:
16
17 95% of the below code came from R.Danny which can be found here:
18
19 https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
20 """
21
22 _ = Translator("Dev", __file__)
23
24
25 class Dev:
26 """Various development focused utilities."""
27
28 def __init__(self):
29 self._last_result = None
30 self.sessions = set()
31
32 @staticmethod
33 def cleanup_code(content):
34 """Automatically removes code blocks from the code."""
35 # remove ```py\n```
36 if content.startswith("```") and content.endswith("```"):
37 return "\n".join(content.split("\n")[1:-1])
38
39 # remove `foo`
40 return content.strip("` \n")
41
42 @staticmethod
43 def get_syntax_error(e):
44 """Format a syntax error to send to the user.
45
46 Returns a string representation of the error formatted as a codeblock.
47 """
48 if e.text is None:
49 return box("{0.__class__.__name__}: {0}".format(e), lang="py")
50 return box("{0.text}{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__), lang="py")
51
52 @staticmethod
53 def get_pages(msg: str):
54 """Pagify the given message for output to the user."""
55 return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
56
57 @staticmethod
58 def sanitize_output(ctx: commands.Context, input_: str) -> str:
59 """Hides the bot's token from a string."""
60 token = ctx.bot.http.token
61 r = "[EXPUNGED]"
62 result = input_.replace(token, r)
63 result = result.replace(token.lower(), r)
64 result = result.replace(token.upper(), r)
65 return result
66
67 @commands.command()
68 @checks.is_owner()
69 async def debug(self, ctx, *, code):
70 """Evaluate a statement of python code.
71
72 The bot will always respond with the return value of the code.
73 If the return value of the code is a coroutine, it will be awaited,
74 and the result of that will be the bot's response.
75
76 Note: Only one statement may be evaluated. Using await, yield or
77 similar restricted keywords will result in a syntax error. For multiple
78 lines or asynchronous code, see [p]repl or [p]eval.
79
80 Environment Variables:
81 ctx - command invokation context
82 bot - bot object
83 channel - the current channel object
84 author - command author's member object
85 message - the command's message object
86 discord - discord.py library
87 commands - discord.py commands extension
88 _ - The result of the last dev command.
89 """
90 env = {
91 "bot": ctx.bot,
92 "ctx": ctx,
93 "channel": ctx.channel,
94 "author": ctx.author,
95 "guild": ctx.guild,
96 "message": ctx.message,
97 "discord": discord,
98 "commands": commands,
99 "_": self._last_result,
100 }
101
102 code = self.cleanup_code(code)
103
104 try:
105 result = eval(code, env)
106 except SyntaxError as e:
107 await ctx.send(self.get_syntax_error(e))
108 return
109 except Exception as e:
110 await ctx.send(box("{}: {!s}".format(type(e).__name__, e), lang="py"))
111 return
112
113 if asyncio.iscoroutine(result):
114 result = await result
115
116 self._last_result = result
117
118 result = self.sanitize_output(ctx, str(result))
119
120 await ctx.send_interactive(self.get_pages(result), box_lang="py")
121
122 @commands.command(name="eval")
123 @checks.is_owner()
124 async def _eval(self, ctx, *, body: str):
125 """Execute asynchronous code.
126
127 This command wraps code into the body of an async function and then
128 calls and awaits it. The bot will respond with anything printed to
129 stdout, as well as the return value of the function.
130
131 The code can be within a codeblock, inline code or neither, as long
132 as they are not mixed and they are formatted correctly.
133
134 Environment Variables:
135 ctx - command invokation context
136 bot - bot object
137 channel - the current channel object
138 author - command author's member object
139 message - the command's message object
140 discord - discord.py library
141 commands - discord.py commands extension
142 _ - The result of the last dev command.
143 """
144 env = {
145 "bot": ctx.bot,
146 "ctx": ctx,
147 "channel": ctx.channel,
148 "author": ctx.author,
149 "guild": ctx.guild,
150 "message": ctx.message,
151 "discord": discord,
152 "commands": commands,
153 "_": self._last_result,
154 }
155
156 body = self.cleanup_code(body)
157 stdout = io.StringIO()
158
159 to_compile = "async def func():\n%s" % textwrap.indent(body, " ")
160
161 try:
162 exec(to_compile, env)
163 except SyntaxError as e:
164 return await ctx.send(self.get_syntax_error(e))
165
166 func = env["func"]
167 result = None
168 try:
169 with redirect_stdout(stdout):
170 result = await func()
171 except:
172 printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
173 else:
174 printed = stdout.getvalue()
175 await ctx.tick()
176
177 if result is not None:
178 self._last_result = result
179 msg = "{}{}".format(printed, result)
180 else:
181 msg = printed
182 msg = self.sanitize_output(ctx, msg)
183
184 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
185
186 @commands.command()
187 @checks.is_owner()
188 async def repl(self, ctx):
189 """Open an interactive REPL.
190
191 The REPL will only recognise code as messages which start with a
192 backtick. This includes codeblocks, and as such multiple lines can be
193 evaluated.
194
195 You may not await any code in this REPL unless you define it inside an
196 async function.
197 """
198 variables = {
199 "ctx": ctx,
200 "bot": ctx.bot,
201 "message": ctx.message,
202 "guild": ctx.guild,
203 "channel": ctx.channel,
204 "author": ctx.author,
205 "_": None,
206 }
207
208 if ctx.channel.id in self.sessions:
209 await ctx.send(
210 _("Already running a REPL session in this channel. Exit it with `quit`.")
211 )
212 return
213
214 self.sessions.add(ctx.channel.id)
215 await ctx.send(_("Enter code to execute or evaluate. `exit()` or `quit` to exit."))
216
217 msg_check = lambda m: (
218 m.author == ctx.author and m.channel == ctx.channel and m.content.startswith("`")
219 )
220
221 while True:
222 response = await ctx.bot.wait_for("message", check=msg_check)
223
224 cleaned = self.cleanup_code(response.content)
225
226 if cleaned in ("quit", "exit", "exit()"):
227 await ctx.send("Exiting.")
228 self.sessions.remove(ctx.channel.id)
229 return
230
231 executor = exec
232 if cleaned.count("\n") == 0:
233 # single statement, potentially 'eval'
234 try:
235 code = compile(cleaned, "<repl session>", "eval")
236 except SyntaxError:
237 pass
238 else:
239 executor = eval
240
241 if executor is exec:
242 try:
243 code = compile(cleaned, "<repl session>", "exec")
244 except SyntaxError as e:
245 await ctx.send(self.get_syntax_error(e))
246 continue
247
248 variables["message"] = response
249
250 stdout = io.StringIO()
251
252 msg = ""
253
254 try:
255 with redirect_stdout(stdout):
256 result = executor(code, variables)
257 if inspect.isawaitable(result):
258 result = await result
259 except:
260 value = stdout.getvalue()
261 msg = "{}{}".format(value, traceback.format_exc())
262 else:
263 value = stdout.getvalue()
264 if result is not None:
265 msg = "{}{}".format(value, result)
266 variables["_"] = result
267 elif value:
268 msg = "{}".format(value)
269
270 msg = self.sanitize_output(ctx, msg)
271
272 try:
273 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
274 except discord.Forbidden:
275 pass
276 except discord.HTTPException as e:
277 await ctx.send(_("Unexpected error: `{}`").format(e))
278
279 @commands.command()
280 @checks.is_owner()
281 async def mock(self, ctx, user: discord.Member, *, command):
282 """Mock another user invoking a command.
283
284 The prefix must not be entered.
285 """
286 msg = copy(ctx.message)
287 msg.author = user
288 msg.content = ctx.prefix + command
289
290 ctx.bot.dispatch("message", msg)
291
292 @commands.command(name="mockmsg")
293 @checks.is_owner()
294 async def mock_msg(self, ctx, user: discord.Member, *, content: str):
295 """Dispatch a message event as if it were sent by a different user.
296
297 Only reads the raw content of the message. Attachments, embeds etc. are
298 ignored.
299 """
300 old_author = ctx.author
301 old_content = ctx.message.content
302 ctx.message.author = user
303 ctx.message.content = content
304
305 ctx.bot.dispatch("message", ctx.message)
306
307 # If we change the author and content back too quickly,
308 # the bot won't process the mocked message in time.
309 await asyncio.sleep(2)
310 ctx.message.author = old_author
311 ctx.message.content = old_content
312
[end of redbot/core/dev_commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py
--- a/redbot/core/dev_commands.py
+++ b/redbot/core/dev_commands.py
@@ -84,7 +84,7 @@
author - command author's member object
message - the command's message object
discord - discord.py library
- commands - discord.py commands extension
+ commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = {
@@ -138,7 +138,7 @@
author - command author's member object
message - the command's message object
discord - discord.py library
- commands - discord.py commands extension
+ commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = {
| {"golden_diff": "diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py\n--- a/redbot/core/dev_commands.py\n+++ b/redbot/core/dev_commands.py\n@@ -84,7 +84,7 @@\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n- commands - discord.py commands extension\n+ commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n@@ -138,7 +138,7 @@\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n- commands - discord.py commands extension\n+ commands - redbot.core.commands\n _ - The result of the last dev command.\n \"\"\"\n env = {\n", "issue": "[V3] Dev commands and discord.ext.commands\n\r\nRight now, debug/eval (and repl, maybe?) expose `discord.ext.commands` as `commands` Given that we subclass various things in this for our use in `redbot.core.commands`, shouldn't we be exposing our version? In either case, the other is still importable, but it seems our default should be to use our version.\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\nfrom . import checks, commands\nfrom .i18n import Translator\nfrom .utils.chat_formatting import box, pagify\n\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = Translator(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith(\"```\") and content.endswith(\"```\"):\n return \"\\n\".join(content.split(\"\\n\")[1:-1])\n\n # remove `foo`\n return content.strip(\"` \\n\")\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box(\"{0.__class__.__name__}: {0}\".format(e), lang=\"py\")\n return box(\"{0.text}{1:>{0.offset}}\\n{2}: {0}\".format(e, \"^\", type(e).__name__), lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. 
For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(box(\"{}: {!s}\".format(type(e).__name__, e), lang=\"py\"))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name=\"eval\")\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n \"bot\": ctx.bot,\n \"ctx\": ctx,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"guild\": ctx.guild,\n \"message\": ctx.message,\n \"discord\": discord,\n \"commands\": commands,\n \"_\": self._last_result,\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = \"async def func():\\n%s\" % textwrap.indent(body, \" \")\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env[\"func\"]\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n \"ctx\": ctx,\n \"bot\": ctx.bot,\n \"message\": ctx.message,\n \"guild\": ctx.guild,\n \"channel\": ctx.channel,\n \"author\": ctx.author,\n \"_\": None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(\n _(\"Already running a REPL session in this channel. 
Exit it with `quit`.\")\n )\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_(\"Enter code to execute or evaluate. `exit()` or `quit` to exit.\"))\n\n msg_check = lambda m: (\n m.author == ctx.author and m.channel == ctx.channel and m.content.startswith(\"`\")\n )\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in (\"quit\", \"exit\", \"exit()\"):\n await ctx.send(\"Exiting.\")\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count(\"\\n\") == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, \"<repl session>\", \"eval\")\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, \"<repl session>\", \"exec\")\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables[\"message\"] = response\n\n stdout = io.StringIO()\n\n msg = \"\"\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables[\"_\"] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_(\"Unexpected error: `{}`\").format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch(\"message\", msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}]} | 3,591 | 192 |
gh_patches_debug_44181 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-1851 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] FedOpt algorithm not working as expected in cifar10 example
**Describe the bug**
The FedOpt algorithm is not working as expected in the cifar10 example when I change the model from the pre-existing ModerateCNN to another model such as MobileNetV2 or ResNet18. The problem is that the accuracy of the global model does not increase, or increases too slowly, with the FedOpt algorithm, while the other algorithms work just fine even after changing the model.
**To Reproduce**
1. Add in 'cifar10_nets.py' the new model:

       class MyModel(nn.Module):
           def __init__(self):
               super(MyModel, self).__init__()
               model = models.mobilenet_v2(weights='DEFAULT')
               model.classifier = nn.Sequential(
                   nn.Dropout(0.4),
                   nn.Linear(1280, 10),
               )
               self.model = model

           def forward(self, x):
               return self.model(x)
2. Import and change the model in file 'cifar10_learner.py'
3. Launch the example with `./run_simulator.sh cifar10_fedopt 0.1 8 8`
4. See the results in tensorboard with `tensorboard --logdir=/tmp/nvflare/sim_cifar10` under the section 'val_acc_global_model'
**Expected behavior**
Reading the algorithm proposed in Reddi, Sashank, et al., "Adaptive federated optimization," arXiv preprint arXiv:2003.00295 (2020), I expect to obtain the same performance as FedAvg when using the SGD optimizer with lr = 1.0 and no scheduler, and to obtain better results when changing the optimizer and adding a scheduler.
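A minimal sketch of why that equivalence is expected (toy tensors, mirroring the `server_update` step in the `fedopt.py` listing below):

    import torch

    w = torch.nn.Parameter(torch.zeros(3))    # one global model parameter
    delta = torch.tensor([0.1, -0.2, 0.3])    # aggregated client weight diff

    opt = torch.optim.SGD([w], lr=1.0)
    w.grad = -1.0 * delta                     # the diff is applied as a negative gradient
    opt.step()                                # w <- w - 1.0 * (-delta) = w + delta

    assert torch.allclose(w.detach(), delta)  # identical to the FedAvg update

With plain SGD at lr = 1.0 this holds for every tensor the optimizer sees; tensors that are not in `named_parameters()` (e.g. BatchNorm running statistics in MobileNetV2/ResNet18, which are buffers) never receive the aggregated update in `server_update`'s parameter loop, which would explain a gap like the one in the plot.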
**Screenshots**

Purple = FedAvg
Pink = FedOpt
**Desktop (please complete the following information):**
- OS: ubuntu 22.04
- Python Version 3.10
- NVFlare Version 2.3.0
Thanks in advance!
</issue>
<code>
[start of nvflare/app_opt/pt/fedopt.py]
1 # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import time
16
17 import torch
18
19 from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
20 from nvflare.apis.event_type import EventType
21 from nvflare.apis.fl_context import FLContext
22 from nvflare.apis.shareable import Shareable
23 from nvflare.app_common.abstract.learnable import Learnable
24 from nvflare.app_common.abstract.model import make_model_learnable
25 from nvflare.app_common.app_constant import AppConstants
26 from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator
27 from nvflare.security.logging import secure_format_exception
28
29
30 class PTFedOptModelShareableGenerator(FullModelShareableGenerator):
31 def __init__(
32 self,
33 optimizer_args: dict = None,
34 lr_scheduler_args: dict = None,
35 source_model="model",
36 device=None,
37 ):
38 """Implement the FedOpt algorithm.
39
40 The algorithm is proposed in Reddi, Sashank, et al. "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020).
41 This SharableGenerator will update the global model using the specified
42 PyTorch optimizer and learning rate scheduler.
43
44 Args:
45 optimizer_args: dictionary of optimizer arguments, e.g.
46 {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}} (default).
47 lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, e.g.
48 {'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}} (default: None).
49 source_model: either a valid torch model object or a component ID of a torch model object
50 device: specify the device to run server-side optimization, e.g. "cpu" or "cuda:0"
51 (will default to cuda if available and no device is specified).
52
53 Raises:
54 TypeError: when any of input arguments does not have correct type
55 """
56 super().__init__()
57 if not optimizer_args:
58 self.logger("No optimizer_args provided. Using FedOpt with SGD and lr 1.0")
59 optimizer_args = {"name": "SGD", "args": {"lr": 1.0}}
60
61 if not isinstance(optimizer_args, dict):
62 raise TypeError(
63 "optimizer_args must be a dict of format, e.g. {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}}."
64 )
65 if lr_scheduler_args is not None:
66 if not isinstance(lr_scheduler_args, dict):
67 raise TypeError(
68 "optimizer_args must be a dict of format, e.g. "
69 "{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}}."
70 )
71 self.source_model = source_model
72 self.optimizer_args = optimizer_args
73 self.lr_scheduler_args = lr_scheduler_args
74 self.model = None
75 self.optimizer = None
76 self.lr_scheduler = None
77 if device is None:
78 self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
79 else:
80 self.device = torch.device(device)
81 self.optimizer_name = None
82 self.lr_scheduler_name = None
83
84 def _get_component_name(self, component_args):
85 if component_args is not None:
86 name = component_args.get("path", None)
87 if name is None:
88 name = component_args.get("name", None)
89 return name
90 else:
91 return None
92
93 def handle_event(self, event_type: str, fl_ctx: FLContext):
94 if event_type == EventType.START_RUN:
95 # Initialize the optimizer with current global model params
96 engine = fl_ctx.get_engine()
97
98 if isinstance(self.source_model, str):
99 self.model = engine.get_component(self.source_model)
100 else:
101 self.model = self.source_model
102
103 if self.model is None:
104 self.system_panic(
105 "Model is not available",
106 fl_ctx,
107 )
108 return
109 elif not isinstance(self.model, torch.nn.Module):
110 self.system_panic(
111 f"Expected model to be a torch.nn.Module but got {type(self.model)}",
112 fl_ctx,
113 )
114 return
115 else:
116 print("server model", self.model)
117
118 self.model.to(self.device)
119
120 # set up optimizer
121 try:
122 # use provided or default optimizer arguments and add the model parameters
123 if "args" not in self.optimizer_args:
124 self.optimizer_args["args"] = {}
125 self.optimizer_args["args"]["params"] = self.model.parameters()
126 self.optimizer = engine.build_component(self.optimizer_args)
127 # get optimizer name for log
128 self.optimizer_name = self._get_component_name(self.optimizer_args)
129 except Exception as e:
130 self.system_panic(
131 f"Exception while parsing `optimizer_args`({self.optimizer_args}): {secure_format_exception(e)}",
132 fl_ctx,
133 )
134 return
135
136 # set up lr scheduler
137 if self.lr_scheduler_args is not None:
138 try:
139 self.lr_scheduler_name = self._get_component_name(self.lr_scheduler_args)
140 # use provided or default lr scheduler argument and add the optimizer
141 if "args" not in self.lr_scheduler_args:
142 self.lr_scheduler_args["args"] = {}
143 self.lr_scheduler_args["args"]["optimizer"] = self.optimizer
144 self.lr_scheduler = engine.build_component(self.lr_scheduler_args)
145 except Exception as e:
146 self.system_panic(
147 f"Exception while parsing `lr_scheduler_args`({self.lr_scheduler_args}): {secure_format_exception(e)}",
148 fl_ctx,
149 )
150 return
151
152 def server_update(self, model_diff):
153 """Updates the global model using the specified optimizer.
154
155 Args:
156 model_diff: the aggregated model differences from clients.
157
158 Returns:
159 The updated PyTorch model state dictionary.
160
161 """
162 self.model.train()
163 self.optimizer.zero_grad()
164
165 # Apply the update to the model. We must multiply weights_delta by -1.0 to
166 # view it as a gradient that should be applied to the server_optimizer.
167 for name, param in self.model.named_parameters():
168 param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)
169
170 self.optimizer.step()
171 if self.lr_scheduler is not None:
172 self.lr_scheduler.step()
173
174 return self.model.state_dict()
175
176 def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:
177 """Convert Shareable to Learnable while doing a FedOpt update step.
178
179 Supporting data_kind == DataKind.WEIGHT_DIFF
180
181 Args:
182 shareable (Shareable): Shareable to be converted
183 fl_ctx (FLContext): FL context
184
185 Returns:
186 Model: Updated global ModelLearnable.
187 """
188 # check types
189 dxo = from_shareable(shareable)
190
191 if dxo.data_kind != DataKind.WEIGHT_DIFF:
192 self.system_panic(
193 "FedOpt is only implemented for " "data_kind == DataKind.WEIGHT_DIFF",
194 fl_ctx,
195 )
196 return Learnable()
197
198 processed_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)
199 if processed_algorithm is not None:
200 self.system_panic(
201 f"FedOpt is not implemented for shareable processed by {processed_algorithm}",
202 fl_ctx,
203 )
204 return Learnable()
205
206 model_diff = dxo.data
207
208 start = time.time()
209 weights = self.server_update(model_diff)
210 secs = time.time() - start
211
212 # convert to numpy dict of weights
213 start = time.time()
214 for key in weights:
215 weights[key] = weights[key].detach().cpu().numpy()
216 secs_detach = time.time() - start
217
218 self.log_info(
219 fl_ctx,
220 f"FedOpt ({self.optimizer_name}, {self.device}) server model update "
221 f"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, "
222 f"{self.lr_scheduler_name if self.lr_scheduler_name else ''} "
223 f"lr: {self.optimizer.param_groups[-1]['lr']}, "
224 f"update: {secs} secs., detach: {secs_detach} secs.",
225 )
226 # TODO: write server-side lr to tensorboard
227
228 return make_model_learnable(weights, dxo.get_meta_props())
229
[end of nvflare/app_opt/pt/fedopt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/app_opt/pt/fedopt.py b/nvflare/app_opt/pt/fedopt.py
--- a/nvflare/app_opt/pt/fedopt.py
+++ b/nvflare/app_opt/pt/fedopt.py
@@ -21,7 +21,7 @@
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.learnable import Learnable
-from nvflare.app_common.abstract.model import make_model_learnable
+from nvflare.app_common.abstract.model import ModelLearnableKey, make_model_learnable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator
from nvflare.security.logging import secure_format_exception
@@ -40,6 +40,8 @@
The algorithm is proposed in Reddi, Sashank, et al. "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020).
This SharableGenerator will update the global model using the specified
PyTorch optimizer and learning rate scheduler.
+ Note: This class will use FedOpt to optimize the global trainable parameters (i.e. `self.model.named_parameters()`)
+ but use FedAvg to update any other layers such as batch norm statistics.
Args:
optimizer_args: dictionary of optimizer arguments, e.g.
@@ -164,14 +166,17 @@
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
+ updated_params = []
for name, param in self.model.named_parameters():
- param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)
+ if name in model_diff:
+ param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)
+ updated_params.append(name)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
- return self.model.state_dict()
+ return self.model.state_dict(), updated_params
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:
"""Convert Shareable to Learnable while doing a FedOpt update step.
@@ -206,7 +211,7 @@
model_diff = dxo.data
start = time.time()
- weights = self.server_update(model_diff)
+ weights, updated_params = self.server_update(model_diff)
secs = time.time() - start
# convert to numpy dict of weights
@@ -215,12 +220,28 @@
weights[key] = weights[key].detach().cpu().numpy()
secs_detach = time.time() - start
+ # update unnamed parameters such as batch norm layers if there are any using the averaged update
+ base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
+ if not base_model:
+ self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
+ return base_model
+
+ base_model_weights = base_model[ModelLearnableKey.WEIGHTS]
+
+ n_fedavg = 0
+ for key, value in model_diff.items():
+ if key not in updated_params:
+ weights[key] = base_model_weights[key] + value
+ n_fedavg += 1
+
self.log_info(
fl_ctx,
f"FedOpt ({self.optimizer_name}, {self.device}) server model update "
f"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, "
f"{self.lr_scheduler_name if self.lr_scheduler_name else ''} "
f"lr: {self.optimizer.param_groups[-1]['lr']}, "
+ f"fedopt layers: {len(updated_params)}, "
+ f"fedavg layers: {n_fedavg}, "
f"update: {secs} secs., detach: {secs_detach} secs.",
)
# TODO: write server-side lr to tensorboard
| {"golden_diff": "diff --git a/nvflare/app_opt/pt/fedopt.py b/nvflare/app_opt/pt/fedopt.py\n--- a/nvflare/app_opt/pt/fedopt.py\n+++ b/nvflare/app_opt/pt/fedopt.py\n@@ -21,7 +21,7 @@\n from nvflare.apis.fl_context import FLContext\n from nvflare.apis.shareable import Shareable\n from nvflare.app_common.abstract.learnable import Learnable\n-from nvflare.app_common.abstract.model import make_model_learnable\n+from nvflare.app_common.abstract.model import ModelLearnableKey, make_model_learnable\n from nvflare.app_common.app_constant import AppConstants\n from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator\n from nvflare.security.logging import secure_format_exception\n@@ -40,6 +40,8 @@\n The algorithm is proposed in Reddi, Sashank, et al. \"Adaptive federated optimization.\" arXiv preprint arXiv:2003.00295 (2020).\n This SharableGenerator will update the global model using the specified\n PyTorch optimizer and learning rate scheduler.\n+ Note: This class will use FedOpt to optimize the global trainable parameters (i.e. `self.model.named_parameters()`)\n+ but use FedAvg to update any other layers such as batch norm statistics.\n \n Args:\n optimizer_args: dictionary of optimizer arguments, e.g.\n@@ -164,14 +166,17 @@\n \n # Apply the update to the model. We must multiply weights_delta by -1.0 to\n # view it as a gradient that should be applied to the server_optimizer.\n+ updated_params = []\n for name, param in self.model.named_parameters():\n- param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)\n+ if name in model_diff:\n+ param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)\n+ updated_params.append(name)\n \n self.optimizer.step()\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n \n- return self.model.state_dict()\n+ return self.model.state_dict(), updated_params\n \n def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:\n \"\"\"Convert Shareable to Learnable while doing a FedOpt update step.\n@@ -206,7 +211,7 @@\n model_diff = dxo.data\n \n start = time.time()\n- weights = self.server_update(model_diff)\n+ weights, updated_params = self.server_update(model_diff)\n secs = time.time() - start\n \n # convert to numpy dict of weights\n@@ -215,12 +220,28 @@\n weights[key] = weights[key].detach().cpu().numpy()\n secs_detach = time.time() - start\n \n+ # update unnamed parameters such as batch norm layers if there are any using the averaged update\n+ base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)\n+ if not base_model:\n+ self.system_panic(reason=\"No global base model!\", fl_ctx=fl_ctx)\n+ return base_model\n+\n+ base_model_weights = base_model[ModelLearnableKey.WEIGHTS]\n+\n+ n_fedavg = 0\n+ for key, value in model_diff.items():\n+ if key not in updated_params:\n+ weights[key] = base_model_weights[key] + value\n+ n_fedavg += 1\n+\n self.log_info(\n fl_ctx,\n f\"FedOpt ({self.optimizer_name}, {self.device}) server model update \"\n f\"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, \"\n f\"{self.lr_scheduler_name if self.lr_scheduler_name else ''} \"\n f\"lr: {self.optimizer.param_groups[-1]['lr']}, \"\n+ f\"fedopt layers: {len(updated_params)}, \"\n+ f\"fedavg layers: {n_fedavg}, \"\n f\"update: {secs} secs., detach: {secs_detach} secs.\",\n )\n # TODO: write server-side lr to tensorboard\n", "issue": "[BUG] FedOpt algorithm not working as expected in cifar10 example\n**Describe the bug**\r\nThe FedOpt algorithm is not working as expected in cifar10 
example when I change the model from the pre-existing ModerateCNN to another model like MobileNetv2 or Resnet18 and others. The problem is that the accuracy of the global model is not increasing or increasing too slow with the FedOpt algorithm while the other algorithms works just fine even changing the model.\r\n\r\n**To Reproduce**\r\n1. Add in 'cifar10_nets.py' the new model : \r\n class MyModel(nn.Module):\r\n def __init__(self):\r\n super(MyModel, self).__init__()\r\n model = models.mobilenet_v2(weights='DEFAULT')\r\n model.classifier = nn.Sequential(\r\n nn.Dropout(0.4),\r\n nn.Linear(1280, 10),\r\n )\r\n self.model = model\r\n \r\n def forward(self, x):\r\n return self.model(x)\r\n2. Import and change the model in file 'cifar10_learner.py'\r\n3. Launch the example with `./run_simulator.sh cifar10_fedopt 0.1 8 8`\r\n4. See the results in tensorboard with `tensorboard --logdir=/tmp/nvflare/sim_cifar10` under the section 'val_acc_global_model'\r\n\r\n**Expected behavior**\r\nI expect reading the algorithm proposed in Reddi, Sashank, et al. \"Adaptive federated optimization.\" arXiv preprint arXiv:2003.00295 (2020), to obtain the same performance of FedAvg using SGD optimizer with lr = 1.0 and no scheduler. Also obtain better results changing optimizer and adding a scheduler.\r\n\r\n**Screenshots**\r\n\r\nPurple = FedAvg\r\nPink = FedOpt\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: ubuntu 22.04\r\n - Python Version 3.10\r\n - NVFlare Version 2.3.0\r\n\r\nTy in advance!\r\n\n", "before_files": [{"content": "# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nimport torch\n\nfrom nvflare.apis.dxo import DataKind, MetaKey, from_shareable\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\nfrom nvflare.app_common.abstract.learnable import Learnable\nfrom nvflare.app_common.abstract.model import make_model_learnable\nfrom nvflare.app_common.app_constant import AppConstants\nfrom nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator\nfrom nvflare.security.logging import secure_format_exception\n\n\nclass PTFedOptModelShareableGenerator(FullModelShareableGenerator):\n def __init__(\n self,\n optimizer_args: dict = None,\n lr_scheduler_args: dict = None,\n source_model=\"model\",\n device=None,\n ):\n \"\"\"Implement the FedOpt algorithm.\n\n The algorithm is proposed in Reddi, Sashank, et al. 
\"Adaptive federated optimization.\" arXiv preprint arXiv:2003.00295 (2020).\n This SharableGenerator will update the global model using the specified\n PyTorch optimizer and learning rate scheduler.\n\n Args:\n optimizer_args: dictionary of optimizer arguments, e.g.\n {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}} (default).\n lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, e.g.\n {'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}} (default: None).\n source_model: either a valid torch model object or a component ID of a torch model object\n device: specify the device to run server-side optimization, e.g. \"cpu\" or \"cuda:0\"\n (will default to cuda if available and no device is specified).\n\n Raises:\n TypeError: when any of input arguments does not have correct type\n \"\"\"\n super().__init__()\n if not optimizer_args:\n self.logger(\"No optimizer_args provided. Using FedOpt with SGD and lr 1.0\")\n optimizer_args = {\"name\": \"SGD\", \"args\": {\"lr\": 1.0}}\n\n if not isinstance(optimizer_args, dict):\n raise TypeError(\n \"optimizer_args must be a dict of format, e.g. {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}}.\"\n )\n if lr_scheduler_args is not None:\n if not isinstance(lr_scheduler_args, dict):\n raise TypeError(\n \"optimizer_args must be a dict of format, e.g. \"\n \"{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}}.\"\n )\n self.source_model = source_model\n self.optimizer_args = optimizer_args\n self.lr_scheduler_args = lr_scheduler_args\n self.model = None\n self.optimizer = None\n self.lr_scheduler = None\n if device is None:\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n else:\n self.device = torch.device(device)\n self.optimizer_name = None\n self.lr_scheduler_name = None\n\n def _get_component_name(self, component_args):\n if component_args is not None:\n name = component_args.get(\"path\", None)\n if name is None:\n name = component_args.get(\"name\", None)\n return name\n else:\n return None\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n if event_type == EventType.START_RUN:\n # Initialize the optimizer with current global model params\n engine = fl_ctx.get_engine()\n\n if isinstance(self.source_model, str):\n self.model = engine.get_component(self.source_model)\n else:\n self.model = self.source_model\n\n if self.model is None:\n self.system_panic(\n \"Model is not available\",\n fl_ctx,\n )\n return\n elif not isinstance(self.model, torch.nn.Module):\n self.system_panic(\n f\"Expected model to be a torch.nn.Module but got {type(self.model)}\",\n fl_ctx,\n )\n return\n else:\n print(\"server model\", self.model)\n\n self.model.to(self.device)\n\n # set up optimizer\n try:\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.optimizer_args:\n self.optimizer_args[\"args\"] = {}\n self.optimizer_args[\"args\"][\"params\"] = self.model.parameters()\n self.optimizer = engine.build_component(self.optimizer_args)\n # get optimizer name for log\n self.optimizer_name = self._get_component_name(self.optimizer_args)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `optimizer_args`({self.optimizer_args}): {secure_format_exception(e)}\",\n fl_ctx,\n )\n return\n\n # set up lr scheduler\n if self.lr_scheduler_args is not None:\n try:\n self.lr_scheduler_name = self._get_component_name(self.lr_scheduler_args)\n # use provided or default lr scheduler 
argument and add the optimizer\n if \"args\" not in self.lr_scheduler_args:\n self.lr_scheduler_args[\"args\"] = {}\n self.lr_scheduler_args[\"args\"][\"optimizer\"] = self.optimizer\n self.lr_scheduler = engine.build_component(self.lr_scheduler_args)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `lr_scheduler_args`({self.lr_scheduler_args}): {secure_format_exception(e)}\",\n fl_ctx,\n )\n return\n\n def server_update(self, model_diff):\n \"\"\"Updates the global model using the specified optimizer.\n\n Args:\n model_diff: the aggregated model differences from clients.\n\n Returns:\n The updated PyTorch model state dictionary.\n\n \"\"\"\n self.model.train()\n self.optimizer.zero_grad()\n\n # Apply the update to the model. We must multiply weights_delta by -1.0 to\n # view it as a gradient that should be applied to the server_optimizer.\n for name, param in self.model.named_parameters():\n param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)\n\n self.optimizer.step()\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return self.model.state_dict()\n\n def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:\n \"\"\"Convert Shareable to Learnable while doing a FedOpt update step.\n\n Supporting data_kind == DataKind.WEIGHT_DIFF\n\n Args:\n shareable (Shareable): Shareable to be converted\n fl_ctx (FLContext): FL context\n\n Returns:\n Model: Updated global ModelLearnable.\n \"\"\"\n # check types\n dxo = from_shareable(shareable)\n\n if dxo.data_kind != DataKind.WEIGHT_DIFF:\n self.system_panic(\n \"FedOpt is only implemented for \" \"data_kind == DataKind.WEIGHT_DIFF\",\n fl_ctx,\n )\n return Learnable()\n\n processed_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)\n if processed_algorithm is not None:\n self.system_panic(\n f\"FedOpt is not implemented for shareable processed by {processed_algorithm}\",\n fl_ctx,\n )\n return Learnable()\n\n model_diff = dxo.data\n\n start = time.time()\n weights = self.server_update(model_diff)\n secs = time.time() - start\n\n # convert to numpy dict of weights\n start = time.time()\n for key in weights:\n weights[key] = weights[key].detach().cpu().numpy()\n secs_detach = time.time() - start\n\n self.log_info(\n fl_ctx,\n f\"FedOpt ({self.optimizer_name}, {self.device}) server model update \"\n f\"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, \"\n f\"{self.lr_scheduler_name if self.lr_scheduler_name else ''} \"\n f\"lr: {self.optimizer.param_groups[-1]['lr']}, \"\n f\"update: {secs} secs., detach: {secs_detach} secs.\",\n )\n # TODO: write server-side lr to tensorboard\n\n return make_model_learnable(weights, dxo.get_meta_props())\n", "path": "nvflare/app_opt/pt/fedopt.py"}]} | 3,590 | 916 |
gh_patches_debug_25648 | rasdani/github-patches | git_diff | huggingface__transformers-10688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hf_argparser doesn't set the required flag on non-defaulted enums
## Environment info
- `transformers` version: 3.0.0-4.3.3
- Platform: macOS
- Python version: 3.9
- PyTorch version (GPU?): n/a
- Tensorflow version (GPU?): n/a
- Using GPU in script?: n/a
- Using distributed or parallel set-up in script?: n/a
### Who can help
I'm not sure who the owner is of hf_argparser.
## Information
We're using hf_argparser to parse our experiment config into dataclasses before training.
## To reproduce
Steps to reproduce the behavior:
1. Add an enum argument without a default to a dataclass
2. Parse the command line arguments without supplying the enum argument
3. An exception should have been raised and the usage printed; instead, the value silently defaults to `None`.
## Expected behavior
It should raise an exception. The issue is on https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L100, the if statement should have an else which sets `kwargs["required"]=True`, the same way line [134](https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L134) does.
I can work up a patch if you agree this is an issue. I think it will also occur with anything that falls into [this branch](https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L118) of the if too.
</issue>
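A minimal sketch of the reported behaviour, assuming transformers 4.3.x as in the report; the `Mode` enum and `ExperimentConfig` dataclass are illustrative names, and the parser is the one shown in the listing below:

```python
from dataclasses import dataclass
from enum import Enum

from transformers import HfArgumentParser


class Mode(Enum):
    TRAIN = "train"
    EVAL = "eval"


@dataclass
class ExperimentConfig:
    mode: Mode  # enum field with no default, so --mode should be required


parser = HfArgumentParser(ExperimentConfig)
(config,) = parser.parse_args_into_dataclasses(args=[])
print(config.mode)  # prints None -- no "required" error is raised
```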
<code>
[start of src/transformers/hf_argparser.py]
1 # Copyright 2020 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import dataclasses
16 import json
17 import re
18 import sys
19 from argparse import ArgumentParser, ArgumentTypeError
20 from enum import Enum
21 from pathlib import Path
22 from typing import Any, Iterable, List, NewType, Optional, Tuple, Union
23
24
25 DataClass = NewType("DataClass", Any)
26 DataClassType = NewType("DataClassType", Any)
27
28
29 # From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
30 def string_to_bool(v):
31 if isinstance(v, bool):
32 return v
33 if v.lower() in ("yes", "true", "t", "y", "1"):
34 return True
35 elif v.lower() in ("no", "false", "f", "n", "0"):
36 return False
37 else:
38 raise ArgumentTypeError(
39 f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
40 )
41
42
43 class HfArgumentParser(ArgumentParser):
44 """
45 This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
46
47 The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
48 arguments to the parser after initialization and you'll get the output back after parsing as an additional
49 namespace.
50 """
51
52 dataclass_types: Iterable[DataClassType]
53
54 def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
55 """
56 Args:
57 dataclass_types:
58 Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
59 kwargs:
60 (Optional) Passed to `argparse.ArgumentParser()` in the regular way.
61 """
62 super().__init__(**kwargs)
63 if dataclasses.is_dataclass(dataclass_types):
64 dataclass_types = [dataclass_types]
65 self.dataclass_types = dataclass_types
66 for dtype in self.dataclass_types:
67 self._add_dataclass_arguments(dtype)
68
69 def _add_dataclass_arguments(self, dtype: DataClassType):
70 for field in dataclasses.fields(dtype):
71 if not field.init:
72 continue
73 field_name = f"--{field.name}"
74 kwargs = field.metadata.copy()
75 # field.metadata is not used at all by Data Classes,
76 # it is provided as a third-party extension mechanism.
77 if isinstance(field.type, str):
78 raise ImportError(
79 "This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
80 "which can be opted in from Python 3.7 with `from __future__ import annotations`."
81 "We will add compatibility when Python 3.9 is released."
82 )
83 typestring = str(field.type)
84 for prim_type in (int, float, str):
85 for collection in (List,):
86 if (
87 typestring == f"typing.Union[{collection[prim_type]}, NoneType]"
88 or typestring == f"typing.Optional[{collection[prim_type]}]"
89 ):
90 field.type = collection[prim_type]
91 if (
92 typestring == f"typing.Union[{prim_type.__name__}, NoneType]"
93 or typestring == f"typing.Optional[{prim_type.__name__}]"
94 ):
95 field.type = prim_type
96
97 if isinstance(field.type, type) and issubclass(field.type, Enum):
98 kwargs["choices"] = [x.value for x in field.type]
99 kwargs["type"] = type(kwargs["choices"][0])
100 if field.default is not dataclasses.MISSING:
101 kwargs["default"] = field.default
102 elif field.type is bool or field.type == Optional[bool]:
103 if field.default is True:
104 self.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **kwargs)
105
106 # Hack because type=bool in argparse does not behave as we want.
107 kwargs["type"] = string_to_bool
108 if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
109 # Default value is True if we have no default when of type bool.
110 default = True if field.default is dataclasses.MISSING else field.default
111 # This is the value that will get picked if we don't include --field_name in any way
112 kwargs["default"] = default
113 # This tells argparse we accept 0 or 1 value after --field_name
114 kwargs["nargs"] = "?"
115 # This is the value that will get picked if we do --field_name (without value)
116 kwargs["const"] = True
117 elif (
118 hasattr(field.type, "__origin__") and re.search(r"^typing\.List\[(.*)\]$", str(field.type)) is not None
119 ):
120 kwargs["nargs"] = "+"
121 kwargs["type"] = field.type.__args__[0]
122 assert all(
123 x == kwargs["type"] for x in field.type.__args__
124 ), "{} cannot be a List of mixed types".format(field.name)
125 if field.default_factory is not dataclasses.MISSING:
126 kwargs["default"] = field.default_factory()
127 else:
128 kwargs["type"] = field.type
129 if field.default is not dataclasses.MISSING:
130 kwargs["default"] = field.default
131 elif field.default_factory is not dataclasses.MISSING:
132 kwargs["default"] = field.default_factory()
133 else:
134 kwargs["required"] = True
135 self.add_argument(field_name, **kwargs)
136
137 def parse_args_into_dataclasses(
138 self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None
139 ) -> Tuple[DataClass, ...]:
140 """
141 Parse command-line args into instances of the specified dataclass types.
142
143 This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
144 docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
145
146 Args:
147 args:
148 List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
149 return_remaining_strings:
150 If true, also return a list of remaining argument strings.
151 look_for_args_file:
152 If true, will look for a ".args" file with the same base name as the entry point script for this
153 process, and will append its potential content to the command line args.
154 args_filename:
155 If not None, will uses this file instead of the ".args" file specified in the previous argument.
156
157 Returns:
158 Tuple consisting of:
159
160 - the dataclass instances in the same order as they were passed to the initializer.abspath
161 - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
162 after initialization.
163 - The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
164 """
165 if args_filename or (look_for_args_file and len(sys.argv)):
166 if args_filename:
167 args_file = Path(args_filename)
168 else:
169 args_file = Path(sys.argv[0]).with_suffix(".args")
170
171 if args_file.exists():
172 fargs = args_file.read_text().split()
173 args = fargs + args if args is not None else fargs + sys.argv[1:]
174 # in case of duplicate arguments the first one has precedence
175 # so we append rather than prepend.
176 namespace, remaining_args = self.parse_known_args(args=args)
177 outputs = []
178 for dtype in self.dataclass_types:
179 keys = {f.name for f in dataclasses.fields(dtype) if f.init}
180 inputs = {k: v for k, v in vars(namespace).items() if k in keys}
181 for k in keys:
182 delattr(namespace, k)
183 obj = dtype(**inputs)
184 outputs.append(obj)
185 if len(namespace.__dict__) > 0:
186 # additional namespace.
187 outputs.append(namespace)
188 if return_remaining_strings:
189 return (*outputs, remaining_args)
190 else:
191 if remaining_args:
192 raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
193
194 return (*outputs,)
195
196 def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:
197 """
198 Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
199 dataclass types.
200 """
201 data = json.loads(Path(json_file).read_text())
202 outputs = []
203 for dtype in self.dataclass_types:
204 keys = {f.name for f in dataclasses.fields(dtype) if f.init}
205 inputs = {k: v for k, v in data.items() if k in keys}
206 obj = dtype(**inputs)
207 outputs.append(obj)
208 return (*outputs,)
209
210 def parse_dict(self, args: dict) -> Tuple[DataClass, ...]:
211 """
212 Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
213 types.
214 """
215 outputs = []
216 for dtype in self.dataclass_types:
217 keys = {f.name for f in dataclasses.fields(dtype) if f.init}
218 inputs = {k: v for k, v in args.items() if k in keys}
219 obj = dtype(**inputs)
220 outputs.append(obj)
221 return (*outputs,)
222
[end of src/transformers/hf_argparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/transformers/hf_argparser.py b/src/transformers/hf_argparser.py
--- a/src/transformers/hf_argparser.py
+++ b/src/transformers/hf_argparser.py
@@ -99,6 +99,8 @@
kwargs["type"] = type(kwargs["choices"][0])
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
+ else:
+ kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
if field.default is True:
self.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **kwargs)
@@ -124,6 +126,8 @@
), "{} cannot be a List of mixed types".format(field.name)
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
+ elif field.default is dataclasses.MISSING:
+ kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
| {"golden_diff": "diff --git a/src/transformers/hf_argparser.py b/src/transformers/hf_argparser.py\n--- a/src/transformers/hf_argparser.py\n+++ b/src/transformers/hf_argparser.py\n@@ -99,6 +99,8 @@\n kwargs[\"type\"] = type(kwargs[\"choices\"][0])\n if field.default is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default\n+ else:\n+ kwargs[\"required\"] = True\n elif field.type is bool or field.type == Optional[bool]:\n if field.default is True:\n self.add_argument(f\"--no_{field.name}\", action=\"store_false\", dest=field.name, **kwargs)\n@@ -124,6 +126,8 @@\n ), \"{} cannot be a List of mixed types\".format(field.name)\n if field.default_factory is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default_factory()\n+ elif field.default is dataclasses.MISSING:\n+ kwargs[\"required\"] = True\n else:\n kwargs[\"type\"] = field.type\n if field.default is not dataclasses.MISSING:\n", "issue": "hf_argparser doesn't set the required flag on non-defaulted enums\n## Environment info\r\n\r\n- `transformers` version: 3.0.0-4.3.3\r\n- Platform: macOS\r\n- Python version: 3.9\r\n- PyTorch version (GPU?): n/a\r\n- Tensorflow version (GPU?): n/a\r\n- Using GPU in script?: n/a\r\n- Using distributed or parallel set-up in script?: n/a\r\n\r\n### Who can help\r\n\r\nI'm not sure who the owner is of hf_argparser.\r\n\r\n## Information\r\n\r\nWe're using hf_argparser to parse our experiment config into dataclasses before training.\r\n\r\n## To reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Add an enum argument without a default to a dataclass\r\n2. Parse the command line arguments without supplying the enum argument\r\n3. Should have raised an exception and printed the usage, instead defaults the value to `None`.\r\n\r\n## Expected behavior\r\n\r\nIt should raise an exception. The issue is on https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L100, the if statement should have an else which sets `kwargs[\"required\"]=True`, the same way line [134](https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L134) does.\r\n\r\nI can work up a patch if you agree this is an issue. I think it will also occur with anything that falls into [this branch](https://github.com/huggingface/transformers/blob/master/src/transformers/hf_argparser.py#L118) of the if too.\r\n\n", "before_files": [{"content": "# Copyright 2020 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport json\nimport re\nimport sys\nfrom argparse import ArgumentParser, ArgumentTypeError\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any, Iterable, List, NewType, Optional, Tuple, Union\n\n\nDataClass = NewType(\"DataClass\", Any)\nDataClassType = NewType(\"DataClassType\", Any)\n\n\n# From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse\ndef string_to_bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise ArgumentTypeError(\n f\"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).\"\n )\n\n\nclass HfArgumentParser(ArgumentParser):\n \"\"\"\n This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.\n\n The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)\n arguments to the parser after initialization and you'll get the output back after parsing as an additional\n namespace.\n \"\"\"\n\n dataclass_types: Iterable[DataClassType]\n\n def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):\n \"\"\"\n Args:\n dataclass_types:\n Dataclass type, or list of dataclass types for which we will \"fill\" instances with the parsed args.\n kwargs:\n (Optional) Passed to `argparse.ArgumentParser()` in the regular way.\n \"\"\"\n super().__init__(**kwargs)\n if dataclasses.is_dataclass(dataclass_types):\n dataclass_types = [dataclass_types]\n self.dataclass_types = dataclass_types\n for dtype in self.dataclass_types:\n self._add_dataclass_arguments(dtype)\n\n def _add_dataclass_arguments(self, dtype: DataClassType):\n for field in dataclasses.fields(dtype):\n if not field.init:\n continue\n field_name = f\"--{field.name}\"\n kwargs = field.metadata.copy()\n # field.metadata is not used at all by Data Classes,\n # it is provided as a third-party extension mechanism.\n if isinstance(field.type, str):\n raise ImportError(\n \"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),\"\n \"which can be opted in from Python 3.7 with `from __future__ import annotations`.\"\n \"We will add compatibility when Python 3.9 is released.\"\n )\n typestring = str(field.type)\n for prim_type in (int, float, str):\n for collection in (List,):\n if (\n typestring == f\"typing.Union[{collection[prim_type]}, NoneType]\"\n or typestring == f\"typing.Optional[{collection[prim_type]}]\"\n ):\n field.type = collection[prim_type]\n if (\n typestring == f\"typing.Union[{prim_type.__name__}, NoneType]\"\n or typestring == f\"typing.Optional[{prim_type.__name__}]\"\n ):\n field.type = prim_type\n\n if isinstance(field.type, type) and issubclass(field.type, Enum):\n kwargs[\"choices\"] = [x.value for x in 
field.type]\n kwargs[\"type\"] = type(kwargs[\"choices\"][0])\n if field.default is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default\n elif field.type is bool or field.type == Optional[bool]:\n if field.default is True:\n self.add_argument(f\"--no_{field.name}\", action=\"store_false\", dest=field.name, **kwargs)\n\n # Hack because type=bool in argparse does not behave as we want.\n kwargs[\"type\"] = string_to_bool\n if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):\n # Default value is True if we have no default when of type bool.\n default = True if field.default is dataclasses.MISSING else field.default\n # This is the value that will get picked if we don't include --field_name in any way\n kwargs[\"default\"] = default\n # This tells argparse we accept 0 or 1 value after --field_name\n kwargs[\"nargs\"] = \"?\"\n # This is the value that will get picked if we do --field_name (without value)\n kwargs[\"const\"] = True\n elif (\n hasattr(field.type, \"__origin__\") and re.search(r\"^typing\\.List\\[(.*)\\]$\", str(field.type)) is not None\n ):\n kwargs[\"nargs\"] = \"+\"\n kwargs[\"type\"] = field.type.__args__[0]\n assert all(\n x == kwargs[\"type\"] for x in field.type.__args__\n ), \"{} cannot be a List of mixed types\".format(field.name)\n if field.default_factory is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default_factory()\n else:\n kwargs[\"type\"] = field.type\n if field.default is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default\n elif field.default_factory is not dataclasses.MISSING:\n kwargs[\"default\"] = field.default_factory()\n else:\n kwargs[\"required\"] = True\n self.add_argument(field_name, **kwargs)\n\n def parse_args_into_dataclasses(\n self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None\n ) -> Tuple[DataClass, ...]:\n \"\"\"\n Parse command-line args into instances of the specified dataclass types.\n\n This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:\n docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args\n\n Args:\n args:\n List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)\n return_remaining_strings:\n If true, also return a list of remaining argument strings.\n look_for_args_file:\n If true, will look for a \".args\" file with the same base name as the entry point script for this\n process, and will append its potential content to the command line args.\n args_filename:\n If not None, will uses this file instead of the \".args\" file specified in the previous argument.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.abspath\n - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser\n after initialization.\n - The potential list of remaining argument strings. 
(same as argparse.ArgumentParser.parse_known_args)\n \"\"\"\n if args_filename or (look_for_args_file and len(sys.argv)):\n if args_filename:\n args_file = Path(args_filename)\n else:\n args_file = Path(sys.argv[0]).with_suffix(\".args\")\n\n if args_file.exists():\n fargs = args_file.read_text().split()\n args = fargs + args if args is not None else fargs + sys.argv[1:]\n # in case of duplicate arguments the first one has precedence\n # so we append rather than prepend.\n namespace, remaining_args = self.parse_known_args(args=args)\n outputs = []\n for dtype in self.dataclass_types:\n keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n inputs = {k: v for k, v in vars(namespace).items() if k in keys}\n for k in keys:\n delattr(namespace, k)\n obj = dtype(**inputs)\n outputs.append(obj)\n if len(namespace.__dict__) > 0:\n # additional namespace.\n outputs.append(namespace)\n if return_remaining_strings:\n return (*outputs, remaining_args)\n else:\n if remaining_args:\n raise ValueError(f\"Some specified arguments are not used by the HfArgumentParser: {remaining_args}\")\n\n return (*outputs,)\n\n def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:\n \"\"\"\n Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the\n dataclass types.\n \"\"\"\n data = json.loads(Path(json_file).read_text())\n outputs = []\n for dtype in self.dataclass_types:\n keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n inputs = {k: v for k, v in data.items() if k in keys}\n obj = dtype(**inputs)\n outputs.append(obj)\n return (*outputs,)\n\n def parse_dict(self, args: dict) -> Tuple[DataClass, ...]:\n \"\"\"\n Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass\n types.\n \"\"\"\n outputs = []\n for dtype in self.dataclass_types:\n keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n inputs = {k: v for k, v in args.items() if k in keys}\n obj = dtype(**inputs)\n outputs.append(obj)\n return (*outputs,)\n", "path": "src/transformers/hf_argparser.py"}]} | 3,599 | 249 |
gh_patches_debug_50089 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't change filename when sending a document after upgrading to v11.1.0
### Steps to reproduce
1. Generate a pickle file "test" (I didn't test other common files yet)
2. Send this file to user
`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename="test")`
or
`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))`
### Expected behaviour
User will receive a file named **test**
### Actual behaviour
User received a file named **application.octet-stream**
### Configuration
**Operating System:**
Debian (Server, where I first found this issue)
Ubuntu (local; **on v10.1.0 everything is fine**, so I upgraded to v11.1.0 and then hit the same issue as on the Debian server)
**Version of Python, python-telegram-bot & dependencies:**
``$ python -m telegram``
*My Local Ubuntu After Upgrade:*
python-telegram-bot 11.1.0
certifi 2018.08.24
future 0.16.0
Python 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]
The pictures show the results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one):

</issue>
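The renaming appears to come from the extension check in `InputFile.__init__` (line 73 of the listing below); a rough sketch of what happens to an extension-less name such as `test`:

```python
# Rough sketch of the v11.1.0 filename handling for a file named "test" (no extension).
import mimetypes

filename = "test"
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"  # falls back to the default
if not filename or '.' not in filename:    # "test" contains no '.', so this branch triggers
    filename = mimetype.replace('/', '.')  # -> "application.octet-stream"
print(filename)  # the name the recipient ends up seeing
```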
<code>
[start of telegram/files/inputfile.py]
1 #!/usr/bin/env python
2 # pylint: disable=W0622,E0611
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2018
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram InputFile."""
21
22 import imghdr
23 import mimetypes
24 import os
25 from uuid import uuid4
26
27 from telegram import TelegramError
28
29 DEFAULT_MIME_TYPE = 'application/octet-stream'
30
31
32 class InputFile(object):
33 """This object represents a Telegram InputFile.
34
35 Attributes:
36 input_file_content (:obj:`bytes`): The binaray content of the file to send.
37 filename (:obj:`str`): Optional, Filename for the file to be sent.
38 attach (:obj:`str`): Optional, attach id for sending multiple files.
39
40 Args:
41 obj (:obj:`File handler`): An open file descriptor.
42 filename (:obj:`str`, optional): Filename for this InputFile.
43 attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a
44 collection of files.
45
46 Raises:
47 TelegramError
48
49 """
50
51 def __init__(self, obj, filename=None, attach=None):
52 self.filename = None
53 self.input_file_content = obj.read()
54 self.attach = 'attached' + uuid4().hex if attach else None
55
56 if filename:
57 self.filename = filename
58 elif (hasattr(obj, 'name') and
59 not isinstance(obj.name, int) and # py3
60 obj.name != '<fdopen>'): # py2
61 # on py2.7, pylint fails to understand this properly
62 # pylint: disable=E1101
63 self.filename = os.path.basename(obj.name)
64
65 try:
66 self.mimetype = self.is_image(self.input_file_content)
67 except TelegramError:
68 if self.filename:
69 self.mimetype = mimetypes.guess_type(
70 self.filename)[0] or DEFAULT_MIME_TYPE
71 else:
72 self.mimetype = DEFAULT_MIME_TYPE
73 if not self.filename or '.' not in self.filename:
74 self.filename = self.mimetype.replace('/', '.')
75
76 @property
77 def field_tuple(self):
78 return self.filename, self.input_file_content, self.mimetype
79
80 @staticmethod
81 def is_image(stream):
82 """Check if the content file is an image by analyzing its headers.
83
84 Args:
85 stream (:obj:`str`): A str representing the content of a file.
86
87 Returns:
88 :obj:`str`: The str mime-type of an image.
89
90 """
91 image = imghdr.what(None, stream)
92 if image:
93 return 'image/%s' % image
94
95 raise TelegramError('Could not parse file content')
96
97 @staticmethod
98 def is_file(obj):
99 return hasattr(obj, 'read')
100
101 def to_dict(self):
102 if self.attach:
103 return 'attach://' + self.attach
104
[end of telegram/files/inputfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py
--- a/telegram/files/inputfile.py
+++ b/telegram/files/inputfile.py
@@ -70,7 +70,7 @@
self.filename)[0] or DEFAULT_MIME_TYPE
else:
self.mimetype = DEFAULT_MIME_TYPE
- if not self.filename or '.' not in self.filename:
+ if not self.filename:
self.filename = self.mimetype.replace('/', '.')
@property
| {"golden_diff": "diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py\n--- a/telegram/files/inputfile.py\n+++ b/telegram/files/inputfile.py\n@@ -70,7 +70,7 @@\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n- if not self.filename or '.' not in self.filename:\n+ if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n \n @property\n", "issue": "Can't change filename when send document after upgrading to v11.1.0\n### Steps to reproduce\r\n1. Generate a pickle file \"test\" (I didn't test other common files yet)\r\n\r\n2. Send this file to user\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename=\"test\")`\r\n\r\nor\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))`\r\n\r\n### Expected behaviour\r\nUser will receive a file named **test**\r\n\r\n### Actual behaviour\r\nUser received a file named **application.octet-stream**\r\n\r\n### Configuration\r\n**Operating System:** \r\n\r\nDebian (Server, where I first found this issue)\r\n\r\nUbuntu(Local, **I test on v10.1.0, everything is fine**, so I upgrade to v11.1.0, then I have the same issue as Debian Server)\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n\r\n*My Local Ubuntu After Upgrade:*\r\npython-telegram-bot 11.1.0\r\ncertifi 2018.08.24\r\nfuture 0.16.0\r\nPython 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]\r\n\r\nThe pictures shows results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one) :\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport mimetypes\nimport os\nfrom uuid import uuid4\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binaray content of the file to send.\n filename (:obj:`str`): Optional, Filename for the file to be sent.\n attach (:obj:`str`): Optional, attach id for sending multiple files.\n\n Args:\n obj (:obj:`File handler`): An open file descriptor.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a\n collection of files.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, obj, filename=None, attach=None):\n self.filename = None\n self.input_file_content = obj.read()\n self.attach = 'attached' + uuid4().hex if attach else None\n\n if filename:\n self.filename = filename\n elif (hasattr(obj, 'name') and\n not isinstance(obj.name, int) and # py3\n obj.name != '<fdopen>'): # py2\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(obj.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n if not self.filename or '.' not in self.filename:\n self.filename = self.mimetype.replace('/', '.')\n\n @property\n def field_tuple(self):\n return self.filename, self.input_file_content, self.mimetype\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_file(obj):\n return hasattr(obj, 'read')\n\n def to_dict(self):\n if self.attach:\n return 'attach://' + self.attach\n", "path": "telegram/files/inputfile.py"}]} | 1,978 | 109 |
gh_patches_debug_7844 | rasdani/github-patches | git_diff | carpentries__amy-2219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide lesson program specific checkout steps
Now that we have a single Instructor badge, we do not want to see the lesson-program-specific checkout steps. These should be hidden from view so that only four options remain.
- ~DC Demo~
- ~DC Homework~
- Demo
- Discussion
- Homework
- ~LC Demo~
- ~LC Homework~
- ~SWC Demo~
- ~SWC Homework~
- Training
</issue>
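One way to express the intended filtering, mirroring the exclusion that the bulk-add form in the listing below already applies (a sketch, assuming the existing Django models):

```python
from django.db.models import Q

from workshops.models import TrainingRequirement

# Hide the lesson-program-specific steps; only the generic checkout steps
# (Demo, Discussion, Homework, Training) remain selectable.
visible_requirements = TrainingRequirement.objects.exclude(
    Q(name__startswith="SWC") | Q(name__startswith="DC") | Q(name__startswith="LC")
)
```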
<code>
[start of amy/trainings/forms.py]
1 from crispy_forms.layout import Layout, Submit
2 from django import forms
3 from django.core.exceptions import ValidationError
4 from django.db.models import Q
5 from django.forms import RadioSelect, TextInput
6
7 # this is used instead of Django Autocomplete Light widgets
8 # see issue #1330: https://github.com/swcarpentry/amy/issues/1330
9 from workshops.fields import ModelSelect2Widget
10 from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper
11 from workshops.models import Event, Person, TrainingProgress, TrainingRequirement
12
13
14 class TrainingProgressForm(forms.ModelForm):
15 trainee = forms.ModelChoiceField(
16 label="Trainee",
17 required=True,
18 queryset=Person.objects.all(),
19 widget=ModelSelect2Widget(data_view="person-lookup"),
20 )
21 evaluated_by = forms.ModelChoiceField(
22 label="Evaluated by",
23 required=False,
24 queryset=Person.objects.all(),
25 widget=ModelSelect2Widget(data_view="admin-lookup"),
26 )
27 event = forms.ModelChoiceField(
28 label="Event",
29 required=False,
30 queryset=Event.objects.all(),
31 widget=ModelSelect2Widget(data_view="event-lookup", attrs=SELECT2_SIDEBAR),
32 )
33
34 # helper used in edit view
35 helper = BootstrapHelper(
36 duplicate_buttons_on_top=True,
37 submit_label="Update",
38 add_delete_button=True,
39 additional_form_class="training-progress",
40 add_cancel_button=False,
41 )
42
43 # helper used in create view
44 create_helper = BootstrapHelper(
45 duplicate_buttons_on_top=True,
46 submit_label="Add",
47 additional_form_class="training-progress",
48 add_cancel_button=False,
49 )
50
51 class Meta:
52 model = TrainingProgress
53 fields = [
54 "trainee",
55 "evaluated_by",
56 "requirement",
57 "state",
58 "discarded",
59 "event",
60 "url",
61 "notes",
62 ]
63 widgets = {
64 "state": RadioSelect,
65 }
66
67 def clean(self):
68 cleaned_data = super().clean()
69
70 trainee = cleaned_data.get("trainee")
71
72 # check if trainee has at least one training task
73 training_tasks = trainee.get_training_tasks()
74
75 if not training_tasks:
76 raise ValidationError(
77 "It's not possible to add training progress "
78 "to a trainee without any training task."
79 )
80
81
82 class BulkAddTrainingProgressForm(forms.ModelForm):
83 event = forms.ModelChoiceField(
84 label="Training",
85 required=False,
86 queryset=Event.objects.filter(tags__name="TTT"),
87 widget=ModelSelect2Widget(data_view="ttt-event-lookup", attrs=SELECT2_SIDEBAR),
88 )
89
90 trainees = forms.ModelMultipleChoiceField(queryset=Person.objects.all())
91
92 requirement = forms.ModelChoiceField(
93 queryset=TrainingRequirement.objects.exclude(
94 Q(name__startswith="SWC")
95 | Q(name__startswith="DC")
96 | Q(name__startswith="LC")
97 ),
98 label="Type",
99 required=True,
100 )
101
102 helper = BootstrapHelper(
103 additional_form_class="training-progress",
104 submit_label="Add",
105 form_tag=False,
106 add_cancel_button=False,
107 )
108 helper.layout = Layout(
109 # no 'trainees' -- you should take care of generating it manually in
110 # the template where this form is used
111 "requirement",
112 "state",
113 "event",
114 "url",
115 "notes",
116 )
117
118 class Meta:
119 model = TrainingProgress
120 fields = [
121 # no 'trainees'
122 "requirement",
123 "state",
124 "event",
125 "url",
126 "notes",
127 ]
128 widgets = {
129 "state": RadioSelect,
130 "notes": TextInput,
131 }
132
133 def clean(self):
134 cleaned_data = super().clean()
135
136 trainees = cleaned_data.get("trainees", [])
137
138 # check if all trainees have at least one training task
139 for trainee in trainees:
140 training_tasks = trainee.get_training_tasks()
141
142 if not training_tasks:
143 raise ValidationError(
144 "It's not possible to add training "
145 "progress to a trainee without any "
146 "training task."
147 )
148
149
150 class BulkDiscardProgressesForm(forms.Form):
151 """Form used to bulk discard all TrainingProgresses associated with
152 selected trainees."""
153
154 trainees = forms.ModelMultipleChoiceField(queryset=Person.objects.all())
155
156 helper = BootstrapHelper(
157 add_submit_button=False,
158 form_tag=False,
159 display_labels=False,
160 add_cancel_button=False,
161 )
162
163 SUBMIT_POPOVER = """<p>Discarded progress will be displayed in the following
164 way: <span class='badge badge-dark'><strike>Discarded</strike></span>.</p>
165
166 <p>If you want to permanently remove records from system,
167 click one of the progress labels and, then, click "delete" button.</p>"""
168
169 helper.layout = Layout(
170 # no 'trainees' -- you should take care of generating it manually in
171 # the template where this form is used
172 # We use formnovalidate on submit button to disable browser
173 # validation. This is necessary because this form is used along with
174 # BulkAddTrainingProgressForm, which have required fields. Both forms
175 # live inside the same <form> tag. Without this attribute, when you
176 # click the following submit button, the browser reports missing
177 # values in required fields in BulkAddTrainingProgressForm.
178 Submit(
179 "discard",
180 "Discard all progress of selected trainees",
181 formnovalidate="formnovalidate",
182 **{
183 "data-toggle": "popover",
184 "data-trigger": "hover",
185 "data-html": "true",
186 "data-content": SUBMIT_POPOVER,
187 "css_class": "btn btn-warning",
188 },
189 ),
190 )
191
[end of amy/trainings/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/trainings/forms.py b/amy/trainings/forms.py
--- a/amy/trainings/forms.py
+++ b/amy/trainings/forms.py
@@ -24,6 +24,16 @@
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="admin-lookup"),
)
+ requirement = forms.ModelChoiceField(
+ queryset=TrainingRequirement.objects.exclude(
+ Q(name__startswith="SWC")
+ | Q(name__startswith="DC")
+ | Q(name__startswith="LC")
+ ),
+ label="Type",
+ required=True,
+ )
+
event = forms.ModelChoiceField(
label="Event",
required=False,
| {"golden_diff": "diff --git a/amy/trainings/forms.py b/amy/trainings/forms.py\n--- a/amy/trainings/forms.py\n+++ b/amy/trainings/forms.py\n@@ -24,6 +24,16 @@\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\"),\n )\n+ requirement = forms.ModelChoiceField(\n+ queryset=TrainingRequirement.objects.exclude(\n+ Q(name__startswith=\"SWC\")\n+ | Q(name__startswith=\"DC\")\n+ | Q(name__startswith=\"LC\")\n+ ),\n+ label=\"Type\",\n+ required=True,\n+ )\n+\n event = forms.ModelChoiceField(\n label=\"Event\",\n required=False,\n", "issue": "Hide lesson program specific checkout steps\nNow that we have a single Instructor badge, we do not want to see the lesson program specific checkout steps. These should be hidden from view so only four options remain.\r\n\r\n- ~DC Demo~\r\n- ~DC Homework~\r\n- Demo\r\n- Discussion\r\n- Homework\r\n- ~LC Demo~\r\n- ~LC Homework~\r\n- ~SWC Demo~\r\n- ~SWC Homework~\r\n- Training\n", "before_files": [{"content": "from crispy_forms.layout import Layout, Submit\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.forms import RadioSelect, TextInput\n\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper\nfrom workshops.models import Event, Person, TrainingProgress, TrainingRequirement\n\n\nclass TrainingProgressForm(forms.ModelForm):\n trainee = forms.ModelChoiceField(\n label=\"Trainee\",\n required=True,\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"person-lookup\"),\n )\n evaluated_by = forms.ModelChoiceField(\n label=\"Evaluated by\",\n required=False,\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\"),\n )\n event = forms.ModelChoiceField(\n label=\"Event\",\n required=False,\n queryset=Event.objects.all(),\n widget=ModelSelect2Widget(data_view=\"event-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n # helper used in edit view\n helper = BootstrapHelper(\n duplicate_buttons_on_top=True,\n submit_label=\"Update\",\n add_delete_button=True,\n additional_form_class=\"training-progress\",\n add_cancel_button=False,\n )\n\n # helper used in create view\n create_helper = BootstrapHelper(\n duplicate_buttons_on_top=True,\n submit_label=\"Add\",\n additional_form_class=\"training-progress\",\n add_cancel_button=False,\n )\n\n class Meta:\n model = TrainingProgress\n fields = [\n \"trainee\",\n \"evaluated_by\",\n \"requirement\",\n \"state\",\n \"discarded\",\n \"event\",\n \"url\",\n \"notes\",\n ]\n widgets = {\n \"state\": RadioSelect,\n }\n\n def clean(self):\n cleaned_data = super().clean()\n\n trainee = cleaned_data.get(\"trainee\")\n\n # check if trainee has at least one training task\n training_tasks = trainee.get_training_tasks()\n\n if not training_tasks:\n raise ValidationError(\n \"It's not possible to add training progress \"\n \"to a trainee without any training task.\"\n )\n\n\nclass BulkAddTrainingProgressForm(forms.ModelForm):\n event = forms.ModelChoiceField(\n label=\"Training\",\n required=False,\n queryset=Event.objects.filter(tags__name=\"TTT\"),\n widget=ModelSelect2Widget(data_view=\"ttt-event-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n trainees = forms.ModelMultipleChoiceField(queryset=Person.objects.all())\n\n requirement = forms.ModelChoiceField(\n queryset=TrainingRequirement.objects.exclude(\n 
Q(name__startswith=\"SWC\")\n | Q(name__startswith=\"DC\")\n | Q(name__startswith=\"LC\")\n ),\n label=\"Type\",\n required=True,\n )\n\n helper = BootstrapHelper(\n additional_form_class=\"training-progress\",\n submit_label=\"Add\",\n form_tag=False,\n add_cancel_button=False,\n )\n helper.layout = Layout(\n # no 'trainees' -- you should take care of generating it manually in\n # the template where this form is used\n \"requirement\",\n \"state\",\n \"event\",\n \"url\",\n \"notes\",\n )\n\n class Meta:\n model = TrainingProgress\n fields = [\n # no 'trainees'\n \"requirement\",\n \"state\",\n \"event\",\n \"url\",\n \"notes\",\n ]\n widgets = {\n \"state\": RadioSelect,\n \"notes\": TextInput,\n }\n\n def clean(self):\n cleaned_data = super().clean()\n\n trainees = cleaned_data.get(\"trainees\", [])\n\n # check if all trainees have at least one training task\n for trainee in trainees:\n training_tasks = trainee.get_training_tasks()\n\n if not training_tasks:\n raise ValidationError(\n \"It's not possible to add training \"\n \"progress to a trainee without any \"\n \"training task.\"\n )\n\n\nclass BulkDiscardProgressesForm(forms.Form):\n \"\"\"Form used to bulk discard all TrainingProgresses associated with\n selected trainees.\"\"\"\n\n trainees = forms.ModelMultipleChoiceField(queryset=Person.objects.all())\n\n helper = BootstrapHelper(\n add_submit_button=False,\n form_tag=False,\n display_labels=False,\n add_cancel_button=False,\n )\n\n SUBMIT_POPOVER = \"\"\"<p>Discarded progress will be displayed in the following\n way: <span class='badge badge-dark'><strike>Discarded</strike></span>.</p>\n\n <p>If you want to permanently remove records from system,\n click one of the progress labels and, then, click \"delete\" button.</p>\"\"\"\n\n helper.layout = Layout(\n # no 'trainees' -- you should take care of generating it manually in\n # the template where this form is used\n # We use formnovalidate on submit button to disable browser\n # validation. This is necessary because this form is used along with\n # BulkAddTrainingProgressForm, which have required fields. Both forms\n # live inside the same <form> tag. Without this attribute, when you\n # click the following submit button, the browser reports missing\n # values in required fields in BulkAddTrainingProgressForm.\n Submit(\n \"discard\",\n \"Discard all progress of selected trainees\",\n formnovalidate=\"formnovalidate\",\n **{\n \"data-toggle\": \"popover\",\n \"data-trigger\": \"hover\",\n \"data-html\": \"true\",\n \"data-content\": SUBMIT_POPOVER,\n \"css_class\": \"btn btn-warning\",\n },\n ),\n )\n", "path": "amy/trainings/forms.py"}]} | 2,348 | 155 |
gh_patches_debug_20811 | rasdani/github-patches | git_diff | xonsh__xonsh-1516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid sys.argv when running a xonsh script
``` sh
$ cat test.py
#!/usr/bin/env python3
import sys
print(sys.argv)
$ ./test.py --hello world
['./test.py', '--hello', 'world']
$ python3 test.py --hello world
['test.py', '--hello', 'world']
$ sed -i s/python3/xonsh/ test.py
$ ./test.py --hello world
['--hello', 'world']
$ xonsh test.py --hello world
['--hello', 'world']
$ execx($(cat test.py))
['/usr/local/bin/xonsh']
$
```
As you can see, the program name is not passed as `argv[0]`. This behaviour differs from _every other_ existing programming language, including Python. Thus, none of the option-parsing libraries work with their default settings, since they try to use `sys.argv[1:]`.
Hoping this is a bug that's going to be fixed, I'm using the following workaround for now:
``` py
if semver.match(re.search(r"([\w.-]+)\W*$", $(xonsh --version)).group(1), "<=0.4.4"):
args = sys.argv
else:
args = sys.argv[1:]
```
By the way, why does `xonsh --version` print a `repr` of a tuple?
</issue>
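The tuple `repr` mentioned at the end of the report appears to come from a stray trailing comma in `premain` (line 162 of the listing below); a minimal sketch of the effect, using plain Python:

```python
# A trailing comma turns the assignment into a one-element tuple.
version = '/'.join(('xonsh', '0.4.4')),   # note the trailing comma
print(version)      # ('xonsh/0.4.4',)  -- what `xonsh --version` shows
print(version[0])   # xonsh/0.4.4      -- what was presumably intended
```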
<code>
[start of xonsh/main.py]
1 # -*- coding: utf-8 -*-
2 """The main xonsh script."""
3 import os
4 import sys
5 import enum
6 import argparse
7 import builtins
8 import contextlib
9
10 from xonsh import __version__
11 from xonsh.lazyasd import lazyobject
12 from xonsh.shell import Shell
13 from xonsh.pretty import pretty
14 from xonsh.proc import HiddenCompletedCommand
15 from xonsh.jobs import ignore_sigtstp
16 from xonsh.tools import setup_win_unicode_console, print_color
17 from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS
18 from xonsh.codecache import run_script_with_cache, run_code_with_cache
19 from xonsh.xonfig import xonfig_main
20 from xonsh.lazyimps import pygments, pyghooks
21
22
23 def get_setproctitle():
24 """Proxy function for loading process title"""
25 try:
26 from setproctitle import setproctitle as spt
27 except ImportError:
28 return
29 return spt
30
31
32 def path_argument(s):
33 """Return a path only if the path is actually legal
34
35 This is very similar to argparse.FileType, except that it doesn't return
36 an open file handle, but rather simply validates the path."""
37
38 s = os.path.abspath(os.path.expanduser(s))
39 if not os.path.isfile(s):
40 msg = '{0!r} must be a valid path to a file'.format(s)
41 raise argparse.ArgumentTypeError(msg)
42 return s
43
44
45 @lazyobject
46 def parser():
47 p = argparse.ArgumentParser(description='xonsh', add_help=False)
48 p.add_argument('-h', '--help',
49 dest='help',
50 action='store_true',
51 default=False,
52 help='show help and exit')
53 p.add_argument('-V', '--version',
54 dest='version',
55 action='store_true',
56 default=False,
57 help='show version information and exit')
58 p.add_argument('-c',
59 help="Run a single command and exit",
60 dest='command',
61 required=False,
62 default=None)
63 p.add_argument('-i', '--interactive',
64 help='force running in interactive mode',
65 dest='force_interactive',
66 action='store_true',
67 default=False)
68 p.add_argument('-l', '--login',
69 help='run as a login shell',
70 dest='login',
71 action='store_true',
72 default=False)
73 p.add_argument('--config-path',
74 help='specify a custom static configuration file',
75 dest='config_path',
76 default=None,
77 type=path_argument)
78 p.add_argument('--no-rc',
79 help="Do not load the .xonshrc files",
80 dest='norc',
81 action='store_true',
82 default=False)
83 p.add_argument('--no-script-cache',
84 help="Do not cache scripts as they are run",
85 dest='scriptcache',
86 action='store_false',
87 default=True)
88 p.add_argument('--cache-everything',
89 help="Use a cache, even for interactive commands",
90 dest='cacheall',
91 action='store_true',
92 default=False)
93 p.add_argument('-D',
94 dest='defines',
95 help='define an environment variable, in the form of '
96 '-DNAME=VAL. May be used many times.',
97 metavar='ITEM',
98 action='append',
99 default=None)
100 p.add_argument('--shell-type',
101 help='What kind of shell should be used. '
102 'Possible options: readline, prompt_toolkit, random. '
103 'Warning! If set this overrides $SHELL_TYPE variable.',
104 dest='shell_type',
105 choices=('readline', 'prompt_toolkit', 'best', 'random'),
106 default=None)
107 p.add_argument('file',
108 metavar='script-file',
109 help='If present, execute the script in script-file'
110 ' and exit',
111 nargs='?',
112 default=None)
113 p.add_argument('args',
114 metavar='args',
115 help='Additional arguments to the script specified '
116 'by script-file',
117 nargs=argparse.REMAINDER,
118 default=[])
119 return p
120
121
122 def _pprint_displayhook(value):
123 if value is None:
124 return
125 builtins._ = None # Set '_' to None to avoid recursion
126 if isinstance(value, HiddenCompletedCommand):
127 builtins._ = value
128 return
129 env = builtins.__xonsh_env__
130 if env.get('PRETTY_PRINT_RESULTS'):
131 printed_val = pretty(value)
132 else:
133 printed_val = repr(value)
134 if HAS_PYGMENTS and env.get('COLOR_RESULTS'):
135 tokens = list(pygments.lex(printed_val, lexer=pyghooks.XonshLexer()))
136 print_color(tokens)
137 else:
138 print(printed_val) # black & white case
139 builtins._ = value
140
141
142 class XonshMode(enum.Enum):
143 single_command = 0
144 script_from_file = 1
145 script_from_stdin = 2
146 interactive = 3
147
148
149 def premain(argv=None):
150 """Setup for main xonsh entry point, returns parsed arguments."""
151 if argv is None:
152 argv = sys.argv[1:]
153 setproctitle = get_setproctitle()
154 if setproctitle is not None:
155 setproctitle(' '.join(['xonsh'] + argv))
156 builtins.__xonsh_ctx__ = {}
157 args = parser.parse_args(argv)
158 if args.help:
159 parser.print_help()
160 parser.exit()
161 if args.version:
162 version = '/'.join(('xonsh', __version__)),
163 print(version)
164 parser.exit()
165 shell_kwargs = {'shell_type': args.shell_type,
166 'completer': False,
167 'login': False,
168 'scriptcache': args.scriptcache,
169 'cacheall': args.cacheall,
170 'ctx': builtins.__xonsh_ctx__}
171 if args.login:
172 shell_kwargs['login'] = True
173 if args.config_path is not None:
174 shell_kwargs['config'] = args.config_path
175 if args.norc:
176 shell_kwargs['rc'] = ()
177 setattr(sys, 'displayhook', _pprint_displayhook)
178 if args.command is not None:
179 args.mode = XonshMode.single_command
180 shell_kwargs['shell_type'] = 'none'
181 elif args.file is not None:
182 args.mode = XonshMode.script_from_file
183 shell_kwargs['shell_type'] = 'none'
184 elif not sys.stdin.isatty() and not args.force_interactive:
185 args.mode = XonshMode.script_from_stdin
186 shell_kwargs['shell_type'] = 'none'
187 else:
188 args.mode = XonshMode.interactive
189 shell_kwargs['completer'] = True
190 shell_kwargs['login'] = True
191 builtins.__xonsh_shell__ = Shell(**shell_kwargs)
192 env = builtins.__xonsh_env__
193 env['XONSH_LOGIN'] = shell_kwargs['login']
194 if args.defines is not None:
195 env.update([x.split('=', 1) for x in args.defines])
196 env['XONSH_INTERACTIVE'] = args.force_interactive
197 if ON_WINDOWS:
198 setup_win_unicode_console(env.get('WIN_UNICODE_CONSOLE', True))
199 return args
200
201
202 def main(argv=None):
203 """Main entry point for xonsh cli."""
204 if argv is None:
205 argv = sys.argv[1:]
206 args = premain(argv)
207 env = builtins.__xonsh_env__
208 shell = builtins.__xonsh_shell__
209 if args.mode == XonshMode.interactive:
210 # enter the shell
211 env['XONSH_INTERACTIVE'] = True
212 ignore_sigtstp()
213 if (env['XONSH_INTERACTIVE'] and
214 not env['LOADED_CONFIG'] and
215 not any(os.path.isfile(i) for i in env['XONSHRC'])):
216 print('Could not find xonsh configuration or run control files.',
217 file=sys.stderr)
218 xonfig_main(['wizard', '--confirm'])
219 shell.shell.cmdloop()
220 elif args.mode == XonshMode.single_command:
221 # run a single command and exit
222 run_code_with_cache(args.command.lstrip(), shell.execer, mode='single')
223 elif args.mode == XonshMode.script_from_file:
224 # run a script contained in a file
225 path = os.path.abspath(os.path.expanduser(args.file))
226 if os.path.isfile(path):
227 sys.argv = args.args
228 env['ARGS'] = [args.file] + args.args
229 env['XONSH_SOURCE'] = path
230 run_script_with_cache(args.file, shell.execer, glb=shell.ctx,
231 loc=None, mode='exec')
232 else:
233 print('xonsh: {0}: No such file or directory.'.format(args.file))
234 elif args.mode == XonshMode.script_from_stdin:
235 # run a script given on stdin
236 code = sys.stdin.read()
237 run_code_with_cache(code, shell.execer, glb=shell.ctx, loc=None,
238 mode='exec')
239 postmain(args)
240
241
242 def postmain(args=None):
243 """Teardown for main xonsh entry point, accepts parsed arguments."""
244 if ON_WINDOWS:
245 setup_win_unicode_console(enable=False)
246 if hasattr(builtins, '__xonsh_shell__'):
247 del builtins.__xonsh_shell__
248
249
250 @contextlib.contextmanager
251 def main_context(argv=None):
252 """Generator that runs pre- and post-main() functions. This has two iterations.
253 The first yields the shell. The second returns None but cleans
254 up the shell.
255 """
256 args = premain(argv)
257 yield builtins.__xonsh_shell__
258 postmain(args)
259
[end of xonsh/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/main.py b/xonsh/main.py
--- a/xonsh/main.py
+++ b/xonsh/main.py
@@ -159,7 +159,7 @@
parser.print_help()
parser.exit()
if args.version:
- version = '/'.join(('xonsh', __version__)),
+ version = '/'.join(('xonsh', __version__))
print(version)
parser.exit()
shell_kwargs = {'shell_type': args.shell_type,
@@ -224,8 +224,8 @@
# run a script contained in a file
path = os.path.abspath(os.path.expanduser(args.file))
if os.path.isfile(path):
- sys.argv = args.args
- env['ARGS'] = [args.file] + args.args
+ sys.argv = [args.file] + args.args
+ env['ARGS'] = sys.argv[:] # $ARGS is not sys.argv
env['XONSH_SOURCE'] = path
run_script_with_cache(args.file, shell.execer, glb=shell.ctx,
loc=None, mode='exec')
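For context, the behaviour this hunk restores can be checked with the reproduction script from the original issue report (a plain `test.py` run through xonsh); after the patch the script name is back in `argv[0]`, and `$ARGS` receives a copy of the same list:

```python
# test.py -- run as: xonsh test.py --hello world
import sys
print(sys.argv)   # expected after the patch: ['test.py', '--hello', 'world']
```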
| {"golden_diff": "diff --git a/xonsh/main.py b/xonsh/main.py\n--- a/xonsh/main.py\n+++ b/xonsh/main.py\n@@ -159,7 +159,7 @@\n parser.print_help()\n parser.exit()\n if args.version:\n- version = '/'.join(('xonsh', __version__)),\n+ version = '/'.join(('xonsh', __version__))\n print(version)\n parser.exit()\n shell_kwargs = {'shell_type': args.shell_type,\n@@ -224,8 +224,8 @@\n # run a script contained in a file\n path = os.path.abspath(os.path.expanduser(args.file))\n if os.path.isfile(path):\n- sys.argv = args.args\n- env['ARGS'] = [args.file] + args.args\n+ sys.argv = [args.file] + args.args\n+ env['ARGS'] = sys.argv[:] # $ARGS is not sys.argv\n env['XONSH_SOURCE'] = path\n run_script_with_cache(args.file, shell.execer, glb=shell.ctx,\n loc=None, mode='exec')\n", "issue": "Invalid sys.argv when running a xonsh script\n``` sh\n$ cat test.py\n#!/usr/bin/env python3\nimport sys\nprint(sys.argv)\n$ ./test.py --hello world\n['./test.py', '--hello', 'world']\n$ python3 test.py --hello world\n['test.py', '--hello', 'world']\n$ sed -i s/python3/xonsh/ test.py\n$ ./test.py --hello world\n['--hello', 'world']\n$ xonsh test.py --hello world\n['--hello', 'world']\n$ execx($(cat test.py))\n['/usr/local/bin/xonsh']\n$\n```\n\nAs you can see, program name is not passed into `argv[0]`. This behaviour differs from _every other_ existing programming language, including Python. Thus, none of the option parsing libraries work with default settings, since they try to use `sys.argv[1:]`.\nHoping this is a bug that's going to be fixed, I'm using the following workaround for now:\n\n``` py\nif semver.match(re.search(r\"([\\w.-]+)\\W*$\", $(xonsh --version)).group(1), \"<=0.4.4\"):\n args = sys.argv\nelse:\n args = sys.argv[1:]\n```\n\nBy the way, why does `xonsh --version` print a `repr` of a tuple?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The main xonsh script.\"\"\"\nimport os\nimport sys\nimport enum\nimport argparse\nimport builtins\nimport contextlib\n\nfrom xonsh import __version__\nfrom xonsh.lazyasd import lazyobject\nfrom xonsh.shell import Shell\nfrom xonsh.pretty import pretty\nfrom xonsh.proc import HiddenCompletedCommand\nfrom xonsh.jobs import ignore_sigtstp\nfrom xonsh.tools import setup_win_unicode_console, print_color\nfrom xonsh.platform import HAS_PYGMENTS, ON_WINDOWS\nfrom xonsh.codecache import run_script_with_cache, run_code_with_cache\nfrom xonsh.xonfig import xonfig_main\nfrom xonsh.lazyimps import pygments, pyghooks\n\n\ndef get_setproctitle():\n \"\"\"Proxy function for loading process title\"\"\"\n try:\n from setproctitle import setproctitle as spt\n except ImportError:\n return\n return spt\n\n\ndef path_argument(s):\n \"\"\"Return a path only if the path is actually legal\n\n This is very similar to argparse.FileType, except that it doesn't return\n an open file handle, but rather simply validates the path.\"\"\"\n\n s = os.path.abspath(os.path.expanduser(s))\n if not os.path.isfile(s):\n msg = '{0!r} must be a valid path to a file'.format(s)\n raise argparse.ArgumentTypeError(msg)\n return s\n\n\n@lazyobject\ndef parser():\n p = argparse.ArgumentParser(description='xonsh', add_help=False)\n p.add_argument('-h', '--help',\n dest='help',\n action='store_true',\n default=False,\n help='show help and exit')\n p.add_argument('-V', '--version',\n dest='version',\n action='store_true',\n default=False,\n help='show version information and exit')\n p.add_argument('-c',\n help=\"Run a single command and exit\",\n dest='command',\n required=False,\n default=None)\n 
p.add_argument('-i', '--interactive',\n help='force running in interactive mode',\n dest='force_interactive',\n action='store_true',\n default=False)\n p.add_argument('-l', '--login',\n help='run as a login shell',\n dest='login',\n action='store_true',\n default=False)\n p.add_argument('--config-path',\n help='specify a custom static configuration file',\n dest='config_path',\n default=None,\n type=path_argument)\n p.add_argument('--no-rc',\n help=\"Do not load the .xonshrc files\",\n dest='norc',\n action='store_true',\n default=False)\n p.add_argument('--no-script-cache',\n help=\"Do not cache scripts as they are run\",\n dest='scriptcache',\n action='store_false',\n default=True)\n p.add_argument('--cache-everything',\n help=\"Use a cache, even for interactive commands\",\n dest='cacheall',\n action='store_true',\n default=False)\n p.add_argument('-D',\n dest='defines',\n help='define an environment variable, in the form of '\n '-DNAME=VAL. May be used many times.',\n metavar='ITEM',\n action='append',\n default=None)\n p.add_argument('--shell-type',\n help='What kind of shell should be used. '\n 'Possible options: readline, prompt_toolkit, random. '\n 'Warning! If set this overrides $SHELL_TYPE variable.',\n dest='shell_type',\n choices=('readline', 'prompt_toolkit', 'best', 'random'),\n default=None)\n p.add_argument('file',\n metavar='script-file',\n help='If present, execute the script in script-file'\n ' and exit',\n nargs='?',\n default=None)\n p.add_argument('args',\n metavar='args',\n help='Additional arguments to the script specified '\n 'by script-file',\n nargs=argparse.REMAINDER,\n default=[])\n return p\n\n\ndef _pprint_displayhook(value):\n if value is None:\n return\n builtins._ = None # Set '_' to None to avoid recursion\n if isinstance(value, HiddenCompletedCommand):\n builtins._ = value\n return\n env = builtins.__xonsh_env__\n if env.get('PRETTY_PRINT_RESULTS'):\n printed_val = pretty(value)\n else:\n printed_val = repr(value)\n if HAS_PYGMENTS and env.get('COLOR_RESULTS'):\n tokens = list(pygments.lex(printed_val, lexer=pyghooks.XonshLexer()))\n print_color(tokens)\n else:\n print(printed_val) # black & white case\n builtins._ = value\n\n\nclass XonshMode(enum.Enum):\n single_command = 0\n script_from_file = 1\n script_from_stdin = 2\n interactive = 3\n\n\ndef premain(argv=None):\n \"\"\"Setup for main xonsh entry point, returns parsed arguments.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n setproctitle = get_setproctitle()\n if setproctitle is not None:\n setproctitle(' '.join(['xonsh'] + argv))\n builtins.__xonsh_ctx__ = {}\n args = parser.parse_args(argv)\n if args.help:\n parser.print_help()\n parser.exit()\n if args.version:\n version = '/'.join(('xonsh', __version__)),\n print(version)\n parser.exit()\n shell_kwargs = {'shell_type': args.shell_type,\n 'completer': False,\n 'login': False,\n 'scriptcache': args.scriptcache,\n 'cacheall': args.cacheall,\n 'ctx': builtins.__xonsh_ctx__}\n if args.login:\n shell_kwargs['login'] = True\n if args.config_path is not None:\n shell_kwargs['config'] = args.config_path\n if args.norc:\n shell_kwargs['rc'] = ()\n setattr(sys, 'displayhook', _pprint_displayhook)\n if args.command is not None:\n args.mode = XonshMode.single_command\n shell_kwargs['shell_type'] = 'none'\n elif args.file is not None:\n args.mode = XonshMode.script_from_file\n shell_kwargs['shell_type'] = 'none'\n elif not sys.stdin.isatty() and not args.force_interactive:\n args.mode = XonshMode.script_from_stdin\n shell_kwargs['shell_type'] = 'none'\n 
else:\n args.mode = XonshMode.interactive\n shell_kwargs['completer'] = True\n shell_kwargs['login'] = True\n builtins.__xonsh_shell__ = Shell(**shell_kwargs)\n env = builtins.__xonsh_env__\n env['XONSH_LOGIN'] = shell_kwargs['login']\n if args.defines is not None:\n env.update([x.split('=', 1) for x in args.defines])\n env['XONSH_INTERACTIVE'] = args.force_interactive\n if ON_WINDOWS:\n setup_win_unicode_console(env.get('WIN_UNICODE_CONSOLE', True))\n return args\n\n\ndef main(argv=None):\n \"\"\"Main entry point for xonsh cli.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n args = premain(argv)\n env = builtins.__xonsh_env__\n shell = builtins.__xonsh_shell__\n if args.mode == XonshMode.interactive:\n # enter the shell\n env['XONSH_INTERACTIVE'] = True\n ignore_sigtstp()\n if (env['XONSH_INTERACTIVE'] and\n not env['LOADED_CONFIG'] and\n not any(os.path.isfile(i) for i in env['XONSHRC'])):\n print('Could not find xonsh configuration or run control files.',\n file=sys.stderr)\n xonfig_main(['wizard', '--confirm'])\n shell.shell.cmdloop()\n elif args.mode == XonshMode.single_command:\n # run a single command and exit\n run_code_with_cache(args.command.lstrip(), shell.execer, mode='single')\n elif args.mode == XonshMode.script_from_file:\n # run a script contained in a file\n path = os.path.abspath(os.path.expanduser(args.file))\n if os.path.isfile(path):\n sys.argv = args.args\n env['ARGS'] = [args.file] + args.args\n env['XONSH_SOURCE'] = path\n run_script_with_cache(args.file, shell.execer, glb=shell.ctx,\n loc=None, mode='exec')\n else:\n print('xonsh: {0}: No such file or directory.'.format(args.file))\n elif args.mode == XonshMode.script_from_stdin:\n # run a script given on stdin\n code = sys.stdin.read()\n run_code_with_cache(code, shell.execer, glb=shell.ctx, loc=None,\n mode='exec')\n postmain(args)\n\n\ndef postmain(args=None):\n \"\"\"Teardown for main xonsh entry point, accepts parsed arguments.\"\"\"\n if ON_WINDOWS:\n setup_win_unicode_console(enable=False)\n if hasattr(builtins, '__xonsh_shell__'):\n del builtins.__xonsh_shell__\n\n\[email protected]\ndef main_context(argv=None):\n \"\"\"Generator that runs pre- and post-main() functions. This has two iterations.\n The first yields the shell. The second returns None but cleans\n up the shell.\n \"\"\"\n args = premain(argv)\n yield builtins.__xonsh_shell__\n postmain(args)\n", "path": "xonsh/main.py"}]} | 3,568 | 245 |
gh_patches_debug_16326 | rasdani/github-patches | git_diff | keras-team__autokeras-1297 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How to use multiple GPUs?
### Feature Description
I want to use a single machine with multiple GPUs for training, but it seems to have no actual effect.

### Code Example
```python
with strategy.scope():
```
### Reason
Speed up the computation by using all available GPUs.
### Solution
<!---
Please tell us how to implement the feature,
if you have one in mind.
-->
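One possible direction, expanding the `strategy.scope()` fragment above into a runnable sketch of the underlying TensorFlow pattern. This is not the AutoKeras API: `build_fn` and `weights_path` are placeholder names for illustration. The point is that the model has to be built and its weights loaded inside the strategy's scope, otherwise the extra GPUs are never used:

```python
# Sketch only: the general tf.distribute pattern the request boils down to.
# `build_fn` and `weights_path` are hypothetical placeholders, not AutoKeras names.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # one machine, all visible GPUs

def load_best_model(build_fn, weights_path):
    """Build a Keras model and load its weights under the distribution scope."""
    with strategy.scope():
        model = build_fn()                # e.g. rebuild the tuned architecture
        model.load_weights(weights_path)  # variables are mirrored across GPUs
    return model
```

Wiring that scope through the search itself is what kerastuner's `distribution_strategy` option is for, so exposing and forwarding that option from AutoKeras is the natural place to hook this in.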
</issue>
<code>
[start of autokeras/engine/tuner.py]
1 # Copyright 2020 The AutoKeras Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import os
17
18 import kerastuner
19 import tensorflow as tf
20 from tensorflow.keras import callbacks as tf_callbacks
21 from tensorflow.keras.layers.experimental import preprocessing
22 from tensorflow.python.util import nest
23
24 from autokeras.utils import utils
25
26
27 class AutoTuner(kerastuner.engine.tuner.Tuner):
28 """A Tuner class based on KerasTuner for AutoKeras.
29
30 Different from KerasTuner's Tuner class. AutoTuner's not only tunes the
31 Hypermodel which can be directly built into a Keras model, but also the
32 preprocessors. Therefore, a HyperGraph stores the overall search space containing
33 both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the
34 PreprocessGraph and KerasGraph with the provided HyperParameters.
35
36 The AutoTuner uses EarlyStopping for acceleration during the search and fully
37 train the model with full epochs and with both training and validation data.
38 The fully trained model is the best model to be used by AutoModel.
39
40 # Arguments
41 preprocessors: An instance or list of `Preprocessor` objects corresponding to
42 each AutoModel input, to preprocess a `tf.data.Dataset` before passing it
43 to the model. Defaults to None (no external preprocessing).
44 **kwargs: The args supported by KerasTuner.
45 """
46
47 def __init__(self, oracle, hypermodel, preprocessors=None, **kwargs):
48 # Initialize before super() for reload to work.
49 self._finished = False
50 super().__init__(oracle, hypermodel, **kwargs)
51 self.preprocessors = nest.flatten(preprocessors)
52 # Save or load the HyperModel.
53 self.hypermodel.hypermodel.save(os.path.join(self.project_dir, "graph"))
54
55 # Override the function to prevent building the model during initialization.
56 def _populate_initial_space(self):
57 pass
58
59 def get_best_model(self):
60 model = self._build_best_model()
61 model.load_weights(self.best_model_path)
62 return model
63
64 def _on_train_begin(self, model, hp, x, *args, **kwargs):
65 """Adapt the preprocessing layers and tune the fit arguments."""
66 self.adapt(model, x)
67
68 @staticmethod
69 def adapt(model, dataset):
70 """Adapt the preprocessing layers in the model."""
71 # Currently, only support using the original dataset to adapt all the
72 # preprocessing layers before the first non-preprocessing layer.
73 # TODO: Use PreprocessingStage for preprocessing layers adapt.
74 # TODO: Use Keras Tuner for preprocessing layers adapt.
75 x = dataset.map(lambda x, y: x)
76
77 def get_output_layer(tensor):
78 tensor = nest.flatten(tensor)[0]
79 for layer in model.layers:
80 if isinstance(layer, tf.keras.layers.InputLayer):
81 continue
82 input_node = nest.flatten(layer.input)[0]
83 if input_node is tensor:
84 return layer
85 return None
86
87 for index, input_node in enumerate(nest.flatten(model.input)):
88 temp_x = x.map(lambda *args: nest.flatten(args)[index])
89 layer = get_output_layer(input_node)
90 while isinstance(layer, preprocessing.PreprocessingLayer):
91 layer.adapt(temp_x)
92 layer = get_output_layer(layer.output)
93 return model
94
95 def search(
96 self, epochs=None, callbacks=None, fit_on_val_data=False, **fit_kwargs
97 ):
98 """Search for the best HyperParameters.
99
100 If there is not early-stopping in the callbacks, the early-stopping callback
101 is injected to accelerate the search process. At the end of the search, the
102 best model will be fully trained with the specified number of epochs.
103
104 # Arguments
105 callbacks: A list of callback functions. Defaults to None.
106 fit_on_val_data: Boolean. Use the training set and validation set for the
107 final fit of the best model.
108 """
109 if self._finished:
110 return
111
112 if callbacks is None:
113 callbacks = []
114
115 # Insert early-stopping for adaptive number of epochs.
116 epochs_provided = True
117 if epochs is None:
118 epochs_provided = False
119 epochs = 1000
120 if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
121 callbacks.append(tf_callbacks.EarlyStopping(patience=10))
122
123 # Insert early-stopping for acceleration.
124 early_stopping_inserted = False
125 new_callbacks = self._deepcopy_callbacks(callbacks)
126 if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
127 early_stopping_inserted = True
128 new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))
129
130 # Populate initial search space.
131 hp = self.oracle.get_space()
132 self.hypermodel.build(hp)
133 self.oracle.update_space(hp)
134
135 super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)
136
137 # Train the best model use validation data.
138 # Train the best model with enought number of epochs.
139 if fit_on_val_data or early_stopping_inserted:
140 copied_fit_kwargs = copy.copy(fit_kwargs)
141
142 # Remove early-stopping since no validation data.
143 # Remove early-stopping since it is inserted.
144 copied_fit_kwargs["callbacks"] = self._remove_early_stopping(callbacks)
145
146 # Decide the number of epochs.
147 copied_fit_kwargs["epochs"] = epochs
148 if not epochs_provided:
149 copied_fit_kwargs["epochs"] = self._get_best_trial_epochs()
150
151 # Concatenate training and validation data.
152 if fit_on_val_data:
153 copied_fit_kwargs["x"] = copied_fit_kwargs["x"].concatenate(
154 fit_kwargs["validation_data"]
155 )
156 copied_fit_kwargs.pop("validation_data")
157
158 model = self.final_fit(**copied_fit_kwargs)
159 else:
160 model = self.get_best_models()[0]
161
162 model.save_weights(self.best_model_path)
163 self._finished = True
164
165 def get_state(self):
166 state = super().get_state()
167 state.update({"finished": self._finished})
168 return state
169
170 def set_state(self, state):
171 super().set_state(state)
172 self._finished = state.get("finished")
173
174 @staticmethod
175 def _remove_early_stopping(callbacks):
176 return [
177 copy.deepcopy(callbacks)
178 for callback in callbacks
179 if not isinstance(callback, tf_callbacks.EarlyStopping)
180 ]
181
182 def _get_best_trial_epochs(self):
183 best_trial = self.oracle.get_best_trials(1)[0]
184 return len(best_trial.metrics.metrics["val_loss"]._observations)
185
186 def _build_best_model(self):
187 best_trial = self.oracle.get_best_trials(1)[0]
188 best_hp = best_trial.hyperparameters
189 return self.hypermodel.build(best_hp)
190
191 def final_fit(self, x=None, **fit_kwargs):
192 model = self._build_best_model()
193 self.adapt(model, x)
194 model.fit(x, **fit_kwargs)
195 return model
196
197 @property
198 def best_model_path(self):
199 return os.path.join(self.project_dir, "best_model")
200
201 @property
202 def objective(self):
203 return self.oracle.objective
204
205 @property
206 def max_trials(self):
207 return self.oracle.max_trials
208
[end of autokeras/engine/tuner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autokeras/engine/tuner.py b/autokeras/engine/tuner.py
--- a/autokeras/engine/tuner.py
+++ b/autokeras/engine/tuner.py
@@ -17,6 +17,7 @@
import kerastuner
import tensorflow as tf
+from kerastuner.engine import hypermodel as hm_module
from tensorflow.keras import callbacks as tf_callbacks
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.python.util import nest
@@ -58,7 +59,8 @@
def get_best_model(self):
model = self._build_best_model()
- model.load_weights(self.best_model_path)
+ with hm_module.maybe_distribute(self.distribution_strategy):
+ model.load_weights(self.best_model_path)
return model
def _on_train_begin(self, model, hp, x, *args, **kwargs):
| {"golden_diff": "diff --git a/autokeras/engine/tuner.py b/autokeras/engine/tuner.py\n--- a/autokeras/engine/tuner.py\n+++ b/autokeras/engine/tuner.py\n@@ -17,6 +17,7 @@\n \n import kerastuner\n import tensorflow as tf\n+from kerastuner.engine import hypermodel as hm_module\n from tensorflow.keras import callbacks as tf_callbacks\n from tensorflow.keras.layers.experimental import preprocessing\n from tensorflow.python.util import nest\n@@ -58,7 +59,8 @@\n \n def get_best_model(self):\n model = self._build_best_model()\n- model.load_weights(self.best_model_path)\n+ with hm_module.maybe_distribute(self.distribution_strategy):\n+ model.load_weights(self.best_model_path)\n return model\n \n def _on_train_begin(self, model, hp, x, *args, **kwargs):\n", "issue": "How use multiple gpu?\n### Feature Description\r\nI want to use a single machine with multiple gpu for training, but it seems to have no actual effect### Code Example\r\n\r\n```python\r\nwith strategy.scope():\r\n```\r\n\r\n### Reason\r\nSpeed up the calculation of toxins\r\n\r\n### Solution\r\n<!---\r\nPlease tell us how to implement the feature,\r\nif you have one in mind.\r\n-->\r\n\n", "before_files": [{"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os\n\nimport kerastuner\nimport tensorflow as tf\nfrom tensorflow.keras import callbacks as tf_callbacks\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.util import nest\n\nfrom autokeras.utils import utils\n\n\nclass AutoTuner(kerastuner.engine.tuner.Tuner):\n \"\"\"A Tuner class based on KerasTuner for AutoKeras.\n\n Different from KerasTuner's Tuner class. AutoTuner's not only tunes the\n Hypermodel which can be directly built into a Keras model, but also the\n preprocessors. Therefore, a HyperGraph stores the overall search space containing\n both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the\n PreprocessGraph and KerasGraph with the provided HyperParameters.\n\n The AutoTuner uses EarlyStopping for acceleration during the search and fully\n train the model with full epochs and with both training and validation data.\n The fully trained model is the best model to be used by AutoModel.\n\n # Arguments\n preprocessors: An instance or list of `Preprocessor` objects corresponding to\n each AutoModel input, to preprocess a `tf.data.Dataset` before passing it\n to the model. 
Defaults to None (no external preprocessing).\n **kwargs: The args supported by KerasTuner.\n \"\"\"\n\n def __init__(self, oracle, hypermodel, preprocessors=None, **kwargs):\n # Initialize before super() for reload to work.\n self._finished = False\n super().__init__(oracle, hypermodel, **kwargs)\n self.preprocessors = nest.flatten(preprocessors)\n # Save or load the HyperModel.\n self.hypermodel.hypermodel.save(os.path.join(self.project_dir, \"graph\"))\n\n # Override the function to prevent building the model during initialization.\n def _populate_initial_space(self):\n pass\n\n def get_best_model(self):\n model = self._build_best_model()\n model.load_weights(self.best_model_path)\n return model\n\n def _on_train_begin(self, model, hp, x, *args, **kwargs):\n \"\"\"Adapt the preprocessing layers and tune the fit arguments.\"\"\"\n self.adapt(model, x)\n\n @staticmethod\n def adapt(model, dataset):\n \"\"\"Adapt the preprocessing layers in the model.\"\"\"\n # Currently, only support using the original dataset to adapt all the\n # preprocessing layers before the first non-preprocessing layer.\n # TODO: Use PreprocessingStage for preprocessing layers adapt.\n # TODO: Use Keras Tuner for preprocessing layers adapt.\n x = dataset.map(lambda x, y: x)\n\n def get_output_layer(tensor):\n tensor = nest.flatten(tensor)[0]\n for layer in model.layers:\n if isinstance(layer, tf.keras.layers.InputLayer):\n continue\n input_node = nest.flatten(layer.input)[0]\n if input_node is tensor:\n return layer\n return None\n\n for index, input_node in enumerate(nest.flatten(model.input)):\n temp_x = x.map(lambda *args: nest.flatten(args)[index])\n layer = get_output_layer(input_node)\n while isinstance(layer, preprocessing.PreprocessingLayer):\n layer.adapt(temp_x)\n layer = get_output_layer(layer.output)\n return model\n\n def search(\n self, epochs=None, callbacks=None, fit_on_val_data=False, **fit_kwargs\n ):\n \"\"\"Search for the best HyperParameters.\n\n If there is not early-stopping in the callbacks, the early-stopping callback\n is injected to accelerate the search process. At the end of the search, the\n best model will be fully trained with the specified number of epochs.\n\n # Arguments\n callbacks: A list of callback functions. Defaults to None.\n fit_on_val_data: Boolean. 
Use the training set and validation set for the\n final fit of the best model.\n \"\"\"\n if self._finished:\n return\n\n if callbacks is None:\n callbacks = []\n\n # Insert early-stopping for adaptive number of epochs.\n epochs_provided = True\n if epochs is None:\n epochs_provided = False\n epochs = 1000\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Insert early-stopping for acceleration.\n early_stopping_inserted = False\n new_callbacks = self._deepcopy_callbacks(callbacks)\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n early_stopping_inserted = True\n new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Populate initial search space.\n hp = self.oracle.get_space()\n self.hypermodel.build(hp)\n self.oracle.update_space(hp)\n\n super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)\n\n # Train the best model use validation data.\n # Train the best model with enought number of epochs.\n if fit_on_val_data or early_stopping_inserted:\n copied_fit_kwargs = copy.copy(fit_kwargs)\n\n # Remove early-stopping since no validation data.\n # Remove early-stopping since it is inserted.\n copied_fit_kwargs[\"callbacks\"] = self._remove_early_stopping(callbacks)\n\n # Decide the number of epochs.\n copied_fit_kwargs[\"epochs\"] = epochs\n if not epochs_provided:\n copied_fit_kwargs[\"epochs\"] = self._get_best_trial_epochs()\n\n # Concatenate training and validation data.\n if fit_on_val_data:\n copied_fit_kwargs[\"x\"] = copied_fit_kwargs[\"x\"].concatenate(\n fit_kwargs[\"validation_data\"]\n )\n copied_fit_kwargs.pop(\"validation_data\")\n\n model = self.final_fit(**copied_fit_kwargs)\n else:\n model = self.get_best_models()[0]\n\n model.save_weights(self.best_model_path)\n self._finished = True\n\n def get_state(self):\n state = super().get_state()\n state.update({\"finished\": self._finished})\n return state\n\n def set_state(self, state):\n super().set_state(state)\n self._finished = state.get(\"finished\")\n\n @staticmethod\n def _remove_early_stopping(callbacks):\n return [\n copy.deepcopy(callbacks)\n for callback in callbacks\n if not isinstance(callback, tf_callbacks.EarlyStopping)\n ]\n\n def _get_best_trial_epochs(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n return len(best_trial.metrics.metrics[\"val_loss\"]._observations)\n\n def _build_best_model(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n best_hp = best_trial.hyperparameters\n return self.hypermodel.build(best_hp)\n\n def final_fit(self, x=None, **fit_kwargs):\n model = self._build_best_model()\n self.adapt(model, x)\n model.fit(x, **fit_kwargs)\n return model\n\n @property\n def best_model_path(self):\n return os.path.join(self.project_dir, \"best_model\")\n\n @property\n def objective(self):\n return self.oracle.objective\n\n @property\n def max_trials(self):\n return self.oracle.max_trials\n", "path": "autokeras/engine/tuner.py"}]} | 2,824 | 191 |
gh_patches_debug_7485 | rasdani/github-patches | git_diff | kornia__kornia-2897 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RANSAC.max_samples_by_conf returns negative numbers
### Describe the bug
This code fails
```python
from kornia.geometry import RANSAC

x = RANSAC.max_samples_by_conf(n_inl=100, num_tc=1000, sample_size=7, conf=0.99)
assert x > 0.0
```
### Reproduction steps
```bash
from kornia.geometry import RANSAC
x = RANSAC.max_samples_by_conf(n_inl=100, num_tc=1000, sample_size=7, conf=0.99)
assert x > 0.0
```
### Expected behavior
The returned value should always be positive
### Environment
```shell
Environment agnostic
```
### Additional context
a one-line bugfix will shortly follow
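To spell the arithmetic out (this is just a restatement of the existing expression in `max_samples_by_conf`, not new behaviour): `math.log(1.0 - conf)` is negative for any confidence in (0, 1), while the denominator is clamped with `max(eps, ...)` and so ends up positive whenever the inner log is negative, which it is for every non-degenerate inlier ratio. The quotient is therefore negative:

```python
import math

n_inl, num_tc, sample_size, conf, eps = 100, 1000, 7, 0.99, 1e-9

numerator = math.log(1.0 - conf)                       # log(0.01) < 0
inner = 1.0 - math.pow(n_inl / num_tc, sample_size)    # 1 - 0.1**7, just under 1.0
denominator = max(eps, math.log(max(eps, inner)))      # log(inner) < 0, clamped up to +eps
print(numerator / denominator)                         # large negative number
```

Clamping the denominator so it stays at or below `-eps` (i.e. `min(-eps, log(...))`) keeps the quotient positive, which is the one-line fix referred to above.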
</issue>
<code>
[start of kornia/geometry/ransac.py]
1 """Module containing RANSAC modules."""
2
3 import math
4 from functools import partial
5 from typing import Callable, Optional, Tuple
6
7 import torch
8
9 from kornia.core import Device, Module, Tensor, zeros
10 from kornia.core.check import KORNIA_CHECK_SHAPE
11 from kornia.geometry import (
12 find_fundamental,
13 find_homography_dlt,
14 find_homography_dlt_iterated,
15 find_homography_lines_dlt,
16 find_homography_lines_dlt_iterated,
17 symmetrical_epipolar_distance,
18 )
19 from kornia.geometry.homography import (
20 line_segment_transfer_error_one_way,
21 oneway_transfer_error,
22 sample_is_valid_for_homography,
23 )
24
25 __all__ = ["RANSAC"]
26
27
28 class RANSAC(Module):
29 """Module for robust geometry estimation with RANSAC. https://en.wikipedia.org/wiki/Random_sample_consensus.
30
31 Args:
32 model_type: type of model to estimate: "homography", "fundamental", "fundamental_7pt",
33 "homography_from_linesegments".
34 inliers_threshold: threshold for the correspondence to be an inlier.
35 batch_size: number of generated samples at once.
36 max_iterations: maximum batches to generate. Actual number of models to try is ``batch_size * max_iterations``.
37 confidence: desired confidence of the result, used for the early stopping.
38 max_local_iterations: number of local optimization (polishing) iterations.
39 """
40
41 def __init__(
42 self,
43 model_type: str = "homography",
44 inl_th: float = 2.0,
45 batch_size: int = 2048,
46 max_iter: int = 10,
47 confidence: float = 0.99,
48 max_lo_iters: int = 5,
49 ) -> None:
50 super().__init__()
51 self.supported_models = ["homography", "fundamental", "fundamental_7pt", "homography_from_linesegments"]
52 self.inl_th = inl_th
53 self.max_iter = max_iter
54 self.batch_size = batch_size
55 self.model_type = model_type
56 self.confidence = confidence
57 self.max_lo_iters = max_lo_iters
58 self.model_type = model_type
59
60 self.error_fn: Callable[..., Tensor]
61 self.minimal_solver: Callable[..., Tensor]
62 self.polisher_solver: Callable[..., Tensor]
63
64 if model_type == "homography":
65 self.error_fn = oneway_transfer_error
66 self.minimal_solver = find_homography_dlt
67 self.polisher_solver = find_homography_dlt_iterated
68 self.minimal_sample_size = 4
69 elif model_type == "homography_from_linesegments":
70 self.error_fn = line_segment_transfer_error_one_way
71 self.minimal_solver = find_homography_lines_dlt
72 self.polisher_solver = find_homography_lines_dlt_iterated
73 self.minimal_sample_size = 4
74 elif model_type == "fundamental":
75 self.error_fn = symmetrical_epipolar_distance
76 self.minimal_solver = find_fundamental
77 self.minimal_sample_size = 8
78 self.polisher_solver = find_fundamental
79 elif model_type == "fundamental_7pt":
80 self.error_fn = symmetrical_epipolar_distance
81 self.minimal_solver = partial(find_fundamental, method="7POINT")
82 self.minimal_sample_size = 7
83 self.polisher_solver = find_fundamental
84 else:
85 raise NotImplementedError(f"{model_type} is unknown. Try one of {self.supported_models}")
86
87 def sample(self, sample_size: int, pop_size: int, batch_size: int, device: Device = torch.device("cpu")) -> Tensor:
88 """Minimal sampler, but unlike traditional RANSAC we sample in batches to get benefit of the parallel
89 processing, esp.
90
91 on GPU.
92 """
93 rand = torch.rand(batch_size, pop_size, device=device)
94 _, out = rand.topk(k=sample_size, dim=1)
95 return out
96
97 @staticmethod
98 def max_samples_by_conf(n_inl: int, num_tc: int, sample_size: int, conf: float) -> float:
99 """Formula to update max_iter in order to stop iterations earlier
100 https://en.wikipedia.org/wiki/Random_sample_consensus."""
101 eps = 1e-9
102 if num_tc <= sample_size:
103 return 1.0
104 if n_inl == num_tc:
105 return 1.0
106 return math.log(1.0 - conf) / max(eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))
107
108 def estimate_model_from_minsample(self, kp1: Tensor, kp2: Tensor) -> Tensor:
109 batch_size, sample_size = kp1.shape[:2]
110 H = self.minimal_solver(kp1, kp2, torch.ones(batch_size, sample_size, dtype=kp1.dtype, device=kp1.device))
111 return H
112
113 def verify(self, kp1: Tensor, kp2: Tensor, models: Tensor, inl_th: float) -> Tuple[Tensor, Tensor, float]:
114 if len(kp1.shape) == 2:
115 kp1 = kp1[None]
116 if len(kp2.shape) == 2:
117 kp2 = kp2[None]
118 batch_size = models.shape[0]
119 if self.model_type == "homography_from_linesegments":
120 errors = self.error_fn(kp1.expand(batch_size, -1, 2, 2), kp2.expand(batch_size, -1, 2, 2), models)
121 else:
122 errors = self.error_fn(kp1.expand(batch_size, -1, 2), kp2.expand(batch_size, -1, 2), models)
123 inl = errors <= inl_th
124 models_score = inl.to(kp1).sum(dim=1)
125 best_model_idx = models_score.argmax()
126 best_model_score = models_score[best_model_idx].item()
127 model_best = models[best_model_idx].clone()
128 inliers_best = inl[best_model_idx]
129 return model_best, inliers_best, best_model_score
130
131 def remove_bad_samples(self, kp1: Tensor, kp2: Tensor) -> Tuple[Tensor, Tensor]:
132 """"""
133 # ToDo: add (model-specific) verification of the samples,
134 # E.g. constraints on not to be a degenerate sample
135 if self.model_type == "homography":
136 mask = sample_is_valid_for_homography(kp1, kp2)
137 return kp1[mask], kp2[mask]
138 return kp1, kp2
139
140 def remove_bad_models(self, models: Tensor) -> Tensor:
141 # ToDo: add more and better degenerate model rejection
142 # For now it is simple and hardcoded
143 main_diagonal = torch.diagonal(models, dim1=1, dim2=2)
144 mask = main_diagonal.abs().min(dim=1)[0] > 1e-4
145 return models[mask]
146
147 def polish_model(self, kp1: Tensor, kp2: Tensor, inliers: Tensor) -> Tensor:
148 # TODO: Replace this with MAGSAC++ polisher
149 kp1_inl = kp1[inliers][None]
150 kp2_inl = kp2[inliers][None]
151 num_inl = kp1_inl.size(1)
152 model = self.polisher_solver(
153 kp1_inl, kp2_inl, torch.ones(1, num_inl, dtype=kp1_inl.dtype, device=kp1_inl.device)
154 )
155 return model
156
157 def validate_inputs(self, kp1: Tensor, kp2: Tensor, weights: Optional[Tensor] = None) -> None:
158 if self.model_type in ["homography", "fundamental"]:
159 KORNIA_CHECK_SHAPE(kp1, ["N", "2"])
160 KORNIA_CHECK_SHAPE(kp2, ["N", "2"])
161 if not (kp1.shape[0] == kp2.shape[0]) or (kp1.shape[0] < self.minimal_sample_size):
162 raise ValueError(
163 "kp1 and kp2 should be equal shape at at least"
164 f" [{self.minimal_sample_size}, 2], got {kp1.shape}, {kp2.shape}"
165 )
166 if self.model_type == "homography_from_linesegments":
167 KORNIA_CHECK_SHAPE(kp1, ["N", "2", "2"])
168 KORNIA_CHECK_SHAPE(kp2, ["N", "2", "2"])
169 if not (kp1.shape[0] == kp2.shape[0]) or (kp1.shape[0] < self.minimal_sample_size):
170 raise ValueError(
171 "kp1 and kp2 should be equal shape at at least"
172 f" [{self.minimal_sample_size}, 2, 2], got {kp1.shape},"
173 f" {kp2.shape}"
174 )
175
176 def forward(self, kp1: Tensor, kp2: Tensor, weights: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
177 r"""Main forward method to execute the RANSAC algorithm.
178
179 Args:
180 kp1: source image keypoints :math:`(N, 2)`.
181 kp2: distance image keypoints :math:`(N, 2)`.
182 weights: optional correspondences weights. Not used now.
183
184 Returns:
185 - Estimated model, shape of :math:`(1, 3, 3)`.
186 - The inlier/outlier mask, shape of :math:`(1, N)`, where N is number of input correspondences.
187 """
188 self.validate_inputs(kp1, kp2, weights)
189 best_score_total: float = float(self.minimal_sample_size)
190 num_tc: int = len(kp1)
191 best_model_total = zeros(3, 3, dtype=kp1.dtype, device=kp1.device)
192 inliers_best_total: Tensor = zeros(num_tc, 1, device=kp1.device, dtype=torch.bool)
193 for i in range(self.max_iter):
194 # Sample minimal samples in batch to estimate models
195 idxs = self.sample(self.minimal_sample_size, num_tc, self.batch_size, kp1.device)
196 kp1_sampled = kp1[idxs]
197 kp2_sampled = kp2[idxs]
198
199 kp1_sampled, kp2_sampled = self.remove_bad_samples(kp1_sampled, kp2_sampled)
200 if len(kp1_sampled) == 0:
201 continue
202 # Estimate models
203 models = self.estimate_model_from_minsample(kp1_sampled, kp2_sampled)
204 models = self.remove_bad_models(models)
205 if (models is None) or (len(models) == 0):
206 continue
207 # Score the models and select the best one
208 model, inliers, model_score = self.verify(kp1, kp2, models, self.inl_th)
209 # Store far-the-best model and (optionally) do a local optimization
210 if model_score > best_score_total:
211 # Local optimization
212 for lo_step in range(self.max_lo_iters):
213 model_lo = self.polish_model(kp1, kp2, inliers)
214 if (model_lo is None) or (len(model_lo) == 0):
215 continue
216 _, inliers_lo, score_lo = self.verify(kp1, kp2, model_lo, self.inl_th)
217 # print (f"Orig score = {best_model_score}, LO score = {score_lo} TC={num_tc}")
218 if score_lo > model_score:
219 model = model_lo.clone()[0]
220 inliers = inliers_lo.clone()
221 model_score = score_lo
222 else:
223 break
224 # Now storing the best model
225 best_model_total = model.clone()
226 inliers_best_total = inliers.clone()
227 best_score_total = model_score
228
229 # Should we already stop?
230 new_max_iter = int(
231 self.max_samples_by_conf(int(best_score_total), num_tc, self.minimal_sample_size, self.confidence)
232 )
233 # print (f"New max_iter = {new_max_iter}")
234 # Stop estimation, if the model is very good
235 if (i + 1) * self.batch_size >= new_max_iter:
236 break
237 # local optimization with all inliers for better precision
238 return best_model_total, inliers_best_total
239
[end of kornia/geometry/ransac.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/geometry/ransac.py b/kornia/geometry/ransac.py
--- a/kornia/geometry/ransac.py
+++ b/kornia/geometry/ransac.py
@@ -103,7 +103,7 @@
return 1.0
if n_inl == num_tc:
return 1.0
- return math.log(1.0 - conf) / max(eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))
+ return math.log(1.0 - conf) / min(-eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))
def estimate_model_from_minsample(self, kp1: Tensor, kp2: Tensor) -> Tensor:
batch_size, sample_size = kp1.shape[:2]
| {"golden_diff": "diff --git a/kornia/geometry/ransac.py b/kornia/geometry/ransac.py\n--- a/kornia/geometry/ransac.py\n+++ b/kornia/geometry/ransac.py\n@@ -103,7 +103,7 @@\n return 1.0\n if n_inl == num_tc:\n return 1.0\n- return math.log(1.0 - conf) / max(eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))\n+ return math.log(1.0 - conf) / min(-eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))\n \n def estimate_model_from_minsample(self, kp1: Tensor, kp2: Tensor) -> Tensor:\n batch_size, sample_size = kp1.shape[:2]\n", "issue": "RANSAC.max_samples_by_conf returns negative numbers\n### Describe the bug\n\nThis code fails\r\n\r\n`\r\nfrom kornia.geometry import RANSAC\r\n\r\nx = RANSAC.max_samples_by_conf(n_inl=100, num_tc=1000, sample_size=7, conf=0.99)\r\nassert x > 0.0\r\n`\r\n\n\n### Reproduction steps\n\n```bash\nfrom kornia.geometry import RANSAC\r\n\r\nx = RANSAC.max_samples_by_conf(n_inl=100, num_tc=1000, sample_size=7, conf=0.99)\r\nassert x > 0.0\n```\n\n\n### Expected behavior\n\nThe returned value should always be positive\n\n### Environment\n\n```shell\nEnvironment agnostic\n```\n\n\n### Additional context\n\na one-line bugfix will shortly follow\n", "before_files": [{"content": "\"\"\"Module containing RANSAC modules.\"\"\"\n\nimport math\nfrom functools import partial\nfrom typing import Callable, Optional, Tuple\n\nimport torch\n\nfrom kornia.core import Device, Module, Tensor, zeros\nfrom kornia.core.check import KORNIA_CHECK_SHAPE\nfrom kornia.geometry import (\n find_fundamental,\n find_homography_dlt,\n find_homography_dlt_iterated,\n find_homography_lines_dlt,\n find_homography_lines_dlt_iterated,\n symmetrical_epipolar_distance,\n)\nfrom kornia.geometry.homography import (\n line_segment_transfer_error_one_way,\n oneway_transfer_error,\n sample_is_valid_for_homography,\n)\n\n__all__ = [\"RANSAC\"]\n\n\nclass RANSAC(Module):\n \"\"\"Module for robust geometry estimation with RANSAC. https://en.wikipedia.org/wiki/Random_sample_consensus.\n\n Args:\n model_type: type of model to estimate: \"homography\", \"fundamental\", \"fundamental_7pt\",\n \"homography_from_linesegments\".\n inliers_threshold: threshold for the correspondence to be an inlier.\n batch_size: number of generated samples at once.\n max_iterations: maximum batches to generate. 
Actual number of models to try is ``batch_size * max_iterations``.\n confidence: desired confidence of the result, used for the early stopping.\n max_local_iterations: number of local optimization (polishing) iterations.\n \"\"\"\n\n def __init__(\n self,\n model_type: str = \"homography\",\n inl_th: float = 2.0,\n batch_size: int = 2048,\n max_iter: int = 10,\n confidence: float = 0.99,\n max_lo_iters: int = 5,\n ) -> None:\n super().__init__()\n self.supported_models = [\"homography\", \"fundamental\", \"fundamental_7pt\", \"homography_from_linesegments\"]\n self.inl_th = inl_th\n self.max_iter = max_iter\n self.batch_size = batch_size\n self.model_type = model_type\n self.confidence = confidence\n self.max_lo_iters = max_lo_iters\n self.model_type = model_type\n\n self.error_fn: Callable[..., Tensor]\n self.minimal_solver: Callable[..., Tensor]\n self.polisher_solver: Callable[..., Tensor]\n\n if model_type == \"homography\":\n self.error_fn = oneway_transfer_error\n self.minimal_solver = find_homography_dlt\n self.polisher_solver = find_homography_dlt_iterated\n self.minimal_sample_size = 4\n elif model_type == \"homography_from_linesegments\":\n self.error_fn = line_segment_transfer_error_one_way\n self.minimal_solver = find_homography_lines_dlt\n self.polisher_solver = find_homography_lines_dlt_iterated\n self.minimal_sample_size = 4\n elif model_type == \"fundamental\":\n self.error_fn = symmetrical_epipolar_distance\n self.minimal_solver = find_fundamental\n self.minimal_sample_size = 8\n self.polisher_solver = find_fundamental\n elif model_type == \"fundamental_7pt\":\n self.error_fn = symmetrical_epipolar_distance\n self.minimal_solver = partial(find_fundamental, method=\"7POINT\")\n self.minimal_sample_size = 7\n self.polisher_solver = find_fundamental\n else:\n raise NotImplementedError(f\"{model_type} is unknown. 
Try one of {self.supported_models}\")\n\n def sample(self, sample_size: int, pop_size: int, batch_size: int, device: Device = torch.device(\"cpu\")) -> Tensor:\n \"\"\"Minimal sampler, but unlike traditional RANSAC we sample in batches to get benefit of the parallel\n processing, esp.\n\n on GPU.\n \"\"\"\n rand = torch.rand(batch_size, pop_size, device=device)\n _, out = rand.topk(k=sample_size, dim=1)\n return out\n\n @staticmethod\n def max_samples_by_conf(n_inl: int, num_tc: int, sample_size: int, conf: float) -> float:\n \"\"\"Formula to update max_iter in order to stop iterations earlier\n https://en.wikipedia.org/wiki/Random_sample_consensus.\"\"\"\n eps = 1e-9\n if num_tc <= sample_size:\n return 1.0\n if n_inl == num_tc:\n return 1.0\n return math.log(1.0 - conf) / max(eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))\n\n def estimate_model_from_minsample(self, kp1: Tensor, kp2: Tensor) -> Tensor:\n batch_size, sample_size = kp1.shape[:2]\n H = self.minimal_solver(kp1, kp2, torch.ones(batch_size, sample_size, dtype=kp1.dtype, device=kp1.device))\n return H\n\n def verify(self, kp1: Tensor, kp2: Tensor, models: Tensor, inl_th: float) -> Tuple[Tensor, Tensor, float]:\n if len(kp1.shape) == 2:\n kp1 = kp1[None]\n if len(kp2.shape) == 2:\n kp2 = kp2[None]\n batch_size = models.shape[0]\n if self.model_type == \"homography_from_linesegments\":\n errors = self.error_fn(kp1.expand(batch_size, -1, 2, 2), kp2.expand(batch_size, -1, 2, 2), models)\n else:\n errors = self.error_fn(kp1.expand(batch_size, -1, 2), kp2.expand(batch_size, -1, 2), models)\n inl = errors <= inl_th\n models_score = inl.to(kp1).sum(dim=1)\n best_model_idx = models_score.argmax()\n best_model_score = models_score[best_model_idx].item()\n model_best = models[best_model_idx].clone()\n inliers_best = inl[best_model_idx]\n return model_best, inliers_best, best_model_score\n\n def remove_bad_samples(self, kp1: Tensor, kp2: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"\"\"\"\n # ToDo: add (model-specific) verification of the samples,\n # E.g. 
constraints on not to be a degenerate sample\n if self.model_type == \"homography\":\n mask = sample_is_valid_for_homography(kp1, kp2)\n return kp1[mask], kp2[mask]\n return kp1, kp2\n\n def remove_bad_models(self, models: Tensor) -> Tensor:\n # ToDo: add more and better degenerate model rejection\n # For now it is simple and hardcoded\n main_diagonal = torch.diagonal(models, dim1=1, dim2=2)\n mask = main_diagonal.abs().min(dim=1)[0] > 1e-4\n return models[mask]\n\n def polish_model(self, kp1: Tensor, kp2: Tensor, inliers: Tensor) -> Tensor:\n # TODO: Replace this with MAGSAC++ polisher\n kp1_inl = kp1[inliers][None]\n kp2_inl = kp2[inliers][None]\n num_inl = kp1_inl.size(1)\n model = self.polisher_solver(\n kp1_inl, kp2_inl, torch.ones(1, num_inl, dtype=kp1_inl.dtype, device=kp1_inl.device)\n )\n return model\n\n def validate_inputs(self, kp1: Tensor, kp2: Tensor, weights: Optional[Tensor] = None) -> None:\n if self.model_type in [\"homography\", \"fundamental\"]:\n KORNIA_CHECK_SHAPE(kp1, [\"N\", \"2\"])\n KORNIA_CHECK_SHAPE(kp2, [\"N\", \"2\"])\n if not (kp1.shape[0] == kp2.shape[0]) or (kp1.shape[0] < self.minimal_sample_size):\n raise ValueError(\n \"kp1 and kp2 should be equal shape at at least\"\n f\" [{self.minimal_sample_size}, 2], got {kp1.shape}, {kp2.shape}\"\n )\n if self.model_type == \"homography_from_linesegments\":\n KORNIA_CHECK_SHAPE(kp1, [\"N\", \"2\", \"2\"])\n KORNIA_CHECK_SHAPE(kp2, [\"N\", \"2\", \"2\"])\n if not (kp1.shape[0] == kp2.shape[0]) or (kp1.shape[0] < self.minimal_sample_size):\n raise ValueError(\n \"kp1 and kp2 should be equal shape at at least\"\n f\" [{self.minimal_sample_size}, 2, 2], got {kp1.shape},\"\n f\" {kp2.shape}\"\n )\n\n def forward(self, kp1: Tensor, kp2: Tensor, weights: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:\n r\"\"\"Main forward method to execute the RANSAC algorithm.\n\n Args:\n kp1: source image keypoints :math:`(N, 2)`.\n kp2: distance image keypoints :math:`(N, 2)`.\n weights: optional correspondences weights. 
Not used now.\n\n Returns:\n - Estimated model, shape of :math:`(1, 3, 3)`.\n - The inlier/outlier mask, shape of :math:`(1, N)`, where N is number of input correspondences.\n \"\"\"\n self.validate_inputs(kp1, kp2, weights)\n best_score_total: float = float(self.minimal_sample_size)\n num_tc: int = len(kp1)\n best_model_total = zeros(3, 3, dtype=kp1.dtype, device=kp1.device)\n inliers_best_total: Tensor = zeros(num_tc, 1, device=kp1.device, dtype=torch.bool)\n for i in range(self.max_iter):\n # Sample minimal samples in batch to estimate models\n idxs = self.sample(self.minimal_sample_size, num_tc, self.batch_size, kp1.device)\n kp1_sampled = kp1[idxs]\n kp2_sampled = kp2[idxs]\n\n kp1_sampled, kp2_sampled = self.remove_bad_samples(kp1_sampled, kp2_sampled)\n if len(kp1_sampled) == 0:\n continue\n # Estimate models\n models = self.estimate_model_from_minsample(kp1_sampled, kp2_sampled)\n models = self.remove_bad_models(models)\n if (models is None) or (len(models) == 0):\n continue\n # Score the models and select the best one\n model, inliers, model_score = self.verify(kp1, kp2, models, self.inl_th)\n # Store far-the-best model and (optionally) do a local optimization\n if model_score > best_score_total:\n # Local optimization\n for lo_step in range(self.max_lo_iters):\n model_lo = self.polish_model(kp1, kp2, inliers)\n if (model_lo is None) or (len(model_lo) == 0):\n continue\n _, inliers_lo, score_lo = self.verify(kp1, kp2, model_lo, self.inl_th)\n # print (f\"Orig score = {best_model_score}, LO score = {score_lo} TC={num_tc}\")\n if score_lo > model_score:\n model = model_lo.clone()[0]\n inliers = inliers_lo.clone()\n model_score = score_lo\n else:\n break\n # Now storing the best model\n best_model_total = model.clone()\n inliers_best_total = inliers.clone()\n best_score_total = model_score\n\n # Should we already stop?\n new_max_iter = int(\n self.max_samples_by_conf(int(best_score_total), num_tc, self.minimal_sample_size, self.confidence)\n )\n # print (f\"New max_iter = {new_max_iter}\")\n # Stop estimation, if the model is very good\n if (i + 1) * self.batch_size >= new_max_iter:\n break\n # local optimization with all inliers for better precision\n return best_model_total, inliers_best_total\n", "path": "kornia/geometry/ransac.py"}]} | 4,038 | 202 |
gh_patches_debug_37148 | rasdani/github-patches | git_diff | gratipay__gratipay.com-4655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify mailing list opt-in even further
In #4651 we dropped back to two mailing list options, quarterly and yearly. However, as I think through the way #BackTheStack is gonna wanna play out, we are going to want to send multiple updates over the course of the campaign. That's not consistent with a "quarterly" or "yearly" newsletter. I think our best bet is to just have a simple "Yes/No" on the mailing list.
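A sketch of what the parsing side of that could look like, written against the `_parse` helper in `gratipay/homepage.py` shown below. The field name `on_mailing_list`, the yes/no values, and the opted-in default are assumptions for illustration, not necessarily what the project settled on:

```python
# Hypothetical sketch: collapse the quarterly/yearly/never choice into a
# single yes/no mailing-list opt-in. Names and default are assumptions.
def parse_mailing_list_optin(raw_value, errors):
    """Normalize a yes/no checkbox value, flagging anything else as an error."""
    value = raw_value.strip().lower()
    if value not in ('yes', 'no'):
        errors.append('on_mailing_list')
        value = 'yes'   # fall back to opted-in, as the current code does for follow_up
    return value

errors = []
assert parse_mailing_list_optin(' Yes ', errors) == 'yes'
assert parse_mailing_list_optin('maybe', errors) == 'yes'
assert errors == ['on_mailing_list']
```

The `follow_up` column that `PaymentForOpenSource.insert` writes would presumably shrink to the same yes/no value, but that is outside this sketch.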
</issue>
<code>
[start of gratipay/models/payment_for_open_source.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import gratipay
5 from uuid import uuid4
6 from postgres.orm import Model
7
8
9 class PaymentForOpenSource(Model):
10
11 typname = "payments_for_open_source"
12
13 def __repr__(self):
14 return '<PaymentForOpenSource: {}>'.format(repr(self.amount))
15
16
17 @property
18 def succeeded(self):
19 return self.braintree_result_message == ''
20
21
22 @property
23 def invoice_url(self):
24 if not self.succeeded:
25 return None
26 return '{}/browse/payments/{}/invoice.html'.format(gratipay.base_url, self.uuid)
27
28
29 @classmethod
30 def from_uuid(cls, uuid, cursor=None):
31 """Take a uuid and return an object.
32 """
33 return (cursor or cls.db).one("""
34 SELECT pfos.*::payments_for_open_source
35 FROM payments_for_open_source pfos
36 WHERE uuid = %s
37 """, (uuid,))
38
39
40 @classmethod
41 def insert(cls, amount, grateful_for, name, follow_up, email_address,
42 promotion_name, promotion_url, promotion_twitter, promotion_message,
43 cursor=None):
44 """Take baseline info and insert into the database.
45 """
46 uuid = uuid4().hex
47 return (cursor or cls.db).one("""
48 INSERT INTO payments_for_open_source
49 (uuid, amount, grateful_for, name, follow_up, email_address,
50 promotion_name, promotion_url, promotion_twitter, promotion_message)
51 VALUES (%s, %s, %s, %s, %s, %s,
52 %s, %s, %s, %s)
53 RETURNING payments_for_open_source.*::payments_for_open_source
54 """, (uuid, amount, grateful_for, name, follow_up, email_address,
55 promotion_name, promotion_url, promotion_twitter, promotion_message))
56
57
58 def process_result(self, result):
59 """Take a Braintree API result and update the database.
60 """
61 result_message = '' if result.is_success else result.message
62 transaction_id = None
63 if result.transaction:
64 transaction_id = result.transaction.id
65
66 # Verify that Braintree is sending us the right payload.
67 # TODO This is hard to test and it should be a pretty tight guarantee,
68 # so I am commenting out for now. :(
69 #pfos_uuid = result.transaction.custom_fields['pfos_uuid']
70 #assert pfos_uuid == self.uuid, (pfos_uuid, transaction_id)
71
72 self.db.run("""
73 UPDATE payments_for_open_source
74 SET braintree_result_message=%s
75 , braintree_transaction_id=%s
76 WHERE uuid=%s
77 """, (result_message, transaction_id, self.uuid))
78 self.set_attributes( braintree_result_message=result_message
79 , braintree_transaction_id=transaction_id
80 )
81
[end of gratipay/models/payment_for_open_source.py]
[start of gratipay/homepage.py]
1 # -*- coding: utf-8 -*-
2 """This is the Python library behind gratipay.com.
3 """
4 from __future__ import absolute_import, division, print_function, unicode_literals
5
6 from gratipay import utils
7 from gratipay.models.payment_for_open_source import PaymentForOpenSource
8
9
10 kB = 2**10
11
12
13 def _parse(raw):
14 """Given a POST request.body, return (parsed<dict>, errors<list>).
15 """
16
17 errors = []
18 x = lambda f: raw[f].strip() # KeyError -> 400
19
20 # payment info
21 amount = x('amount')
22 if (not amount.isdigit()) or (int(amount) < 10):
23 errors.append('amount')
24 amount = ''.join(x for x in amount.split('.')[0] if x.isdigit())
25
26 payment_method_nonce = x('payment_method_nonce')
27 if len(payment_method_nonce) > 36:
28 errors.append('payment_method_nonce')
29 payment_method_nonce = ''
30
31 # ecosystems/packages
32 grateful_for = x('grateful_for')
33 if len(grateful_for) > 16*kB:
34 grateful_for = grateful_for[:16*kB]
35 errors.append('grateful_for')
36
37 # contact info
38 name = x('name')
39 if len(name) > 255:
40 name = name[:255]
41 errors.append('name')
42
43 email_address = x('email_address')
44 if email_address and not utils.is_valid_email_address(email_address):
45 email_address = email_address[:255]
46 errors.append('email_address')
47
48 follow_up = x('follow_up')
49 if follow_up not in ('quarterly', 'yearly', 'never'):
50 follow_up = 'quarterly'
51 errors.append('follow_up')
52
53 # promo fields
54 promotion_name = x('promotion_name')
55 if len(promotion_name) > 32:
56 promotion_name = promotion_name[:32]
57 errors.append('promotion_name')
58
59 promotion_url = x('promotion_url')
60 is_link = lambda x: (x.startswith('http://') or x.startswith('https://')) and '.' in x
61 if len(promotion_url) > 255 or (promotion_url and not is_link(promotion_url)):
62 promotion_url = promotion_url[:255]
63 errors.append('promotion_url')
64
65 promotion_twitter = x('promotion_twitter')
66 if len(promotion_twitter) > 32:
67 promotion_twitter = promotion_twitter[:32]
68 # TODO What are Twitter's rules?
69 errors.append('promotion_twitter')
70
71 promotion_message = x('promotion_message')
72 if len(promotion_message) > 128:
73 promotion_message = promotion_message[:128]
74 errors.append('promotion_message')
75
76 parsed = { 'amount': amount
77 , 'payment_method_nonce': payment_method_nonce
78 , 'grateful_for': grateful_for
79 , 'name': name
80 , 'email_address': email_address
81 , 'follow_up': follow_up
82 , 'promotion_name': promotion_name
83 , 'promotion_url': promotion_url
84 , 'promotion_twitter': promotion_twitter
85 , 'promotion_message': promotion_message
86 }
87 return parsed, errors
88
89
90 def _store(parsed):
91 return PaymentForOpenSource.insert(**parsed)
92
93
94 def _charge(app, pfos, nonce):
95 params = { 'amount': pfos.amount
96 , 'payment_method_nonce': nonce
97 , 'options': {'submit_for_settlement': True}
98 , 'custom_fields': {'pfos_uuid': pfos.uuid}
99 }
100 result = app.pfos_card_charger.charge(params)
101 pfos.process_result(result)
102
103
104 def _send(app, pfos):
105 app.email_queue.put( to=None
106 , template='paid-for-open-source'
107 , email=pfos.email_address
108 , amount=pfos.amount
109 , invoice_url=pfos.invoice_url
110 )
111
112
113 def pay_for_open_source(app, raw):
114 parsed, errors = _parse(raw)
115 out = {'errors': errors, 'invoice_url': None}
116 if not errors:
117 payment_method_nonce = parsed.pop('payment_method_nonce')
118 pfos = _store(parsed)
119 _charge(app, pfos, payment_method_nonce)
120 if pfos.succeeded:
121 out['invoice_url'] = pfos.invoice_url
122 if pfos.email_address:
123 _send(app, pfos)
124 else:
125 out['errors'].append('charging')
126 return out
127
[end of gratipay/homepage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/homepage.py b/gratipay/homepage.py
--- a/gratipay/homepage.py
+++ b/gratipay/homepage.py
@@ -45,10 +45,10 @@
email_address = email_address[:255]
errors.append('email_address')
- follow_up = x('follow_up')
- if follow_up not in ('quarterly', 'yearly', 'never'):
- follow_up = 'quarterly'
- errors.append('follow_up')
+ on_mailing_list = x('on_mailing_list')
+ if on_mailing_list not in ('yes', 'no'):
+ on_mailing_list = 'yes'
+ errors.append('on_mailing_list')
# promo fields
promotion_name = x('promotion_name')
@@ -78,7 +78,7 @@
, 'grateful_for': grateful_for
, 'name': name
, 'email_address': email_address
- , 'follow_up': follow_up
+ , 'on_mailing_list': on_mailing_list
, 'promotion_name': promotion_name
, 'promotion_url': promotion_url
, 'promotion_twitter': promotion_twitter
diff --git a/gratipay/models/payment_for_open_source.py b/gratipay/models/payment_for_open_source.py
--- a/gratipay/models/payment_for_open_source.py
+++ b/gratipay/models/payment_for_open_source.py
@@ -38,20 +38,21 @@
@classmethod
- def insert(cls, amount, grateful_for, name, follow_up, email_address,
+ def insert(cls, amount, grateful_for, name, on_mailing_list, email_address,
promotion_name, promotion_url, promotion_twitter, promotion_message,
cursor=None):
"""Take baseline info and insert into the database.
"""
uuid = uuid4().hex
+ on_mailing_list = on_mailing_list == 'yes'
return (cursor or cls.db).one("""
INSERT INTO payments_for_open_source
- (uuid, amount, grateful_for, name, follow_up, email_address,
+ (uuid, amount, grateful_for, name, on_mailing_list, email_address,
promotion_name, promotion_url, promotion_twitter, promotion_message)
VALUES (%s, %s, %s, %s, %s, %s,
%s, %s, %s, %s)
RETURNING payments_for_open_source.*::payments_for_open_source
- """, (uuid, amount, grateful_for, name, follow_up, email_address,
+ """, (uuid, amount, grateful_for, name, on_mailing_list, email_address,
promotion_name, promotion_url, promotion_twitter, promotion_message))
| {"golden_diff": "diff --git a/gratipay/homepage.py b/gratipay/homepage.py\n--- a/gratipay/homepage.py\n+++ b/gratipay/homepage.py\n@@ -45,10 +45,10 @@\n email_address = email_address[:255]\n errors.append('email_address')\n \n- follow_up = x('follow_up')\n- if follow_up not in ('quarterly', 'yearly', 'never'):\n- follow_up = 'quarterly'\n- errors.append('follow_up')\n+ on_mailing_list = x('on_mailing_list')\n+ if on_mailing_list not in ('yes', 'no'):\n+ on_mailing_list = 'yes'\n+ errors.append('on_mailing_list')\n \n # promo fields\n promotion_name = x('promotion_name')\n@@ -78,7 +78,7 @@\n , 'grateful_for': grateful_for\n , 'name': name\n , 'email_address': email_address\n- , 'follow_up': follow_up\n+ , 'on_mailing_list': on_mailing_list\n , 'promotion_name': promotion_name\n , 'promotion_url': promotion_url\n , 'promotion_twitter': promotion_twitter\ndiff --git a/gratipay/models/payment_for_open_source.py b/gratipay/models/payment_for_open_source.py\n--- a/gratipay/models/payment_for_open_source.py\n+++ b/gratipay/models/payment_for_open_source.py\n@@ -38,20 +38,21 @@\n \n \n @classmethod\n- def insert(cls, amount, grateful_for, name, follow_up, email_address,\n+ def insert(cls, amount, grateful_for, name, on_mailing_list, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message,\n cursor=None):\n \"\"\"Take baseline info and insert into the database.\n \"\"\"\n uuid = uuid4().hex\n+ on_mailing_list = on_mailing_list == 'yes'\n return (cursor or cls.db).one(\"\"\"\n INSERT INTO payments_for_open_source\n- (uuid, amount, grateful_for, name, follow_up, email_address,\n+ (uuid, amount, grateful_for, name, on_mailing_list, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message)\n VALUES (%s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s)\n RETURNING payments_for_open_source.*::payments_for_open_source\n- \"\"\", (uuid, amount, grateful_for, name, follow_up, email_address,\n+ \"\"\", (uuid, amount, grateful_for, name, on_mailing_list, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message))\n", "issue": "Simplify mailing list opt-in even further\nIn #4651 we dropped back to two mailing list options, quarterly and yearly. However, as I think through the way #BackTheStack is gonna wanna play out, we are going to want to send multiple updates over the course of the campaign. That's not consistent with a \"quarterly\" or \"yearly\" newsletter. 
I think our best bet is to just have a simple \"Yes/No\" on the mailing list.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport gratipay\nfrom uuid import uuid4\nfrom postgres.orm import Model\n\n\nclass PaymentForOpenSource(Model):\n\n typname = \"payments_for_open_source\"\n\n def __repr__(self):\n return '<PaymentForOpenSource: {}>'.format(repr(self.amount))\n\n\n @property\n def succeeded(self):\n return self.braintree_result_message == ''\n\n\n @property\n def invoice_url(self):\n if not self.succeeded:\n return None\n return '{}/browse/payments/{}/invoice.html'.format(gratipay.base_url, self.uuid)\n\n\n @classmethod\n def from_uuid(cls, uuid, cursor=None):\n \"\"\"Take a uuid and return an object.\n \"\"\"\n return (cursor or cls.db).one(\"\"\"\n SELECT pfos.*::payments_for_open_source\n FROM payments_for_open_source pfos\n WHERE uuid = %s\n \"\"\", (uuid,))\n\n\n @classmethod\n def insert(cls, amount, grateful_for, name, follow_up, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message,\n cursor=None):\n \"\"\"Take baseline info and insert into the database.\n \"\"\"\n uuid = uuid4().hex\n return (cursor or cls.db).one(\"\"\"\n INSERT INTO payments_for_open_source\n (uuid, amount, grateful_for, name, follow_up, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message)\n VALUES (%s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s)\n RETURNING payments_for_open_source.*::payments_for_open_source\n \"\"\", (uuid, amount, grateful_for, name, follow_up, email_address,\n promotion_name, promotion_url, promotion_twitter, promotion_message))\n\n\n def process_result(self, result):\n \"\"\"Take a Braintree API result and update the database.\n \"\"\"\n result_message = '' if result.is_success else result.message\n transaction_id = None\n if result.transaction:\n transaction_id = result.transaction.id\n\n # Verify that Braintree is sending us the right payload.\n # TODO This is hard to test and it should be a pretty tight guarantee,\n # so I am commenting out for now. 
:(\n #pfos_uuid = result.transaction.custom_fields['pfos_uuid']\n #assert pfos_uuid == self.uuid, (pfos_uuid, transaction_id)\n\n self.db.run(\"\"\"\n UPDATE payments_for_open_source\n SET braintree_result_message=%s\n , braintree_transaction_id=%s\n WHERE uuid=%s\n \"\"\", (result_message, transaction_id, self.uuid))\n self.set_attributes( braintree_result_message=result_message\n , braintree_transaction_id=transaction_id\n )\n", "path": "gratipay/models/payment_for_open_source.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"This is the Python library behind gratipay.com.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay import utils\nfrom gratipay.models.payment_for_open_source import PaymentForOpenSource\n\n\nkB = 2**10\n\n\ndef _parse(raw):\n \"\"\"Given a POST request.body, return (parsed<dict>, errors<list>).\n \"\"\"\n\n errors = []\n x = lambda f: raw[f].strip() # KeyError -> 400\n\n # payment info\n amount = x('amount')\n if (not amount.isdigit()) or (int(amount) < 10):\n errors.append('amount')\n amount = ''.join(x for x in amount.split('.')[0] if x.isdigit())\n\n payment_method_nonce = x('payment_method_nonce')\n if len(payment_method_nonce) > 36:\n errors.append('payment_method_nonce')\n payment_method_nonce = ''\n\n # ecosystems/packages\n grateful_for = x('grateful_for')\n if len(grateful_for) > 16*kB:\n grateful_for = grateful_for[:16*kB]\n errors.append('grateful_for')\n\n # contact info\n name = x('name')\n if len(name) > 255:\n name = name[:255]\n errors.append('name')\n\n email_address = x('email_address')\n if email_address and not utils.is_valid_email_address(email_address):\n email_address = email_address[:255]\n errors.append('email_address')\n\n follow_up = x('follow_up')\n if follow_up not in ('quarterly', 'yearly', 'never'):\n follow_up = 'quarterly'\n errors.append('follow_up')\n\n # promo fields\n promotion_name = x('promotion_name')\n if len(promotion_name) > 32:\n promotion_name = promotion_name[:32]\n errors.append('promotion_name')\n\n promotion_url = x('promotion_url')\n is_link = lambda x: (x.startswith('http://') or x.startswith('https://')) and '.' 
in x\n if len(promotion_url) > 255 or (promotion_url and not is_link(promotion_url)):\n promotion_url = promotion_url[:255]\n errors.append('promotion_url')\n\n promotion_twitter = x('promotion_twitter')\n if len(promotion_twitter) > 32:\n promotion_twitter = promotion_twitter[:32]\n # TODO What are Twitter's rules?\n errors.append('promotion_twitter')\n\n promotion_message = x('promotion_message')\n if len(promotion_message) > 128:\n promotion_message = promotion_message[:128]\n errors.append('promotion_message')\n\n parsed = { 'amount': amount\n , 'payment_method_nonce': payment_method_nonce\n , 'grateful_for': grateful_for\n , 'name': name\n , 'email_address': email_address\n , 'follow_up': follow_up\n , 'promotion_name': promotion_name\n , 'promotion_url': promotion_url\n , 'promotion_twitter': promotion_twitter\n , 'promotion_message': promotion_message\n }\n return parsed, errors\n\n\ndef _store(parsed):\n return PaymentForOpenSource.insert(**parsed)\n\n\ndef _charge(app, pfos, nonce):\n params = { 'amount': pfos.amount\n , 'payment_method_nonce': nonce\n , 'options': {'submit_for_settlement': True}\n , 'custom_fields': {'pfos_uuid': pfos.uuid}\n }\n result = app.pfos_card_charger.charge(params)\n pfos.process_result(result)\n\n\ndef _send(app, pfos):\n app.email_queue.put( to=None\n , template='paid-for-open-source'\n , email=pfos.email_address\n , amount=pfos.amount\n , invoice_url=pfos.invoice_url\n )\n\n\ndef pay_for_open_source(app, raw):\n parsed, errors = _parse(raw)\n out = {'errors': errors, 'invoice_url': None}\n if not errors:\n payment_method_nonce = parsed.pop('payment_method_nonce')\n pfos = _store(parsed)\n _charge(app, pfos, payment_method_nonce)\n if pfos.succeeded:\n out['invoice_url'] = pfos.invoice_url\n if pfos.email_address:\n _send(app, pfos)\n else:\n out['errors'].append('charging')\n return out\n", "path": "gratipay/homepage.py"}]} | 2,675 | 598 |
gh_patches_debug_13390 | rasdani/github-patches | git_diff | conan-io__conan-13531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Xcode 14.2 with macOS SDK 13.1 not working
### Environment details
* Operating System+version: MacOS 13.2
* Compiler+version: Xcode 14.2
* Conan version: 1.59.0
* Python version: 3.8
### Steps to reproduce
Setting the macOS SDK to 13.0 and building CMake on a default install of Xcode 14.2 fails.
### Logs
_No response_
</issue>
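For context, a minimal sketch of how to compare the SDK version the local Xcode actually ships with the values accepted by `settings.yml`; it assumes macOS with the Xcode command-line tools installed, and the hard-coded list below is copied from the settings shown in this report rather than taken from any Conan API.

```python
# Sketch only: compare the SDK version reported by xcrun with the values
# settings.yml currently accepts for Macos (Xcode 14.2 ships SDK 13.1).
import subprocess

ACCEPTED_SDKS = {"10.13", "10.14", "10.15", "11.0", "11.1", "11.3",
                 "12.0", "12.1", "12.3", "13.0"}

sdk = subprocess.check_output(
    ["xcrun", "--sdk", "macosx", "--show-sdk-version"], text=True
).strip()

if sdk in ACCEPTED_SDKS:
    print(f"SDK {sdk} is an accepted os.sdk_version value")
else:
    print(f"SDK {sdk} is missing from settings.yml, so Conan rejects it")
```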
<code>
[start of conans/client/conf/__init__.py]
1 default_settings_yml = """\
2 # This file was generated by Conan. Remove this comment if you edit this file or Conan
3 # will destroy your changes.
4 os:
5 Windows:
6 subsystem: [null, cygwin, msys, msys2, wsl]
7 WindowsStore:
8 version: ["8.1", "10.0"]
9 WindowsCE:
10 platform: ANY
11 version: ["5.0", "6.0", "7.0", "8.0"]
12 Linux:
13 iOS:
14 version: &ios_version
15 ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
16 "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
17 "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
18 "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8",
19 "15.0", "15.1", "15.2", "15.3", "15.4", "15.5", "15.6", "16.0", "16.1"]
20 sdk: ["iphoneos", "iphonesimulator"]
21 sdk_version: [null, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
22 "13.0", "13.1", "13.2", "13.4", "13.5", "13.6", "13.7",
23 "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2", "15.4", "15.5", "16.0", "16.1"]
24 watchOS:
25 version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
26 "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1", "8.3", "8.4", "8.5", "8.6", "8.7", "9.0", "9.1"]
27 sdk: ["watchos", "watchsimulator"]
28 sdk_version: [null, "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
29 "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3", "8.5", "9.0", "9.1"]
30 tvOS:
31 version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
32 "13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
33 "15.0", "15.1", "15.2", "15.3", "15.4", "15.5", "15.6", "16.0", "16.1"]
34 sdk: ["appletvos", "appletvsimulator"]
35 sdk_version: [null, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
36 "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2", "15.4", "16.0", "16.1"]
37 Macos:
38 version: [null, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
39 sdk_version: [null, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1", "12.3", "13.0"]
40 subsystem:
41 null:
42 catalyst:
43 ios_version: *ios_version
44 Android:
45 api_level: [ANY]
46 FreeBSD:
47 SunOS:
48 AIX:
49 Arduino:
50 board: [ANY]
51 Emscripten:
52 Neutrino:
53 version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
54 baremetal:
55 VxWorks:
56 version: ["7"]
57 arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64,
58 armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3,
59 sparc, sparcv9,
60 mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le,
61 e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7,
62 xtensalx6, xtensalx106, xtensalx7]
63 compiler:
64 sun-cc:
65 version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
66 threads: [null, posix]
67 libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
68 gcc:
69 version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
70 "5", "5.1", "5.2", "5.3", "5.4", "5.5",
71 "6", "6.1", "6.2", "6.3", "6.4", "6.5",
72 "7", "7.1", "7.2", "7.3", "7.4", "7.5",
73 "8", "8.1", "8.2", "8.3", "8.4", "8.5",
74 "9", "9.1", "9.2", "9.3", "9.4", "9.5",
75 "10", "10.1", "10.2", "10.3", "10.4",
76 "11", "11.1", "11.2", "11.3",
77 "12", "12.1", "12.2"]
78 libcxx: [libstdc++, libstdc++11]
79 threads: [null, posix, win32] # Windows MinGW
80 exception: [null, dwarf2, sjlj, seh] # Windows MinGW
81 cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
82 msvc:
83 version: [170, 180, 190, 191, 192, 193]
84 update: [null, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
85 runtime: [static, dynamic]
86 runtime_type: [Debug, Release]
87 cppstd: [null, 14, 17, 20, 23]
88 toolset: [null, v110_xp, v120_xp, v140_xp, v141_xp]
89 clang:
90 version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
91 "5.0", "6.0", "7.0", "7.1",
92 "8", "9", "10", "11", "12", "13", "14", "15", "16"]
93 libcxx: [null, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
94 cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
95 runtime: [null, static, dynamic]
96 runtime_type: [null, Debug, Release]
97 runtime_version: [null, v140, v141, v142, v143]
98 apple-clang:
99 version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13", "13.0", "13.1", "14", "14.0"]
100 libcxx: [libstdc++, libc++]
101 cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
102 intel-cc:
103 version: ["2021.1", "2021.2", "2021.3"]
104 update: [null, ANY]
105 mode: ["icx", "classic", "dpcpp"]
106 libcxx: [null, libstdc++, libstdc++11, libc++]
107 cppstd: [null, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
108 runtime: [null, static, dynamic]
109 runtime_type: [null, Debug, Release]
110 qcc:
111 version: ["4.4", "5.4", "8.3"]
112 libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
113 cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
114 mcst-lcc:
115 version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
116 libcxx: [libstdc++, libstdc++11]
117 cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
118
119 build_type: [null, Debug, Release, RelWithDebInfo, MinSizeRel]
120 """
121
122
123 def get_default_settings_yml():
124 return default_settings_yml
125
126
127 def migrate_settings_file(cache):
128 from conans.client.migrations import update_file
129
130 settings_path = cache.settings_path
131 update_file(settings_path, get_default_settings_yml())
132
[end of conans/client/conf/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py
--- a/conans/client/conf/__init__.py
+++ b/conans/client/conf/__init__.py
@@ -36,7 +36,7 @@
"13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2", "15.4", "16.0", "16.1"]
Macos:
version: [null, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
- sdk_version: [null, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1", "12.3", "13.0"]
+ sdk_version: [null, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1", "12.3", "13.0", "13.1"]
subsystem:
null:
catalyst:
| {"golden_diff": "diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py\n--- a/conans/client/conf/__init__.py\n+++ b/conans/client/conf/__init__.py\n@@ -36,7 +36,7 @@\n \"13.0\", \"13.1\", \"13.2\", \"13.4\", \"14.0\", \"14.2\", \"14.3\", \"14.5\", \"15.0\", \"15.2\", \"15.4\", \"16.0\", \"16.1\"]\n Macos:\n version: [null, \"10.6\", \"10.7\", \"10.8\", \"10.9\", \"10.10\", \"10.11\", \"10.12\", \"10.13\", \"10.14\", \"10.15\", \"11.0\", \"12.0\", \"13.0\"]\n- sdk_version: [null, \"10.13\", \"10.14\", \"10.15\", \"11.0\", \"11.1\", \"11.3\", \"12.0\", \"12.1\", \"12.3\", \"13.0\"]\n+ sdk_version: [null, \"10.13\", \"10.14\", \"10.15\", \"11.0\", \"11.1\", \"11.3\", \"12.0\", \"12.1\", \"12.3\", \"13.0\", \"13.1\"]\n subsystem:\n null:\n catalyst:\n", "issue": "[bug] Xcode 14.2 with macos sdk 13.1 not working\n### Environment details\n\n* Operating System+version: MacOS 13.2\r\n* Compiler+version: Xcode 14.2\r\n* Conan version: 1.59.0\r\n* Python version: 3.8\r\n\n\n### Steps to reproduce\n\nSetting macos sdk to 13.0 and building cmake on a default install of xcode 14.2 fails\n\n### Logs\n\n_No response_\n", "before_files": [{"content": "default_settings_yml = \"\"\"\\\n# This file was generated by Conan. Remove this comment if you edit this file or Conan\n# will destroy your changes.\nos:\n Windows:\n subsystem: [null, cygwin, msys, msys2, wsl]\n WindowsStore:\n version: [\"8.1\", \"10.0\"]\n WindowsCE:\n platform: ANY\n version: [\"5.0\", \"6.0\", \"7.0\", \"8.0\"]\n Linux:\n iOS:\n version: &ios_version\n [\"7.0\", \"7.1\", \"8.0\", \"8.1\", \"8.2\", \"8.3\", \"9.0\", \"9.1\", \"9.2\", \"9.3\", \"10.0\", \"10.1\", \"10.2\", \"10.3\",\n \"11.0\", \"11.1\", \"11.2\", \"11.3\", \"11.4\", \"12.0\", \"12.1\", \"12.2\", \"12.3\", \"12.4\",\n \"13.0\", \"13.1\", \"13.2\", \"13.3\", \"13.4\", \"13.5\", \"13.6\", \"13.7\",\n \"14.0\", \"14.1\", \"14.2\", \"14.3\", \"14.4\", \"14.5\", \"14.6\", \"14.7\", \"14.8\",\n \"15.0\", \"15.1\", \"15.2\", \"15.3\", \"15.4\", \"15.5\", \"15.6\", \"16.0\", \"16.1\"]\n sdk: [\"iphoneos\", \"iphonesimulator\"]\n sdk_version: [null, \"11.3\", \"11.4\", \"12.0\", \"12.1\", \"12.2\", \"12.4\",\n \"13.0\", \"13.1\", \"13.2\", \"13.4\", \"13.5\", \"13.6\", \"13.7\",\n \"14.0\", \"14.1\", \"14.2\", \"14.3\", \"14.4\", \"14.5\", \"15.0\", \"15.2\", \"15.4\", \"15.5\", \"16.0\", \"16.1\"]\n watchOS:\n version: [\"4.0\", \"4.1\", \"4.2\", \"4.3\", \"5.0\", \"5.1\", \"5.2\", \"5.3\", \"6.0\", \"6.1\", \"6.2\",\n \"7.0\", \"7.1\", \"7.2\", \"7.3\", \"7.4\", \"7.5\", \"7.6\", \"8.0\", \"8.1\", \"8.3\", \"8.4\", \"8.5\", \"8.6\", \"8.7\", \"9.0\", \"9.1\"]\n sdk: [\"watchos\", \"watchsimulator\"]\n sdk_version: [null, \"4.3\", \"5.0\", \"5.1\", \"5.2\", \"5.3\", \"6.0\", \"6.1\", \"6.2\",\n \"7.0\", \"7.1\", \"7.2\", \"7.4\", \"8.0\", \"8.0.1\", \"8.3\", \"8.5\", \"9.0\", \"9.1\"]\n tvOS:\n version: [\"11.0\", \"11.1\", \"11.2\", \"11.3\", \"11.4\", \"12.0\", \"12.1\", \"12.2\", \"12.3\", \"12.4\",\n \"13.0\", \"13.2\", \"13.3\", \"13.4\", \"14.0\", \"14.2\", \"14.3\", \"14.4\", \"14.5\", \"14.6\", \"14.7\",\n \"15.0\", \"15.1\", \"15.2\", \"15.3\", \"15.4\", \"15.5\", \"15.6\", \"16.0\", \"16.1\"]\n sdk: [\"appletvos\", \"appletvsimulator\"]\n sdk_version: [null, \"11.3\", \"11.4\", \"12.0\", \"12.1\", \"12.2\", \"12.4\",\n \"13.0\", \"13.1\", \"13.2\", \"13.4\", \"14.0\", \"14.2\", \"14.3\", \"14.5\", \"15.0\", \"15.2\", \"15.4\", \"16.0\", \"16.1\"]\n Macos:\n version: [null, \"10.6\", \"10.7\", \"10.8\", \"10.9\", \"10.10\", \"10.11\", \"10.12\", 
\"10.13\", \"10.14\", \"10.15\", \"11.0\", \"12.0\", \"13.0\"]\n sdk_version: [null, \"10.13\", \"10.14\", \"10.15\", \"11.0\", \"11.1\", \"11.3\", \"12.0\", \"12.1\", \"12.3\", \"13.0\"]\n subsystem:\n null:\n catalyst:\n ios_version: *ios_version\n Android:\n api_level: [ANY]\n FreeBSD:\n SunOS:\n AIX:\n Arduino:\n board: [ANY]\n Emscripten:\n Neutrino:\n version: [\"6.4\", \"6.5\", \"6.6\", \"7.0\", \"7.1\"]\n baremetal:\n VxWorks:\n version: [\"7\"]\narch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64,\n armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3,\n sparc, sparcv9,\n mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le,\n e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7,\n xtensalx6, xtensalx106, xtensalx7]\ncompiler:\n sun-cc:\n version: [\"5.10\", \"5.11\", \"5.12\", \"5.13\", \"5.14\", \"5.15\"]\n threads: [null, posix]\n libcxx: [libCstd, libstdcxx, libstlport, libstdc++]\n gcc:\n version: [\"4.1\", \"4.4\", \"4.5\", \"4.6\", \"4.7\", \"4.8\", \"4.9\",\n \"5\", \"5.1\", \"5.2\", \"5.3\", \"5.4\", \"5.5\",\n \"6\", \"6.1\", \"6.2\", \"6.3\", \"6.4\", \"6.5\",\n \"7\", \"7.1\", \"7.2\", \"7.3\", \"7.4\", \"7.5\",\n \"8\", \"8.1\", \"8.2\", \"8.3\", \"8.4\", \"8.5\",\n \"9\", \"9.1\", \"9.2\", \"9.3\", \"9.4\", \"9.5\",\n \"10\", \"10.1\", \"10.2\", \"10.3\", \"10.4\",\n \"11\", \"11.1\", \"11.2\", \"11.3\",\n \"12\", \"12.1\", \"12.2\"]\n libcxx: [libstdc++, libstdc++11]\n threads: [null, posix, win32] # Windows MinGW\n exception: [null, dwarf2, sjlj, seh] # Windows MinGW\n cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]\n msvc:\n version: [170, 180, 190, 191, 192, 193]\n update: [null, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n runtime: [static, dynamic]\n runtime_type: [Debug, Release]\n cppstd: [null, 14, 17, 20, 23]\n toolset: [null, v110_xp, v120_xp, v140_xp, v141_xp]\n clang:\n version: [\"3.3\", \"3.4\", \"3.5\", \"3.6\", \"3.7\", \"3.8\", \"3.9\", \"4.0\",\n \"5.0\", \"6.0\", \"7.0\", \"7.1\",\n \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\"]\n libcxx: [null, libstdc++, libstdc++11, libc++, c++_shared, c++_static]\n cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]\n runtime: [null, static, dynamic]\n runtime_type: [null, Debug, Release]\n runtime_version: [null, v140, v141, v142, v143]\n apple-clang:\n version: [\"5.0\", \"5.1\", \"6.0\", \"6.1\", \"7.0\", \"7.3\", \"8.0\", \"8.1\", \"9.0\", \"9.1\", \"10.0\", \"11.0\", \"12.0\", \"13\", \"13.0\", \"13.1\", \"14\", \"14.0\"]\n libcxx: [libstdc++, libc++]\n cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]\n intel-cc:\n version: [\"2021.1\", \"2021.2\", \"2021.3\"]\n update: [null, ANY]\n mode: [\"icx\", \"classic\", \"dpcpp\"]\n libcxx: [null, libstdc++, libstdc++11, libc++]\n cppstd: [null, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]\n runtime: [null, static, dynamic]\n runtime_type: [null, Debug, Release]\n qcc:\n version: [\"4.4\", \"5.4\", \"8.3\"]\n libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]\n cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]\n mcst-lcc:\n version: [\"1.19\", \"1.20\", \"1.21\", \"1.22\", \"1.23\", \"1.24\", \"1.25\"]\n libcxx: [libstdc++, libstdc++11]\n cppstd: [null, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]\n\nbuild_type: [null, Debug, Release, RelWithDebInfo, MinSizeRel]\n\"\"\"\n\n\ndef get_default_settings_yml():\n return default_settings_yml\n\n\ndef 
migrate_settings_file(cache):\n from conans.client.migrations import update_file\n\n settings_path = cache.settings_path\n update_file(settings_path, get_default_settings_yml())\n", "path": "conans/client/conf/__init__.py"}]} | 4,045 | 386 |
gh_patches_debug_26 | rasdani/github-patches | git_diff | nautobot__nautobot-3317 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove legacy `manage.py`
<!--
NOTE: This template is for use by maintainers only. Please do not submit
an issue using this template unless you have been specifically asked to
do so.
-->
### Proposed Changes
Simply remove `manage.py` from the project root.
<!-- Provide justification for the proposed change(s). -->
### Justification
This was left there initially in v1.0.0 as a fallback; however, it is absolutely no longer needed.
</issue>
<code>
[start of manage.py]
1 #!/usr/bin/env python3
2
3 import sys
4
5 from nautobot.core.cli import main
6
7
8 if __name__ == "__main__":
9 main()
10
[end of manage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manage.py b/manage.py
deleted file mode 100755
--- a/manage.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-import sys
-
-from nautobot.core.cli import main
-
-
-if __name__ == "__main__":
- main()
| {"golden_diff": "diff --git a/manage.py b/manage.py\ndeleted file mode 100755\n--- a/manage.py\n+++ /dev/null\n@@ -1,9 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import sys\n-\n-from nautobot.core.cli import main\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Remove legacy `manage.py` \n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\n\r\nSimply remove `manage.py` from the project root.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\n\r\nThis was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport sys\n\nfrom nautobot.core.cli import main\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manage.py"}]} | 674 | 77 |
gh_patches_debug_35073 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flask serializer that can use python datetimes/date/timestamp types
We need to introduce a JSON serializer for Flask that can handle the Python datetime/date/timestamp types.
Note: Figure out commonly used timestamp/datetime types other than the "standard" ones from the `datetime` library that might be yielded by our database clients and make their way into the dataframe.
See the example in db.py for how to do this, but maybe there's an easier Flask-specific way of doing it.
Format should be a string of the form `yyyy-MM-dd'T'HH:mm:ss.SSS`, i.e. years down to milliseconds, with no timezone/offset.
</issue>
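For context, a minimal sketch of the kind of encoder the issue asks for, assuming Flask 1.x where `flask.json.JSONEncoder` and the `app.json_encoder` hook exist; it only covers `datetime` and `date`, and truncating microseconds is one way to get the `.SSS` part of the requested format.

```python
# Sketch only: a Flask JSON encoder that renders datetimes as
# yyyy-MM-dd'T'HH:mm:ss.SSS (no timezone) and dates as yyyy-MM-dd.
from datetime import date, datetime

from flask import Flask
from flask.json import JSONEncoder


class CustomJSONEncoder(JSONEncoder):
    def default(self, obj):
        # check datetime first: datetime is a subclass of date
        if isinstance(obj, datetime):
            # %f gives microseconds; drop the last three digits to keep milliseconds
            return obj.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        if isinstance(obj, date):
            return obj.strftime("%Y-%m-%d")
        return JSONEncoder.default(self, obj)


app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
```

Pandas `Timestamp` subclasses `datetime`, so the same branch would cover it; other driver-specific types would need their own `isinstance` checks.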
<code>
[start of mindsdb/api/http/initialize.py]
1 from distutils.version import LooseVersion
2 import requests
3 import os
4 import shutil
5 import threading
6 import webbrowser
7 from zipfile import ZipFile
8 from pathlib import Path
9 import traceback
10 #import concurrent.futures
11
12 from flask import Flask, url_for
13 from flask_restx import Api
14
15 from mindsdb.__about__ import __version__ as mindsdb_version
16 from mindsdb.interfaces.datastore.datastore import DataStore
17 from mindsdb.interfaces.model.model_interface import ModelInterface as NativeInterface
18 from mindsdb.interfaces.custom.custom_models import CustomModels
19 from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
20 from mindsdb.interfaces.database.database import DatabaseWrapper
21 from mindsdb.utilities.telemetry import inject_telemetry_to_static
22 from mindsdb.utilities.config import Config
23 from mindsdb.utilities.log import get_log
24 from mindsdb.interfaces.storage.db import session
25
26
27 class Swagger_Api(Api):
28 """
29 This is a modification of the base Flask Restplus Api class due to the issue described here
30 https://github.com/noirbizarre/flask-restplus/issues/223
31 """
32 @property
33 def specs_url(self):
34 return url_for(self.endpoint("specs"), _external=False)
35
36
37 def initialize_static(config):
38 ''' Update Scout files basing on compatible-config.json content.
39 Files will be downloaded and updated if new version of GUI > current.
40 Current GUI version stored in static/version.txt.
41 '''
42 log = get_log('http')
43 static_path = Path(config.paths['static'])
44 static_path.mkdir(parents=True, exist_ok=True)
45
46 try:
47 res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json')
48 except (ConnectionError, requests.exceptions.ConnectionError) as e:
49 print(f'Is no connection. {e}')
50 return False
51 except Exception as e:
52 print(f'Is something wrong with getting compatible-config.json: {e}')
53 return False
54
55 if res.status_code != 200:
56 print(f'Cant get compatible-config.json: returned status code = {res.status_code}')
57 return False
58
59 try:
60 versions = res.json()
61 except Exception as e:
62 print(f'Cant decode compatible-config.json: {e}')
63 return False
64
65 current_mindsdb_lv = LooseVersion(mindsdb_version)
66
67 try:
68 gui_versions = {}
69 max_mindsdb_lv = None
70 max_gui_lv = None
71 for el in versions['mindsdb']:
72 if el['mindsdb_version'] is None:
73 gui_lv = LooseVersion(el['gui_version'])
74 else:
75 mindsdb_lv = LooseVersion(el['mindsdb_version'])
76 gui_lv = LooseVersion(el['gui_version'])
77 if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
78 gui_versions[mindsdb_lv.vstring] = gui_lv
79 if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
80 max_mindsdb_lv = mindsdb_lv
81 if max_gui_lv is None or max_gui_lv < gui_lv:
82 max_gui_lv = gui_lv
83
84 all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
85 all_mindsdb_lv.sort()
86
87 if current_mindsdb_lv.vstring in gui_versions:
88 gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
89 elif current_mindsdb_lv > all_mindsdb_lv[-1]:
90 gui_version_lv = max_gui_lv
91 else:
92 lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
93 if len(lower_versions) == 0:
94 gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
95 else:
96 all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
97 gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
98 except Exception as e:
99 log.error(f'Error in compatible-config.json structure: {e}')
100 return False
101
102 current_gui_version = None
103
104 version_txt_path = static_path.joinpath('version.txt')
105 if version_txt_path.is_file():
106 with open(version_txt_path, 'rt') as f:
107 current_gui_version = f.readline()
108 if current_gui_version is not None:
109 current_gui_lv = LooseVersion(current_gui_version)
110 if current_gui_lv >= gui_version_lv:
111 return True
112
113 log.info(f'New version of GUI available ({gui_version_lv.vstring}). Downloading...')
114
115 shutil.rmtree(static_path)
116 static_path.mkdir(parents=True, exist_ok=True)
117
118 try:
119 css_zip_path = str(static_path.joinpath('css.zip'))
120 js_zip_path = str(static_path.joinpath('js.zip'))
121 media_zip_path = str(static_path.joinpath('media.zip'))
122 bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
123
124 gui_version = gui_version_lv.vstring
125
126 resources = [
127 {
128 'url': bucket + 'css-V' + gui_version + '.zip',
129 'path': css_zip_path
130 }, {
131 'url': bucket + 'js-V' + gui_version + '.zip',
132 'path': js_zip_path
133 }, {
134 'url': bucket + 'indexV' + gui_version + '.html',
135 'path': str(static_path.joinpath('index.html'))
136 }, {
137 'url': bucket + 'favicon.ico',
138 'path': str(static_path.joinpath('favicon.ico'))
139 }, {
140 'url': bucket + 'media.zip',
141 'path': media_zip_path
142 }
143 ]
144
145 def get_resources(resource):
146 try:
147 response = requests.get(resource['url'])
148 if response.status_code != requests.status_codes.codes.ok:
149 return Exception(f"Error {response.status_code} GET {resource['url']}")
150 open(resource['path'], 'wb').write(response.content)
151 except Exception as e:
152 return e
153 return None
154
155 for r in resources:
156 get_resources(r)
157
158 '''
159 # to make downloading faster download each resource in a separate thread
160 with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
161 future_to_url = {executor.submit(get_resources, r): r for r in resources}
162 for future in concurrent.futures.as_completed(future_to_url):
163 res = future.result()
164 if res is not None:
165 raise res
166 '''
167
168 except Exception as e:
169 log.error(f'Error during downloading files from s3: {e}')
170 session.close()
171 return False
172
173 static_folder = static_path.joinpath('static')
174 static_folder.mkdir(parents=True, exist_ok=True)
175
176 # unzip process
177 for zip_path, dir_name in [[js_zip_path, 'js'], [css_zip_path, 'css']]:
178 temp_dir = static_path.joinpath(f'temp_{dir_name}')
179 temp_dir.mkdir(mode=0o777, exist_ok=True, parents=True)
180 ZipFile(zip_path).extractall(temp_dir)
181 files_path = static_path.joinpath('static', dir_name)
182 if temp_dir.joinpath('build', 'static', dir_name).is_dir():
183 shutil.move(temp_dir.joinpath('build', 'static', dir_name), files_path)
184 shutil.rmtree(temp_dir)
185 else:
186 shutil.move(temp_dir, files_path)
187
188 ZipFile(media_zip_path).extractall(static_folder)
189
190 os.remove(js_zip_path)
191 os.remove(css_zip_path)
192 os.remove(media_zip_path)
193
194 with open(version_txt_path, 'wt') as f:
195 f.write(gui_version_lv.vstring)
196
197 log.info(f'GUI version updated to {gui_version_lv.vstring}')
198 session.close()
199 return True
200
201
202 def initialize_flask(config, init_static_thread, no_studio):
203 # Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
204 if no_studio:
205 app = Flask(
206 __name__
207 )
208 else:
209 app = Flask(
210 __name__,
211 static_url_path='/static',
212 static_folder=os.path.join(config.paths['static'], 'static/')
213 )
214
215 app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
216 app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
217 authorizations = {
218 'apikey': {
219 'type': 'apiKey',
220 'in': 'query',
221 'name': 'apikey'
222 }
223 }
224
225 api = Swagger_Api(
226 app,
227 authorizations=authorizations,
228 security=['apikey'],
229 url_prefix=':8000',
230 prefix='/api',
231 doc='/doc/'
232 )
233
234 port = config['api']['http']['port']
235 host = config['api']['http']['host']
236
237 # NOTE rewrite it, that hotfix to see GUI link
238 if not no_studio:
239 log = get_log('http')
240 if host in ('', '0.0.0.0'):
241 url = f'http://127.0.0.1:{port}/'
242 else:
243 url = f'http://{host}:{port}/'
244 log.info(f' - GUI available at {url}')
245
246 pid = os.getpid()
247 x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config.paths['static']), daemon=True)
248 x.start()
249
250 return app, api
251
252
253 def initialize_interfaces(app):
254 app.default_store = DataStore()
255 app.naitve_interface = NativeInterface()
256 app.custom_models = CustomModels()
257 app.dbw = DatabaseWrapper()
258 config = Config()
259 app.config_obj = config
260
261
262 def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
263 """Open webbrowser with url when http service is started.
264
265 If some error then do nothing.
266 """
267 init_static_thread.join()
268 inject_telemetry_to_static(static_folder)
269 logger = get_log('http')
270 try:
271 is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
272 pid=pid, port=port)
273 if is_http_active:
274 webbrowser.open(url)
275 except Exception as e:
276 logger.error(f'Failed to open {url} in webbrowser with exception {e}')
277 logger.error(traceback.format_exc())
278 session.close()
279
[end of mindsdb/api/http/initialize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/api/http/initialize.py b/mindsdb/api/http/initialize.py
--- a/mindsdb/api/http/initialize.py
+++ b/mindsdb/api/http/initialize.py
@@ -7,10 +7,13 @@
from zipfile import ZipFile
from pathlib import Path
import traceback
+from datetime import datetime, date, timedelta
#import concurrent.futures
-from flask import Flask, url_for
+from flask import Flask, url_for, make_response
+from flask.json import dumps
from flask_restx import Api
+from flask.json import JSONEncoder
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
@@ -34,6 +37,24 @@
return url_for(self.endpoint("specs"), _external=False)
+class CustomJSONEncoder(JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, date):
+ return obj.strftime("%Y-%m-%d")
+ if isinstance(obj, datetime):
+ return obj.strftime("%Y-%m-%dT%H:%M:%S.%f")
+ if isinstance(obj, timedelta):
+ return str(obj)
+
+ return JSONEncoder.default(self, obj)
+
+
+def custom_output_json(data, code, headers=None):
+ resp = make_response(dumps(data), code)
+ resp.headers.extend(headers or {})
+ return resp
+
+
def initialize_static(config):
''' Update Scout files basing on compatible-config.json content.
Files will be downloaded and updated if new version of GUI > current.
@@ -214,11 +235,13 @@
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
+ app.json_encoder = CustomJSONEncoder
+
authorizations = {
'apikey': {
- 'type': 'apiKey',
+ 'type': 'session',
'in': 'query',
- 'name': 'apikey'
+ 'name': 'session'
}
}
@@ -231,6 +254,8 @@
doc='/doc/'
)
+ api.representations['application/json'] = custom_output_json
+
port = config['api']['http']['port']
host = config['api']['http']['host']
| {"golden_diff": "diff --git a/mindsdb/api/http/initialize.py b/mindsdb/api/http/initialize.py\n--- a/mindsdb/api/http/initialize.py\n+++ b/mindsdb/api/http/initialize.py\n@@ -7,10 +7,13 @@\n from zipfile import ZipFile\n from pathlib import Path\n import traceback\n+from datetime import datetime, date, timedelta\n #import concurrent.futures\n \n-from flask import Flask, url_for\n+from flask import Flask, url_for, make_response\n+from flask.json import dumps\n from flask_restx import Api\n+from flask.json import JSONEncoder\n \n from mindsdb.__about__ import __version__ as mindsdb_version\n from mindsdb.interfaces.datastore.datastore import DataStore\n@@ -34,6 +37,24 @@\n return url_for(self.endpoint(\"specs\"), _external=False)\n \n \n+class CustomJSONEncoder(JSONEncoder):\n+ def default(self, obj):\n+ if isinstance(obj, date):\n+ return obj.strftime(\"%Y-%m-%d\")\n+ if isinstance(obj, datetime):\n+ return obj.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n+ if isinstance(obj, timedelta):\n+ return str(obj)\n+\n+ return JSONEncoder.default(self, obj)\n+\n+\n+def custom_output_json(data, code, headers=None):\n+ resp = make_response(dumps(data), code)\n+ resp.headers.extend(headers or {})\n+ return resp\n+\n+\n def initialize_static(config):\n ''' Update Scout files basing on compatible-config.json content.\n Files will be downloaded and updated if new version of GUI > current.\n@@ -214,11 +235,13 @@\n \n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60\n app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'\n+ app.json_encoder = CustomJSONEncoder\n+\n authorizations = {\n 'apikey': {\n- 'type': 'apiKey',\n+ 'type': 'session',\n 'in': 'query',\n- 'name': 'apikey'\n+ 'name': 'session'\n }\n }\n \n@@ -231,6 +254,8 @@\n doc='/doc/'\n )\n \n+ api.representations['application/json'] = custom_output_json\n+\n port = config['api']['http']['port']\n host = config['api']['http']['host']\n", "issue": "Flask serializer that can use python datetimes/date/timestamp types\nWe need to introduce a json serializer for flask that can handle python datetime/date/timestamp type.\r\n\r\nNote: Figure out commonly used timestamp/datetime types other than the \"standard\" ones from the `datetime` library that might be yielded from our database clients and make it into the dataframe. 
\r\n\r\nSee example in db.py for how to do this, but maybe there's a easier flask specific way of doing this.\r\n\r\nFormat should be a string that's `yyyy-MM-dd'T'HH:mm:ss.SSS`, so from years to miliseconds with no timezeone/offset.\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport requests\nimport os\nimport shutil\nimport threading\nimport webbrowser\nfrom zipfile import ZipFile\nfrom pathlib import Path\nimport traceback\n#import concurrent.futures\n\nfrom flask import Flask, url_for\nfrom flask_restx import Api\n\nfrom mindsdb.__about__ import __version__ as mindsdb_version\nfrom mindsdb.interfaces.datastore.datastore import DataStore\nfrom mindsdb.interfaces.model.model_interface import ModelInterface as NativeInterface\nfrom mindsdb.interfaces.custom.custom_models import CustomModels\nfrom mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.telemetry import inject_telemetry_to_static\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.utilities.log import get_log\nfrom mindsdb.interfaces.storage.db import session\n\n\nclass Swagger_Api(Api):\n \"\"\"\n This is a modification of the base Flask Restplus Api class due to the issue described here\n https://github.com/noirbizarre/flask-restplus/issues/223\n \"\"\"\n @property\n def specs_url(self):\n return url_for(self.endpoint(\"specs\"), _external=False)\n\n\ndef initialize_static(config):\n ''' Update Scout files basing on compatible-config.json content.\n Files will be downloaded and updated if new version of GUI > current.\n Current GUI version stored in static/version.txt.\n '''\n log = get_log('http')\n static_path = Path(config.paths['static'])\n static_path.mkdir(parents=True, exist_ok=True)\n\n try:\n res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json')\n except (ConnectionError, requests.exceptions.ConnectionError) as e:\n print(f'Is no connection. 
{e}')\n return False\n except Exception as e:\n print(f'Is something wrong with getting compatible-config.json: {e}')\n return False\n\n if res.status_code != 200:\n print(f'Cant get compatible-config.json: returned status code = {res.status_code}')\n return False\n\n try:\n versions = res.json()\n except Exception as e:\n print(f'Cant decode compatible-config.json: {e}')\n return False\n\n current_mindsdb_lv = LooseVersion(mindsdb_version)\n\n try:\n gui_versions = {}\n max_mindsdb_lv = None\n max_gui_lv = None\n for el in versions['mindsdb']:\n if el['mindsdb_version'] is None:\n gui_lv = LooseVersion(el['gui_version'])\n else:\n mindsdb_lv = LooseVersion(el['mindsdb_version'])\n gui_lv = LooseVersion(el['gui_version'])\n if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:\n gui_versions[mindsdb_lv.vstring] = gui_lv\n if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:\n max_mindsdb_lv = mindsdb_lv\n if max_gui_lv is None or max_gui_lv < gui_lv:\n max_gui_lv = gui_lv\n\n all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]\n all_mindsdb_lv.sort()\n\n if current_mindsdb_lv.vstring in gui_versions:\n gui_version_lv = gui_versions[current_mindsdb_lv.vstring]\n elif current_mindsdb_lv > all_mindsdb_lv[-1]:\n gui_version_lv = max_gui_lv\n else:\n lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}\n if len(lower_versions) == 0:\n gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]\n else:\n all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]\n gui_version_lv = gui_versions[all_lower_versions[-1].vstring]\n except Exception as e:\n log.error(f'Error in compatible-config.json structure: {e}')\n return False\n\n current_gui_version = None\n\n version_txt_path = static_path.joinpath('version.txt')\n if version_txt_path.is_file():\n with open(version_txt_path, 'rt') as f:\n current_gui_version = f.readline()\n if current_gui_version is not None:\n current_gui_lv = LooseVersion(current_gui_version)\n if current_gui_lv >= gui_version_lv:\n return True\n\n log.info(f'New version of GUI available ({gui_version_lv.vstring}). 
Downloading...')\n\n shutil.rmtree(static_path)\n static_path.mkdir(parents=True, exist_ok=True)\n\n try:\n css_zip_path = str(static_path.joinpath('css.zip'))\n js_zip_path = str(static_path.joinpath('js.zip'))\n media_zip_path = str(static_path.joinpath('media.zip'))\n bucket = \"https://mindsdb-web-builds.s3.amazonaws.com/\"\n\n gui_version = gui_version_lv.vstring\n\n resources = [\n {\n 'url': bucket + 'css-V' + gui_version + '.zip',\n 'path': css_zip_path\n }, {\n 'url': bucket + 'js-V' + gui_version + '.zip',\n 'path': js_zip_path\n }, {\n 'url': bucket + 'indexV' + gui_version + '.html',\n 'path': str(static_path.joinpath('index.html'))\n }, {\n 'url': bucket + 'favicon.ico',\n 'path': str(static_path.joinpath('favicon.ico'))\n }, {\n 'url': bucket + 'media.zip',\n 'path': media_zip_path\n }\n ]\n\n def get_resources(resource):\n try:\n response = requests.get(resource['url'])\n if response.status_code != requests.status_codes.codes.ok:\n return Exception(f\"Error {response.status_code} GET {resource['url']}\")\n open(resource['path'], 'wb').write(response.content)\n except Exception as e:\n return e\n return None\n\n for r in resources:\n get_resources(r)\n\n '''\n # to make downloading faster download each resource in a separate thread\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n future_to_url = {executor.submit(get_resources, r): r for r in resources}\n for future in concurrent.futures.as_completed(future_to_url):\n res = future.result()\n if res is not None:\n raise res\n '''\n\n except Exception as e:\n log.error(f'Error during downloading files from s3: {e}')\n session.close()\n return False\n\n static_folder = static_path.joinpath('static')\n static_folder.mkdir(parents=True, exist_ok=True)\n\n # unzip process\n for zip_path, dir_name in [[js_zip_path, 'js'], [css_zip_path, 'css']]:\n temp_dir = static_path.joinpath(f'temp_{dir_name}')\n temp_dir.mkdir(mode=0o777, exist_ok=True, parents=True)\n ZipFile(zip_path).extractall(temp_dir)\n files_path = static_path.joinpath('static', dir_name)\n if temp_dir.joinpath('build', 'static', dir_name).is_dir():\n shutil.move(temp_dir.joinpath('build', 'static', dir_name), files_path)\n shutil.rmtree(temp_dir)\n else:\n shutil.move(temp_dir, files_path)\n\n ZipFile(media_zip_path).extractall(static_folder)\n\n os.remove(js_zip_path)\n os.remove(css_zip_path)\n os.remove(media_zip_path)\n\n with open(version_txt_path, 'wt') as f:\n f.write(gui_version_lv.vstring)\n\n log.info(f'GUI version updated to {gui_version_lv.vstring}')\n session.close()\n return True\n\n\ndef initialize_flask(config, init_static_thread, no_studio):\n # Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)\n if no_studio:\n app = Flask(\n __name__\n )\n else:\n app = Flask(\n __name__,\n static_url_path='/static',\n static_folder=os.path.join(config.paths['static'], 'static/')\n )\n\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60\n app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'\n authorizations = {\n 'apikey': {\n 'type': 'apiKey',\n 'in': 'query',\n 'name': 'apikey'\n }\n }\n\n api = Swagger_Api(\n app,\n authorizations=authorizations,\n security=['apikey'],\n url_prefix=':8000',\n prefix='/api',\n doc='/doc/'\n )\n\n port = config['api']['http']['port']\n host = config['api']['http']['host']\n\n # NOTE rewrite it, that hotfix to see GUI link\n if not no_studio:\n log = get_log('http')\n if host in ('', 
'0.0.0.0'):\n url = f'http://127.0.0.1:{port}/'\n else:\n url = f'http://{host}:{port}/'\n log.info(f' - GUI available at {url}')\n\n pid = os.getpid()\n x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config.paths['static']), daemon=True)\n x.start()\n\n return app, api\n\n\ndef initialize_interfaces(app):\n app.default_store = DataStore()\n app.naitve_interface = NativeInterface()\n app.custom_models = CustomModels()\n app.dbw = DatabaseWrapper()\n config = Config()\n app.config_obj = config\n\n\ndef _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):\n \"\"\"Open webbrowser with url when http service is started.\n\n If some error then do nothing.\n \"\"\"\n init_static_thread.join()\n inject_telemetry_to_static(static_folder)\n logger = get_log('http')\n try:\n is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,\n pid=pid, port=port)\n if is_http_active:\n webbrowser.open(url)\n except Exception as e:\n logger.error(f'Failed to open {url} in webbrowser with exception {e}')\n logger.error(traceback.format_exc())\n session.close()\n", "path": "mindsdb/api/http/initialize.py"}]} | 3,680 | 525 |
gh_patches_debug_33091 | rasdani/github-patches | git_diff | ansible__ansible-36661 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Seport: support list input for ports
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and devel branch are affected too.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Feature Idea
##### COMPONENT NAME
<!---
Name of the module, plugin, task or feature
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path
-->
seport
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.4.3.0
```
##### CONFIGURATION
<!---
If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
N/A
##### SUMMARY
<!--- Explain the problem briefly -->
It would be nice if the `ports` option allowed for a list of strings instead of just a string.
https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/system/seport.py#L234
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
<!--- Paste example playbooks or commands between quotes below -->
##### EXPECTED RESULTS
```yaml
- seport:
ports:
- "10000-10100"
- "10112"
proto: tcp
setype: memcache_port_t
state: present
```
##### ACTUAL RESULTS
```yaml
- seport:
ports: 10000-10100,10112
proto: tcp
setype: memcache_port_t
state: present
```
</issue>
<code>
[start of lib/ansible/modules/system/seport.py]
1 #!/usr/bin/python
2
3 # Copyright: (c) 2014, Dan Keder <[email protected]>
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7 __metaclass__ = type
8
9 ANSIBLE_METADATA = {'metadata_version': '1.1',
10 'status': ['preview'],
11 'supported_by': 'community'}
12
13 DOCUMENTATION = '''
14 ---
15 module: seport
16 short_description: Manages SELinux network port type definitions
17 description:
18 - Manages SELinux network port type definitions.
19 version_added: "2.0"
20 options:
21 ports:
22 description:
23 - Ports or port ranges, separated by a comma.
24 required: true
25 proto:
26 description:
27 - Protocol for the specified port.
28 required: true
29 choices: [ tcp, udp ]
30 setype:
31 description:
32 - SELinux type for the specified port.
33 required: true
34 state:
35 description:
36 - Desired boolean value.
37 required: true
38 choices: [ absent, present ]
39 default: present
40 reload:
41 description:
42 - Reload SELinux policy after commit.
43 type: bool
44 default: 'yes'
45 notes:
46 - The changes are persistent across reboots.
47 - Not tested on any debian based system.
48 requirements:
49 - libselinux-python
50 - policycoreutils-python
51 author:
52 - Dan Keder
53 '''
54
55 EXAMPLES = '''
56 - name: Allow Apache to listen on tcp port 8888
57 seport:
58 ports: 8888
59 proto: tcp
60 setype: http_port_t
61 state: present
62
63 - name: Allow sshd to listen on tcp port 8991
64 seport:
65 ports: 8991
66 proto: tcp
67 setype: ssh_port_t
68 state: present
69
70 - name: Allow memcached to listen on tcp ports 10000-10100 and 10112
71 seport:
72 ports: 10000-10100,10112
73 proto: tcp
74 setype: memcache_port_t
75 state: present
76 '''
77
78 import traceback
79
80 try:
81 import selinux
82 HAVE_SELINUX = True
83 except ImportError:
84 HAVE_SELINUX = False
85
86 try:
87 import seobject
88 HAVE_SEOBJECT = True
89 except ImportError:
90 HAVE_SEOBJECT = False
91
92 from ansible.module_utils.basic import AnsibleModule, HAVE_SELINUX
93 from ansible.module_utils._text import to_native
94
95
96 def semanage_port_get_ports(seport, setype, proto):
97 """ Get the list of ports that have the specified type definition.
98
99 :param seport: Instance of seobject.portRecords
100
101 :type setype: str
102 :param setype: SELinux type.
103
104 :type proto: str
105 :param proto: Protocol ('tcp' or 'udp')
106
107 :rtype: list
108 :return: List of ports that have the specified SELinux type.
109 """
110 records = seport.get_all_by_type()
111 if (setype, proto) in records:
112 return records[(setype, proto)]
113 else:
114 return []
115
116
117 def semanage_port_get_type(seport, port, proto):
118 """ Get the SELinux type of the specified port.
119
120 :param seport: Instance of seobject.portRecords
121
122 :type port: str
123 :param port: Port or port range (example: "8080", "8080-9090")
124
125 :type proto: str
126 :param proto: Protocol ('tcp' or 'udp')
127
128 :rtype: tuple
129 :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
130 """
131 ports = port.split('-', 1)
132 if len(ports) == 1:
133 ports.extend(ports)
134 key = (int(ports[0]), int(ports[1]), proto)
135
136 records = seport.get_all()
137 if key in records:
138 return records[key]
139 else:
140 return None
141
142
143 def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
144 """ Add SELinux port type definition to the policy.
145
146 :type module: AnsibleModule
147 :param module: Ansible module
148
149 :type ports: list
150 :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
151
152 :type proto: str
153 :param proto: Protocol ('tcp' or 'udp')
154
155 :type setype: str
156 :param setype: SELinux type
157
158 :type do_reload: bool
159 :param do_reload: Whether to reload SELinux policy after commit
160
161 :type serange: str
162 :param serange: SELinux MLS/MCS range (defaults to 's0')
163
164 :type sestore: str
165 :param sestore: SELinux store
166
167 :rtype: bool
168 :return: True if the policy was changed, otherwise False
169 """
170 try:
171 seport = seobject.portRecords(sestore)
172 seport.set_reload(do_reload)
173 change = False
174 ports_by_type = semanage_port_get_ports(seport, setype, proto)
175 for port in ports:
176 if port not in ports_by_type:
177 change = True
178 port_type = semanage_port_get_type(seport, port, proto)
179 if port_type is None and not module.check_mode:
180 seport.add(port, proto, serange, setype)
181 elif port_type is not None and not module.check_mode:
182 seport.modify(port, proto, serange, setype)
183
184 except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
185 module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
186
187 return change
188
189
190 def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
191 """ Delete SELinux port type definition from the policy.
192
193 :type module: AnsibleModule
194 :param module: Ansible module
195
196 :type ports: list
197 :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
198
199 :type proto: str
200 :param proto: Protocol ('tcp' or 'udp')
201
202 :type setype: str
203 :param setype: SELinux type.
204
205 :type do_reload: bool
206 :param do_reload: Whether to reload SELinux policy after commit
207
208 :type sestore: str
209 :param sestore: SELinux store
210
211 :rtype: bool
212 :return: True if the policy was changed, otherwise False
213 """
214 try:
215 seport = seobject.portRecords(sestore)
216 seport.set_reload(do_reload)
217 change = False
218 ports_by_type = semanage_port_get_ports(seport, setype, proto)
219 for port in ports:
220 if port in ports_by_type:
221 change = True
222 if not module.check_mode:
223 seport.delete(port, proto)
224
225 except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
226 module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
227
228 return change
229
230
231 def main():
232 module = AnsibleModule(
233 argument_spec=dict(
234 ports=dict(type='str', required=True),
235 proto=dict(type='str', required=True, choices=['tcp', 'udp']),
236 setype=dict(type='str', required=True),
237 state=dict(type='str', required=True, choices=['absent', 'present']),
238 reload=dict(type='bool', default=True),
239 ),
240 supports_check_mode=True,
241 )
242
243 if not HAVE_SELINUX:
244 module.fail_json(msg="This module requires libselinux-python")
245
246 if not HAVE_SEOBJECT:
247 module.fail_json(msg="This module requires policycoreutils-python")
248
249 if not selinux.is_selinux_enabled():
250 module.fail_json(msg="SELinux is disabled on this host.")
251
252 ports = [x.strip() for x in str(module.params['ports']).split(',')]
253 proto = module.params['proto']
254 setype = module.params['setype']
255 state = module.params['state']
256 do_reload = module.params['reload']
257
258 result = {
259 'ports': ports,
260 'proto': proto,
261 'setype': setype,
262 'state': state,
263 }
264
265 if state == 'present':
266 result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
267 elif state == 'absent':
268 result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
269 else:
270 module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
271
272 module.exit_json(**result)
273
274
275 if __name__ == '__main__':
276 main()
277
[end of lib/ansible/modules/system/seport.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/modules/system/seport.py b/lib/ansible/modules/system/seport.py
--- a/lib/ansible/modules/system/seport.py
+++ b/lib/ansible/modules/system/seport.py
@@ -15,12 +15,12 @@
module: seport
short_description: Manages SELinux network port type definitions
description:
- - Manages SELinux network port type definitions.
+ - Manages SELinux network port type definitions.
version_added: "2.0"
options:
ports:
description:
- - Ports or port ranges, separated by a comma.
+ - Ports or port ranges. Can be a list (since 2.6) or comma separated string.
required: true
proto:
description:
@@ -73,6 +73,15 @@
proto: tcp
setype: memcache_port_t
state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
'''
import traceback
@@ -231,7 +240,7 @@
def main():
module = AnsibleModule(
argument_spec=dict(
- ports=dict(type='str', required=True),
+ ports=dict(type='list', required=True),
proto=dict(type='str', required=True, choices=['tcp', 'udp']),
setype=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
@@ -249,7 +258,7 @@
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
- ports = [x.strip() for x in str(module.params['ports']).split(',')]
+ ports = module.params['ports']
proto = module.params['proto']
setype = module.params['setype']
state = module.params['state']
| {"golden_diff": "diff --git a/lib/ansible/modules/system/seport.py b/lib/ansible/modules/system/seport.py\n--- a/lib/ansible/modules/system/seport.py\n+++ b/lib/ansible/modules/system/seport.py\n@@ -15,12 +15,12 @@\n module: seport\n short_description: Manages SELinux network port type definitions\n description:\n- - Manages SELinux network port type definitions.\n+ - Manages SELinux network port type definitions.\n version_added: \"2.0\"\n options:\n ports:\n description:\n- - Ports or port ranges, separated by a comma.\n+ - Ports or port ranges. Can be a list (since 2.6) or comma separated string.\n required: true\n proto:\n description:\n@@ -73,6 +73,15 @@\n proto: tcp\n setype: memcache_port_t\n state: present\n+\n+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112\n+ seport:\n+ ports:\n+ - 10000-10100\n+ - 10112\n+ proto: tcp\n+ setype: memcache_port_t\n+ state: present\n '''\n \n import traceback\n@@ -231,7 +240,7 @@\n def main():\n module = AnsibleModule(\n argument_spec=dict(\n- ports=dict(type='str', required=True),\n+ ports=dict(type='list', required=True),\n proto=dict(type='str', required=True, choices=['tcp', 'udp']),\n setype=dict(type='str', required=True),\n state=dict(type='str', required=True, choices=['absent', 'present']),\n@@ -249,7 +258,7 @@\n if not selinux.is_selinux_enabled():\n module.fail_json(msg=\"SELinux is disabled on this host.\")\n \n- ports = [x.strip() for x in str(module.params['ports']).split(',')]\n+ ports = module.params['ports']\n proto = module.params['proto']\n setype = module.params['setype']\n state = module.params['state']\n", "issue": "Seport: support list input for ports\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest -->\r\n - Feature Idea\r\n\r\n##### COMPONENT NAME\r\n<!---\r\nName of the module, plugin, task or feature\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path\r\n-->\r\nseport\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.4.3.0\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.\r\n-->\r\nN/A\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\nIt would be nice if the `ports` option allowed for a list of strings instead of just a string.\r\n\r\nhttps://github.com/ansible/ansible/blob/devel/lib/ansible/modules/system/seport.py#L234\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n##### EXPECTED RESULTS\r\n```yaml\r\n- seport:\r\n ports: \r\n - \"10000-10100\"\r\n - \"10112\"\r\n proto: tcp\r\n setype: memcache_port_t\r\n state: present\r\n```\r\n\r\n##### ACTUAL RESULTS\r\n```yaml\r\n- seport:\r\n ports: 10000-10100,10112\r\n proto: tcp\r\n setype: memcache_port_t\r\n state: present\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# Copyright: (c) 2014, Dan Keder <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: seport\nshort_description: Manages SELinux network port type definitions\ndescription:\n - Manages SELinux network port type definitions.\nversion_added: \"2.0\"\noptions:\n ports:\n description:\n - Ports or port ranges, separated by a comma.\n required: true\n proto:\n description:\n - Protocol for the specified port.\n required: true\n choices: [ tcp, udp ]\n setype:\n description:\n - SELinux type for the specified port.\n required: true\n state:\n description:\n - Desired boolean value.\n required: true\n choices: [ absent, present ]\n default: present\n reload:\n description:\n - Reload SELinux policy after commit.\n type: bool\n default: 'yes'\nnotes:\n - The changes are persistent across reboots.\n - Not tested on any debian based system.\nrequirements:\n- libselinux-python\n- policycoreutils-python\nauthor:\n- Dan Keder\n'''\n\nEXAMPLES = '''\n- name: Allow Apache to listen on tcp port 8888\n seport:\n ports: 8888\n proto: tcp\n setype: http_port_t\n state: present\n\n- name: Allow sshd to listen on tcp port 8991\n seport:\n ports: 8991\n proto: tcp\n setype: ssh_port_t\n state: present\n\n- name: Allow memcached to listen on tcp ports 10000-10100 and 10112\n seport:\n ports: 10000-10100,10112\n proto: tcp\n setype: memcache_port_t\n state: present\n'''\n\nimport traceback\n\ntry:\n import selinux\n HAVE_SELINUX = True\nexcept ImportError:\n HAVE_SELINUX = False\n\ntry:\n import seobject\n HAVE_SEOBJECT = True\nexcept ImportError:\n HAVE_SEOBJECT = False\n\nfrom ansible.module_utils.basic import AnsibleModule, HAVE_SELINUX\nfrom ansible.module_utils._text import to_native\n\n\ndef semanage_port_get_ports(seport, setype, proto):\n \"\"\" Get the list of ports that have the specified type definition.\n\n :param seport: Instance of seobject.portRecords\n\n :type setype: str\n :param setype: SELinux type.\n\n :type proto: str\n :param proto: Protocol ('tcp' or 'udp')\n\n :rtype: list\n :return: List of ports that have the specified SELinux type.\n \"\"\"\n records = seport.get_all_by_type()\n if (setype, proto) in records:\n return records[(setype, proto)]\n else:\n return []\n\n\ndef semanage_port_get_type(seport, port, proto):\n \"\"\" Get the 
SELinux type of the specified port.\n\n :param seport: Instance of seobject.portRecords\n\n :type port: str\n :param port: Port or port range (example: \"8080\", \"8080-9090\")\n\n :type proto: str\n :param proto: Protocol ('tcp' or 'udp')\n\n :rtype: tuple\n :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.\n \"\"\"\n ports = port.split('-', 1)\n if len(ports) == 1:\n ports.extend(ports)\n key = (int(ports[0]), int(ports[1]), proto)\n\n records = seport.get_all()\n if key in records:\n return records[key]\n else:\n return None\n\n\ndef semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):\n \"\"\" Add SELinux port type definition to the policy.\n\n :type module: AnsibleModule\n :param module: Ansible module\n\n :type ports: list\n :param ports: List of ports and port ranges to add (e.g. [\"8080\", \"8080-9090\"])\n\n :type proto: str\n :param proto: Protocol ('tcp' or 'udp')\n\n :type setype: str\n :param setype: SELinux type\n\n :type do_reload: bool\n :param do_reload: Whether to reload SELinux policy after commit\n\n :type serange: str\n :param serange: SELinux MLS/MCS range (defaults to 's0')\n\n :type sestore: str\n :param sestore: SELinux store\n\n :rtype: bool\n :return: True if the policy was changed, otherwise False\n \"\"\"\n try:\n seport = seobject.portRecords(sestore)\n seport.set_reload(do_reload)\n change = False\n ports_by_type = semanage_port_get_ports(seport, setype, proto)\n for port in ports:\n if port not in ports_by_type:\n change = True\n port_type = semanage_port_get_type(seport, port, proto)\n if port_type is None and not module.check_mode:\n seport.add(port, proto, serange, setype)\n elif port_type is not None and not module.check_mode:\n seport.modify(port, proto, serange, setype)\n\n except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:\n module.fail_json(msg=\"%s: %s\\n\" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())\n\n return change\n\n\ndef semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):\n \"\"\" Delete SELinux port type definition from the policy.\n\n :type module: AnsibleModule\n :param module: Ansible module\n\n :type ports: list\n :param ports: List of ports and port ranges to delete (e.g. 
[\"8080\", \"8080-9090\"])\n\n :type proto: str\n :param proto: Protocol ('tcp' or 'udp')\n\n :type setype: str\n :param setype: SELinux type.\n\n :type do_reload: bool\n :param do_reload: Whether to reload SELinux policy after commit\n\n :type sestore: str\n :param sestore: SELinux store\n\n :rtype: bool\n :return: True if the policy was changed, otherwise False\n \"\"\"\n try:\n seport = seobject.portRecords(sestore)\n seport.set_reload(do_reload)\n change = False\n ports_by_type = semanage_port_get_ports(seport, setype, proto)\n for port in ports:\n if port in ports_by_type:\n change = True\n if not module.check_mode:\n seport.delete(port, proto)\n\n except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:\n module.fail_json(msg=\"%s: %s\\n\" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())\n\n return change\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n ports=dict(type='str', required=True),\n proto=dict(type='str', required=True, choices=['tcp', 'udp']),\n setype=dict(type='str', required=True),\n state=dict(type='str', required=True, choices=['absent', 'present']),\n reload=dict(type='bool', default=True),\n ),\n supports_check_mode=True,\n )\n\n if not HAVE_SELINUX:\n module.fail_json(msg=\"This module requires libselinux-python\")\n\n if not HAVE_SEOBJECT:\n module.fail_json(msg=\"This module requires policycoreutils-python\")\n\n if not selinux.is_selinux_enabled():\n module.fail_json(msg=\"SELinux is disabled on this host.\")\n\n ports = [x.strip() for x in str(module.params['ports']).split(',')]\n proto = module.params['proto']\n setype = module.params['setype']\n state = module.params['state']\n do_reload = module.params['reload']\n\n result = {\n 'ports': ports,\n 'proto': proto,\n 'setype': setype,\n 'state': state,\n }\n\n if state == 'present':\n result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)\n elif state == 'absent':\n result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)\n else:\n module.fail_json(msg='Invalid value of argument \"state\": {0}'.format(state))\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/system/seport.py"}]} | 3,824 | 482 |
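A small illustration of why the seport fix above works: declaring `ports` with `type='list'` lets AnsibleModule accept either a YAML list or a comma-separated string, so the module no longer needs its own `str(...).split(',')`. A simplified, hypothetical stand-in for that coercion (AnsibleModule's real type checking differs in details) is:

```python
def normalize_ports(value):
    """Roughly what type='list' gives the module: lists pass through,
    comma-separated strings are split into a list."""
    if isinstance(value, str):
        return [part.strip() for part in value.split(',')]
    return [str(part).strip() for part in value]


assert normalize_ports("10000-10100,10112") == ["10000-10100", "10112"]
assert normalize_ports(["10000-10100", 10112]) == ["10000-10100", "10112"]
```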
gh_patches_debug_39074 | rasdani/github-patches | git_diff | geopandas__geopandas-2007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TST: use a different geocoder than GeocodeFarm in geocoding tests
The CI is failing under geopy new version 2.2.
The error from https://github.com/geopandas/geopandas/runs/3042280515?check_suite_focus=true#step:5:2930:
`GeocodeFarm` has been removed under geopy2.2, see geopy/geopy#445.
```python
_________________________________ test_forward _________________________________
[gw0] linux -- Python 3.7.7 /usr/share/miniconda/envs/test/bin/python
locations = ['260 Broadway, New York, NY', '77 Massachusetts Ave, Cambridge, MA']
points = [<shapely.geometry.point.Point object at 0x7f0ec0b22a10>, <shapely.geometry.point.Point object at 0x7f0ec0b22510>]
def test_forward(locations, points):
> from geopy.geocoders import GeocodeFarm
E ImportError: cannot import name 'GeocodeFarm' from 'geopy.geocoders' (/usr/share/miniconda/envs/test/lib/python3.7/site-packages/geopy/geocoders/__init__.py)
geopandas/tests/test_geocode.py:141: ImportError
_________________________________ test_reverse _________________________________
[gw0] linux -- Python 3.7.7 /usr/share/miniconda/envs/test/bin/python
locations = ['260 Broadway, New York, NY', '77 Massachusetts Ave, Cambridge, MA']
points = [<shapely.geometry.point.Point object at 0x7f0ec0b39250>, <shapely.geometry.point.Point object at 0x7f0ec0b39c10>]
def test_reverse(locations, points):
> from geopy.geocoders import GeocodeFarm
E ImportError: cannot import name 'GeocodeFarm' from 'geopy.geocoders' (/usr/share/miniconda/envs/test/lib/python3.7/site-packages/geopy/geocoders/__init__.py)
geopandas/tests/test_geocode.py:158: ImportError
```
</issue>
<code>
[start of geopandas/tools/geocoding.py]
1 from collections import defaultdict
2 import time
3
4 import pandas as pd
5
6 from shapely.geometry import Point
7
8 import geopandas
9
10
11 def _get_throttle_time(provider):
12 """
13 Amount of time to wait between requests to a geocoding API, for providers
14 that specify rate limits in their terms of service.
15 """
16 import geopy.geocoders
17
18 # https://operations.osmfoundation.org/policies/nominatim/
19 if provider == geopy.geocoders.Nominatim:
20 return 1
21 else:
22 return 0
23
24
25 def geocode(strings, provider=None, **kwargs):
26 """
27 Geocode a set of strings and get a GeoDataFrame of the resulting points.
28
29 Parameters
30 ----------
31 strings : list or Series of addresses to geocode
32 provider : str or geopy.geocoder
33 Specifies geocoding service to use. If none is provided,
34 will use 'geocodefarm' with a rate limit applied (see the geocodefarm
35 terms of service at:
36 https://geocode.farm/geocoding/free-api-documentation/ ).
37
38 Either the string name used by geopy (as specified in
39 geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance
40 (e.g., geopy.geocoders.GeocodeFarm) may be used.
41
42 Some providers require additional arguments such as access keys
43 See each geocoder's specific parameters in geopy.geocoders
44
45 Notes
46 -----
47 Ensure proper use of the results by consulting the Terms of Service for
48 your provider.
49
50 Geocoding requires geopy. Install it using 'pip install geopy'. See also
51 https://github.com/geopy/geopy
52
53 Examples
54 --------
55 >>> df = geopandas.tools.geocode( # doctest: +SKIP
56 ... ["boston, ma", "1600 pennsylvania ave. washington, dc"]
57 ... )
58 >>> df # doctest: +SKIP
59 geometry address
60 0 POINT (-71.05863 42.35899) Boston, MA, United States
61 1 POINT (-77.03651 38.89766) 1600 Pennsylvania Ave NW, Washington, DC 20006...
62 """
63
64 if provider is None:
65 # https://geocode.farm/geocoding/free-api-documentation/
66 provider = "geocodefarm"
67 throttle_time = 0.25
68 else:
69 throttle_time = _get_throttle_time(provider)
70
71 return _query(strings, True, provider, throttle_time, **kwargs)
72
73
74 def reverse_geocode(points, provider=None, **kwargs):
75 """
76 Reverse geocode a set of points and get a GeoDataFrame of the resulting
77 addresses.
78
79 The points
80
81 Parameters
82 ----------
83 points : list or Series of Shapely Point objects.
84 x coordinate is longitude
85 y coordinate is latitude
86 provider : str or geopy.geocoder (opt)
87 Specifies geocoding service to use. If none is provided,
88 will use 'geocodefarm' with a rate limit applied (see the geocodefarm
89 terms of service at:
90 https://geocode.farm/geocoding/free-api-documentation/ ).
91
92 Either the string name used by geopy (as specified in
93 geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance
94 (e.g., geopy.geocoders.GeocodeFarm) may be used.
95
96 Some providers require additional arguments such as access keys
97 See each geocoder's specific parameters in geopy.geocoders
98
99 Notes
100 -----
101 Ensure proper use of the results by consulting the Terms of Service for
102 your provider.
103
104 Reverse geocoding requires geopy. Install it using 'pip install geopy'.
105 See also https://github.com/geopy/geopy
106
107 Examples
108 --------
109 >>> from shapely.geometry import Point
110 >>> df = geopandas.tools.reverse_geocode( # doctest: +SKIP
111 ... [Point(-71.0594869, 42.3584697), Point(-77.0365305, 38.8977332)]
112 ... )
113 >>> df # doctest: +SKIP
114 geometry address
115 0 POINT (-71.05941 42.35837) 29 Court Sq, Boston, MA 02108, United States
116 1 POINT (-77.03641 38.89766) 1600 Pennsylvania Ave NW, Washington, DC 20006...
117 """
118
119 if provider is None:
120 # https://geocode.farm/geocoding/free-api-documentation/
121 provider = "geocodefarm"
122 throttle_time = 0.25
123 else:
124 throttle_time = _get_throttle_time(provider)
125
126 return _query(points, False, provider, throttle_time, **kwargs)
127
128
129 def _query(data, forward, provider, throttle_time, **kwargs):
130 # generic wrapper for calls over lists to geopy Geocoders
131 from geopy.geocoders.base import GeocoderQueryError
132 from geopy.geocoders import get_geocoder_for_service
133
134 if not isinstance(data, pd.Series):
135 data = pd.Series(data)
136
137 if isinstance(provider, str):
138 provider = get_geocoder_for_service(provider)
139
140 coder = provider(**kwargs)
141 results = {}
142 for i, s in data.items():
143 try:
144 if forward:
145 results[i] = coder.geocode(s)
146 else:
147 results[i] = coder.reverse((s.y, s.x), exactly_one=True)
148 except (GeocoderQueryError, ValueError):
149 results[i] = (None, None)
150 time.sleep(throttle_time)
151
152 df = _prepare_geocode_result(results)
153 return df
154
155
156 def _prepare_geocode_result(results):
157 """
158 Helper function for the geocode function
159
160 Takes a dict where keys are index entries, values are tuples containing:
161 (address, (lat, lon))
162
163 """
164 # Prepare the data for the DataFrame as a dict of lists
165 d = defaultdict(list)
166 index = []
167
168 for i, s in results.items():
169
170 if s is None:
171 p = Point()
172 address = None
173
174 else:
175 address, loc = s
176
177 # loc is lat, lon and we want lon, lat
178 if loc is None:
179 p = Point()
180 else:
181 p = Point(loc[1], loc[0])
182
183 d["geometry"].append(p)
184 d["address"].append(address)
185 index.append(i)
186
187 df = geopandas.GeoDataFrame(d, index=index, crs="EPSG:4326")
188
189 return df
190
[end of geopandas/tools/geocoding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/tools/geocoding.py b/geopandas/tools/geocoding.py
--- a/geopandas/tools/geocoding.py
+++ b/geopandas/tools/geocoding.py
@@ -31,13 +31,12 @@
strings : list or Series of addresses to geocode
provider : str or geopy.geocoder
Specifies geocoding service to use. If none is provided,
- will use 'geocodefarm' with a rate limit applied (see the geocodefarm
- terms of service at:
- https://geocode.farm/geocoding/free-api-documentation/ ).
+ will use 'photon' (see the Photon's terms of service at:
+ https://photon.komoot.io).
Either the string name used by geopy (as specified in
geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance
- (e.g., geopy.geocoders.GeocodeFarm) may be used.
+ (e.g., geopy.geocoders.Photon) may be used.
Some providers require additional arguments such as access keys
See each geocoder's specific parameters in geopy.geocoders
@@ -62,11 +61,8 @@
"""
if provider is None:
- # https://geocode.farm/geocoding/free-api-documentation/
- provider = "geocodefarm"
- throttle_time = 0.25
- else:
- throttle_time = _get_throttle_time(provider)
+ provider = "photon"
+ throttle_time = _get_throttle_time(provider)
return _query(strings, True, provider, throttle_time, **kwargs)
@@ -85,13 +81,12 @@
y coordinate is latitude
provider : str or geopy.geocoder (opt)
Specifies geocoding service to use. If none is provided,
- will use 'geocodefarm' with a rate limit applied (see the geocodefarm
- terms of service at:
- https://geocode.farm/geocoding/free-api-documentation/ ).
+ will use 'photon' (see the Photon's terms of service at:
+ https://photon.komoot.io).
Either the string name used by geopy (as specified in
geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance
- (e.g., geopy.geocoders.GeocodeFarm) may be used.
+ (e.g., geopy.geocoders.Photon) may be used.
Some providers require additional arguments such as access keys
See each geocoder's specific parameters in geopy.geocoders
@@ -117,11 +112,8 @@
"""
if provider is None:
- # https://geocode.farm/geocoding/free-api-documentation/
- provider = "geocodefarm"
- throttle_time = 0.25
- else:
- throttle_time = _get_throttle_time(provider)
+ provider = "photon"
+ throttle_time = _get_throttle_time(provider)
return _query(points, False, provider, throttle_time, **kwargs)
| {"golden_diff": "diff --git a/geopandas/tools/geocoding.py b/geopandas/tools/geocoding.py\n--- a/geopandas/tools/geocoding.py\n+++ b/geopandas/tools/geocoding.py\n@@ -31,13 +31,12 @@\n strings : list or Series of addresses to geocode\n provider : str or geopy.geocoder\n Specifies geocoding service to use. If none is provided,\n- will use 'geocodefarm' with a rate limit applied (see the geocodefarm\n- terms of service at:\n- https://geocode.farm/geocoding/free-api-documentation/ ).\n+ will use 'photon' (see the Photon's terms of service at:\n+ https://photon.komoot.io).\n \n Either the string name used by geopy (as specified in\n geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance\n- (e.g., geopy.geocoders.GeocodeFarm) may be used.\n+ (e.g., geopy.geocoders.Photon) may be used.\n \n Some providers require additional arguments such as access keys\n See each geocoder's specific parameters in geopy.geocoders\n@@ -62,11 +61,8 @@\n \"\"\"\n \n if provider is None:\n- # https://geocode.farm/geocoding/free-api-documentation/\n- provider = \"geocodefarm\"\n- throttle_time = 0.25\n- else:\n- throttle_time = _get_throttle_time(provider)\n+ provider = \"photon\"\n+ throttle_time = _get_throttle_time(provider)\n \n return _query(strings, True, provider, throttle_time, **kwargs)\n \n@@ -85,13 +81,12 @@\n y coordinate is latitude\n provider : str or geopy.geocoder (opt)\n Specifies geocoding service to use. If none is provided,\n- will use 'geocodefarm' with a rate limit applied (see the geocodefarm\n- terms of service at:\n- https://geocode.farm/geocoding/free-api-documentation/ ).\n+ will use 'photon' (see the Photon's terms of service at:\n+ https://photon.komoot.io).\n \n Either the string name used by geopy (as specified in\n geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance\n- (e.g., geopy.geocoders.GeocodeFarm) may be used.\n+ (e.g., geopy.geocoders.Photon) may be used.\n \n Some providers require additional arguments such as access keys\n See each geocoder's specific parameters in geopy.geocoders\n@@ -117,11 +112,8 @@\n \"\"\"\n \n if provider is None:\n- # https://geocode.farm/geocoding/free-api-documentation/\n- provider = \"geocodefarm\"\n- throttle_time = 0.25\n- else:\n- throttle_time = _get_throttle_time(provider)\n+ provider = \"photon\"\n+ throttle_time = _get_throttle_time(provider)\n \n return _query(points, False, provider, throttle_time, **kwargs)\n", "issue": "TST: use a different geocoder than GeocodeFarm in geocoding tests\nThe CI is failing under geopy new version 2.2.\r\n\r\nThe error from https://github.com/geopandas/geopandas/runs/3042280515?check_suite_focus=true#step:5:2930:\r\n\r\n`GeocodeFarm` has been removed under geopy2.2, see geopy/geopy#445.\r\n\r\n```python\r\n_________________________________ test_forward _________________________________\r\n[gw0] linux -- Python 3.7.7 /usr/share/miniconda/envs/test/bin/python\r\n\r\nlocations = ['260 Broadway, New York, NY', '77 Massachusetts Ave, Cambridge, MA']\r\npoints = [<shapely.geometry.point.Point object at 0x7f0ec0b22a10>, <shapely.geometry.point.Point object at 0x7f0ec0b22510>]\r\n\r\n def test_forward(locations, points):\r\n> from geopy.geocoders import GeocodeFarm\r\nE ImportError: cannot import name 'GeocodeFarm' from 'geopy.geocoders' (/usr/share/miniconda/envs/test/lib/python3.7/site-packages/geopy/geocoders/__init__.py)\r\n\r\ngeopandas/tests/test_geocode.py:141: ImportError\r\n_________________________________ test_reverse _________________________________\r\n[gw0] linux -- Python 
3.7.7 /usr/share/miniconda/envs/test/bin/python\r\n\r\nlocations = ['260 Broadway, New York, NY', '77 Massachusetts Ave, Cambridge, MA']\r\npoints = [<shapely.geometry.point.Point object at 0x7f0ec0b39250>, <shapely.geometry.point.Point object at 0x7f0ec0b39c10>]\r\n\r\n def test_reverse(locations, points):\r\n> from geopy.geocoders import GeocodeFarm\r\nE ImportError: cannot import name 'GeocodeFarm' from 'geopy.geocoders' (/usr/share/miniconda/envs/test/lib/python3.7/site-packages/geopy/geocoders/__init__.py)\r\n\r\ngeopandas/tests/test_geocode.py:158: ImportError\r\n```\n", "before_files": [{"content": "from collections import defaultdict\nimport time\n\nimport pandas as pd\n\nfrom shapely.geometry import Point\n\nimport geopandas\n\n\ndef _get_throttle_time(provider):\n \"\"\"\n Amount of time to wait between requests to a geocoding API, for providers\n that specify rate limits in their terms of service.\n \"\"\"\n import geopy.geocoders\n\n # https://operations.osmfoundation.org/policies/nominatim/\n if provider == geopy.geocoders.Nominatim:\n return 1\n else:\n return 0\n\n\ndef geocode(strings, provider=None, **kwargs):\n \"\"\"\n Geocode a set of strings and get a GeoDataFrame of the resulting points.\n\n Parameters\n ----------\n strings : list or Series of addresses to geocode\n provider : str or geopy.geocoder\n Specifies geocoding service to use. If none is provided,\n will use 'geocodefarm' with a rate limit applied (see the geocodefarm\n terms of service at:\n https://geocode.farm/geocoding/free-api-documentation/ ).\n\n Either the string name used by geopy (as specified in\n geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance\n (e.g., geopy.geocoders.GeocodeFarm) may be used.\n\n Some providers require additional arguments such as access keys\n See each geocoder's specific parameters in geopy.geocoders\n\n Notes\n -----\n Ensure proper use of the results by consulting the Terms of Service for\n your provider.\n\n Geocoding requires geopy. Install it using 'pip install geopy'. See also\n https://github.com/geopy/geopy\n\n Examples\n --------\n >>> df = geopandas.tools.geocode( # doctest: +SKIP\n ... [\"boston, ma\", \"1600 pennsylvania ave. washington, dc\"]\n ... )\n >>> df # doctest: +SKIP\n geometry address\n 0 POINT (-71.05863 42.35899) Boston, MA, United States\n 1 POINT (-77.03651 38.89766) 1600 Pennsylvania Ave NW, Washington, DC 20006...\n \"\"\"\n\n if provider is None:\n # https://geocode.farm/geocoding/free-api-documentation/\n provider = \"geocodefarm\"\n throttle_time = 0.25\n else:\n throttle_time = _get_throttle_time(provider)\n\n return _query(strings, True, provider, throttle_time, **kwargs)\n\n\ndef reverse_geocode(points, provider=None, **kwargs):\n \"\"\"\n Reverse geocode a set of points and get a GeoDataFrame of the resulting\n addresses.\n\n The points\n\n Parameters\n ----------\n points : list or Series of Shapely Point objects.\n x coordinate is longitude\n y coordinate is latitude\n provider : str or geopy.geocoder (opt)\n Specifies geocoding service to use. 
If none is provided,\n will use 'geocodefarm' with a rate limit applied (see the geocodefarm\n terms of service at:\n https://geocode.farm/geocoding/free-api-documentation/ ).\n\n Either the string name used by geopy (as specified in\n geopy.geocoders.SERVICE_TO_GEOCODER) or a geopy Geocoder instance\n (e.g., geopy.geocoders.GeocodeFarm) may be used.\n\n Some providers require additional arguments such as access keys\n See each geocoder's specific parameters in geopy.geocoders\n\n Notes\n -----\n Ensure proper use of the results by consulting the Terms of Service for\n your provider.\n\n Reverse geocoding requires geopy. Install it using 'pip install geopy'.\n See also https://github.com/geopy/geopy\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> df = geopandas.tools.reverse_geocode( # doctest: +SKIP\n ... [Point(-71.0594869, 42.3584697), Point(-77.0365305, 38.8977332)]\n ... )\n >>> df # doctest: +SKIP\n geometry address\n 0 POINT (-71.05941 42.35837) 29 Court Sq, Boston, MA 02108, United States\n 1 POINT (-77.03641 38.89766) 1600 Pennsylvania Ave NW, Washington, DC 20006...\n \"\"\"\n\n if provider is None:\n # https://geocode.farm/geocoding/free-api-documentation/\n provider = \"geocodefarm\"\n throttle_time = 0.25\n else:\n throttle_time = _get_throttle_time(provider)\n\n return _query(points, False, provider, throttle_time, **kwargs)\n\n\ndef _query(data, forward, provider, throttle_time, **kwargs):\n # generic wrapper for calls over lists to geopy Geocoders\n from geopy.geocoders.base import GeocoderQueryError\n from geopy.geocoders import get_geocoder_for_service\n\n if not isinstance(data, pd.Series):\n data = pd.Series(data)\n\n if isinstance(provider, str):\n provider = get_geocoder_for_service(provider)\n\n coder = provider(**kwargs)\n results = {}\n for i, s in data.items():\n try:\n if forward:\n results[i] = coder.geocode(s)\n else:\n results[i] = coder.reverse((s.y, s.x), exactly_one=True)\n except (GeocoderQueryError, ValueError):\n results[i] = (None, None)\n time.sleep(throttle_time)\n\n df = _prepare_geocode_result(results)\n return df\n\n\ndef _prepare_geocode_result(results):\n \"\"\"\n Helper function for the geocode function\n\n Takes a dict where keys are index entries, values are tuples containing:\n (address, (lat, lon))\n\n \"\"\"\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in results.items():\n\n if s is None:\n p = Point()\n address = None\n\n else:\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n d[\"geometry\"].append(p)\n d[\"address\"].append(address)\n index.append(i)\n\n df = geopandas.GeoDataFrame(d, index=index, crs=\"EPSG:4326\")\n\n return df\n", "path": "geopandas/tools/geocoding.py"}]} | 3,029 | 719 |
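As a quick check on the geopandas fix above: geopy 2.x still ships `geopy.geocoders.Photon`, and the patched default provider is `"photon"`, so the geocoding helpers can be exercised without GeocodeFarm. A hedged usage sketch (it performs live network requests against the Photon service):

```python
import geopandas
from shapely.geometry import Point

# Forward geocoding through the provider the patch switches to.
fwd = geopandas.tools.geocode(["260 Broadway, New York, NY"], provider="photon")

# Reverse geocoding through the same provider.
rev = geopandas.tools.reverse_geocode(
    [Point(-71.0594869, 42.3584697)], provider="photon"
)

print(fwd[["address"]])
print(rev[["address"]])
```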
gh_patches_debug_7304 | rasdani/github-patches | git_diff | uccser__cs-unplugged-225 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Complete folder structure for test suite
Each file should have a docstring explaining its intended purpose.
Add a code coverage tool
</issue>
<code>
[start of csunplugged/utils/BaseLoader.py]
1 """Base loader used to create custom loaders for content."""
2
3 import yaml
4 import mdx_math
5 import abc
6 import sys
7 import re
8 import os.path
9 from os import listdir
10 from verto import Verto
11
12 from .check_required_files import check_converter_required_files
13
14 from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError
15 from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError
16 from utils.errors.EmptyConfigFileError import EmptyConfigFileError
17 from utils.errors.InvalidConfigFileError import InvalidConfigFileError
18 from utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError
19 from utils.errors.CouldNotFindConfigFileError import CouldNotFindConfigFileError
20
21
22 class BaseLoader():
23 """Base loader class for individual loaders."""
24
25 def __init__(self, BASE_PATH="", load_log=[]):
26 """Create a BaseLoader object.
27
28 Args:
29 BASE_PATH: string of base path.
30 load_log: list of log messages.
31 """
32 if load_log:
33 self.load_log = load_log
34 else:
35 self.load_log = list(load_log)
36 self.BASE_PATH = BASE_PATH
37 self.setup_md_to_html_converter()
38
39 def setup_md_to_html_converter(self):
40 """Create Markdown converter.
41
42 The converter is created with custom processors, html templates,
43 and extensions.
44 """
45 templates = self.load_template_files()
46 extensions = [
47 "markdown.extensions.fenced_code",
48 "markdown.extensions.codehilite",
49 "markdown.extensions.sane_lists",
50 "markdown.extensions.tables",
51 mdx_math.MathExtension(enable_dollar_delimiter=True)
52 ]
53 self.converter = Verto(html_templates=templates, extensions=extensions)
54 custom_processors = self.converter.processor_defaults()
55 custom_processors.add("remove-title")
56 self.converter.update_processors(custom_processors)
57
58 def convert_md_file(self, md_file_path, config_file_path, heading_required=True):
59 """Return the Verto object for a given Markdown file.
60
61 Args:
62 md_file_path: location of Markdown file to convert
63
64 Returns:
65 VertoResult object
66
67 Raises:
68 CouldNotFindMarkdownFileError: when a given Markdown file cannot be found.
69 NoHeadingFoundInMarkdownFileError: when no heading can be found in a given
70 Markdown file.
71 EmptyMarkdownFileError: when no content can be found in a given Markdown
72 file.
73 """
74 try:
75 # check file exists
76 content = open(md_file_path, encoding="UTF-8").read()
77 except:
78 raise CouldNotFindMarkdownFileError(md_file_path, config_file_path)
79
80 result = self.converter.convert(content)
81
82 if heading_required:
83 if result.title is None:
84 raise NoHeadingFoundInMarkdownFileError(md_file_path)
85
86 if len(result.html_string) == 0:
87 raise EmptyMarkdownFileError(md_file_path)
88
89 check_converter_required_files(result.required_files, md_file_path)
90 return result
91
92 def log(self, log_message, indent_amount=0):
93 """Add the log message to the load log with the specified indent."""
94 self.load_log.append((log_message, indent_amount))
95
96 def print_load_log(self):
97 """Output log messages from loader to console."""
98 for (log, indent_amount) in self.load_log:
99 indent = " " * indent_amount
100 sys.stdout.write("{indent}{text}\n".format(indent=indent, text=log))
101 sys.stdout.write("\n")
102 self.load_log = []
103
104 def load_yaml_file(self, yaml_file_path):
105 """Load and read given YAML file.
106
107 Args:
108 file_path: location of yaml file to read
109
110 Returns:
111 Either list or string, depending on structure of given yaml file
112
113 Raises:
114 CouldNotFindConfigFileError: when a given config file cannot be found.
115 InvalidConfigFileError: when a given config file is incorrectly formatted.
116 EmptyConfigFileError: when a give config file is empty.
117 """
118 try:
119 yaml_file = open(yaml_file_path, encoding="UTF-8").read()
120 except:
121 raise CouldNotFindConfigFileError(yaml_file_path)
122
123 try:
124 yaml_contents = yaml.load(yaml_file)
125 except:
126 raise InvalidConfigFileError(yaml_file_path)
127
128 if yaml_contents is None:
129 raise EmptyConfigFileError(yaml_file_path)
130
131 if isinstance(yaml_contents, dict) is False:
132 raise InvalidConfigFileError(yaml_file_path)
133
134 return yaml_contents
135
136 def load_template_files(self):
137 """Load custom HTML templates for converter.
138
139 Returns:
140 templates: dictionary of html templates
141 """
142 templates = dict()
143 template_path = os.path.join(
144 os.path.dirname(__file__),
145 "custom_converter_templates/"
146 )
147 for file in listdir(template_path):
148 template_file = re.search(r"(.*?).html$", file)
149 if template_file:
150 template_name = template_file.groups()[0]
151 templates[template_name] = open(template_path + file).read()
152 return templates
153
154 @abc.abstractmethod
155 def load(self):
156 """Abstract method to be implemented by subclasses.
157
158 Raise:
159 NotImplementedError: when a user attempts to run the load() method of the
160 BaseLoader class.
161 """
162 raise NotImplementedError("Subclass does not implement this method")
163
[end of csunplugged/utils/BaseLoader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/utils/BaseLoader.py b/csunplugged/utils/BaseLoader.py
--- a/csunplugged/utils/BaseLoader.py
+++ b/csunplugged/utils/BaseLoader.py
@@ -10,7 +10,6 @@
from verto import Verto
from .check_required_files import check_converter_required_files
-
from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError
from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError
from utils.errors.EmptyConfigFileError import EmptyConfigFileError
| {"golden_diff": "diff --git a/csunplugged/utils/BaseLoader.py b/csunplugged/utils/BaseLoader.py\n--- a/csunplugged/utils/BaseLoader.py\n+++ b/csunplugged/utils/BaseLoader.py\n@@ -10,7 +10,6 @@\n from verto import Verto\n \n from .check_required_files import check_converter_required_files\n-\n from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError\n from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError\n from utils.errors.EmptyConfigFileError import EmptyConfigFileError\n", "issue": "Complete folder structure for test suite\nEach file should have a docstring explaining it's intended purpose.\nAdd a code coverage tool\n\n", "before_files": [{"content": "\"\"\"Base loader used to create custom loaders for content.\"\"\"\n\nimport yaml\nimport mdx_math\nimport abc\nimport sys\nimport re\nimport os.path\nfrom os import listdir\nfrom verto import Verto\n\nfrom .check_required_files import check_converter_required_files\n\nfrom utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError\nfrom utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError\nfrom utils.errors.EmptyConfigFileError import EmptyConfigFileError\nfrom utils.errors.InvalidConfigFileError import InvalidConfigFileError\nfrom utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError\nfrom utils.errors.CouldNotFindConfigFileError import CouldNotFindConfigFileError\n\n\nclass BaseLoader():\n \"\"\"Base loader class for individual loaders.\"\"\"\n\n def __init__(self, BASE_PATH=\"\", load_log=[]):\n \"\"\"Create a BaseLoader object.\n\n Args:\n BASE_PATH: string of base path.\n load_log: list of log messages.\n \"\"\"\n if load_log:\n self.load_log = load_log\n else:\n self.load_log = list(load_log)\n self.BASE_PATH = BASE_PATH\n self.setup_md_to_html_converter()\n\n def setup_md_to_html_converter(self):\n \"\"\"Create Markdown converter.\n\n The converter is created with custom processors, html templates,\n and extensions.\n \"\"\"\n templates = self.load_template_files()\n extensions = [\n \"markdown.extensions.fenced_code\",\n \"markdown.extensions.codehilite\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.tables\",\n mdx_math.MathExtension(enable_dollar_delimiter=True)\n ]\n self.converter = Verto(html_templates=templates, extensions=extensions)\n custom_processors = self.converter.processor_defaults()\n custom_processors.add(\"remove-title\")\n self.converter.update_processors(custom_processors)\n\n def convert_md_file(self, md_file_path, config_file_path, heading_required=True):\n \"\"\"Return the Verto object for a given Markdown file.\n\n Args:\n md_file_path: location of Markdown file to convert\n\n Returns:\n VertoResult object\n\n Raises:\n CouldNotFindMarkdownFileError: when a given Markdown file cannot be found.\n NoHeadingFoundInMarkdownFileError: when no heading can be found in a given\n Markdown file.\n EmptyMarkdownFileError: when no content can be found in a given Markdown\n file.\n \"\"\"\n try:\n # check file exists\n content = open(md_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindMarkdownFileError(md_file_path, config_file_path)\n\n result = self.converter.convert(content)\n\n if heading_required:\n if result.title is None:\n raise NoHeadingFoundInMarkdownFileError(md_file_path)\n\n if len(result.html_string) == 0:\n raise EmptyMarkdownFileError(md_file_path)\n\n check_converter_required_files(result.required_files, md_file_path)\n return result\n\n def log(self, 
log_message, indent_amount=0):\n \"\"\"Add the log message to the load log with the specified indent.\"\"\"\n self.load_log.append((log_message, indent_amount))\n\n def print_load_log(self):\n \"\"\"Output log messages from loader to console.\"\"\"\n for (log, indent_amount) in self.load_log:\n indent = \" \" * indent_amount\n sys.stdout.write(\"{indent}{text}\\n\".format(indent=indent, text=log))\n sys.stdout.write(\"\\n\")\n self.load_log = []\n\n def load_yaml_file(self, yaml_file_path):\n \"\"\"Load and read given YAML file.\n\n Args:\n file_path: location of yaml file to read\n\n Returns:\n Either list or string, depending on structure of given yaml file\n\n Raises:\n CouldNotFindConfigFileError: when a given config file cannot be found.\n InvalidConfigFileError: when a given config file is incorrectly formatted.\n EmptyConfigFileError: when a give config file is empty.\n \"\"\"\n try:\n yaml_file = open(yaml_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindConfigFileError(yaml_file_path)\n\n try:\n yaml_contents = yaml.load(yaml_file)\n except:\n raise InvalidConfigFileError(yaml_file_path)\n\n if yaml_contents is None:\n raise EmptyConfigFileError(yaml_file_path)\n\n if isinstance(yaml_contents, dict) is False:\n raise InvalidConfigFileError(yaml_file_path)\n\n return yaml_contents\n\n def load_template_files(self):\n \"\"\"Load custom HTML templates for converter.\n\n Returns:\n templates: dictionary of html templates\n \"\"\"\n templates = dict()\n template_path = os.path.join(\n os.path.dirname(__file__),\n \"custom_converter_templates/\"\n )\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(template_path + file).read()\n return templates\n\n @abc.abstractmethod\n def load(self):\n \"\"\"Abstract method to be implemented by subclasses.\n\n Raise:\n NotImplementedError: when a user attempts to run the load() method of the\n BaseLoader class.\n \"\"\"\n raise NotImplementedError(\"Subclass does not implement this method\")\n", "path": "csunplugged/utils/BaseLoader.py"}]} | 2,092 | 119 |
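On the cs-unplugged record above, the issue asks for a fleshed-out test-suite layout with per-file docstrings plus a coverage tool. Below is a hedged sketch of one such test module; the import path and behaviour are inferred from the `BaseLoader` file shown in the record, and it assumes the project's dependencies (such as `verto`) are installed. Coverage could then come from coverage.py, e.g. `coverage run ./manage.py test` followed by `coverage report`, assuming the usual Django layout.

```python
"""Tests for the BaseLoader class; the docstring states the file's intended purpose."""

from django.test import SimpleTestCase

from utils.BaseLoader import BaseLoader


class BaseLoaderTest(SimpleTestCase):
    """Exercise the logging helpers, which need no Markdown fixtures."""

    def test_log_appends_message_with_indent(self):
        loader = BaseLoader()
        loader.log("Loaded file", indent_amount=1)
        self.assertEqual(loader.load_log[-1], ("Loaded file", 1))
```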
gh_patches_debug_60620 | rasdani/github-patches | git_diff | rasterio__rasterio-373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate to Cython 0.22
The bandaid applied in #276 is falling off. I've tagged this for 1.0 but we could do it sooner.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Two environmental variables influence this script.
4 #
5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
6 # libraries, and data files.
7 #
8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
9 # source or binary distribution. This is essential when creating self-contained
10 # binary wheels.
11
12 import logging
13 import os
14 import pprint
15 import shutil
16 import subprocess
17 import sys
18
19 from setuptools import setup
20 from setuptools.extension import Extension
21
22 logging.basicConfig()
23 log = logging.getLogger()
24
25 # python -W all setup.py ...
26 if 'all' in sys.warnoptions:
27 log.level = logging.DEBUG
28
29 def check_output(cmd):
30 # since subprocess.check_output doesn't exist in 2.6
31 # we wrap it here.
32 try:
33 out = subprocess.check_output(cmd)
34 return out.decode('utf')
35 except AttributeError:
36 # For some reasone check_output doesn't exist
37 # So fall back on Popen
38 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
39 out, err = p.communicate()
40 return out
41
42 def copy_data_tree(datadir, destdir):
43 try:
44 shutil.rmtree(destdir)
45 except OSError:
46 pass
47 shutil.copytree(datadir, destdir)
48
49 # Parse the version from the rasterio module.
50 with open('rasterio/__init__.py') as f:
51 for line in f:
52 if line.find("__version__") >= 0:
53 version = line.split("=")[1].strip()
54 version = version.strip('"')
55 version = version.strip("'")
56 continue
57
58 with open('VERSION.txt', 'w') as f:
59 f.write(version)
60
61 # Use Cython if available.
62 try:
63 from Cython.Build import cythonize
64 except ImportError:
65 cythonize = None
66
67 # By default we'll try to get options via gdal-config. On systems without,
68 # options will need to be set in setup.cfg or on the setup command line.
69 include_dirs = []
70 library_dirs = []
71 libraries = []
72 extra_link_args = []
73 gdal_output = [None]*3
74
75 try:
76 import numpy
77 include_dirs.append(numpy.get_include())
78 except ImportError:
79 log.critical("Numpy and its headers are required to run setup(). Exiting.")
80 sys.exit(1)
81
82 try:
83 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
84 for i, flag in enumerate(("--cflags", "--libs", "--datadir")):
85 gdal_output[i] = check_output([gdal_config, flag]).strip()
86
87 for item in gdal_output[0].split():
88 if item.startswith("-I"):
89 include_dirs.extend(item[2:].split(":"))
90 for item in gdal_output[1].split():
91 if item.startswith("-L"):
92 library_dirs.extend(item[2:].split(":"))
93 elif item.startswith("-l"):
94 libraries.append(item[2:])
95 else:
96 # e.g. -framework GDAL
97 extra_link_args.append(item)
98
99 except Exception as e:
100 if os.name == "nt":
101 log.info(("Building on Windows requires extra options to setup.py to locate needed GDAL files.\n"
102 "More information is available in the README."))
103 else:
104 log.warning("Failed to get options via gdal-config: %s", str(e))
105
106
107 # Conditionally copy the GDAL data. To be used in conjunction with
108 # the bdist_wheel command to make self-contained binary wheels.
109 if os.environ.get('PACKAGE_DATA'):
110 destdir = 'rasterio/gdal_data'
111 if gdal_output[2]:
112 log.info("Copying gdal data from %s" % gdal_output[2])
113 copy_data_tree(gdal_output[2], destdir)
114 else:
115 # check to see if GDAL_DATA is defined
116 gdal_data = os.environ.get('GDAL_DATA', None)
117 if gdal_data:
118 log.info("Copying gdal_data from %s" % gdal_data)
119 copy_data_tree(gdal_data, destdir)
120
121 # Conditionally copy PROJ.4 data.
122 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
123 if os.path.exists(projdatadir):
124 log.info("Copying proj_data from %s" % projdatadir)
125 copy_data_tree(projdatadir, 'rasterio/proj_data')
126
127 ext_options = dict(
128 include_dirs=include_dirs,
129 library_dirs=library_dirs,
130 libraries=libraries,
131 extra_link_args=extra_link_args)
132
133 log.debug('ext_options:\n%s', pprint.pformat(ext_options))
134
135 # When building from a repo, Cython is required.
136 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
137 log.info("MANIFEST.in found, presume a repo, cythonizing...")
138 if not cythonize:
139 log.critical(
140 "Cython.Build.cythonize not found. "
141 "Cython is required to build from a repo.")
142 sys.exit(1)
143 ext_modules = cythonize([
144 Extension(
145 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
146 Extension(
147 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
148 Extension(
149 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),
150 Extension(
151 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
152 Extension(
153 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),
154 Extension(
155 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
156 Extension(
157 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),
158 Extension(
159 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
160 Extension(
161 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
162 ], quiet=True)
163
164 # If there's no manifest template, as in an sdist, we just specify .c files.
165 else:
166 ext_modules = [
167 Extension(
168 'rasterio._base', ['rasterio/_base.c'], **ext_options),
169 Extension(
170 'rasterio._io', ['rasterio/_io.c'], **ext_options),
171 Extension(
172 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),
173 Extension(
174 'rasterio._features', ['rasterio/_features.c'], **ext_options),
175 Extension(
176 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),
177 Extension(
178 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
179 Extension(
180 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),
181 Extension(
182 'rasterio._err', ['rasterio/_err.c'], **ext_options),
183 Extension(
184 'rasterio._example', ['rasterio/_example.c'], **ext_options),
185 ]
186
187 with open('README.rst') as f:
188 readme = f.read()
189
190 # Runtime requirements.
191 inst_reqs = [
192 'affine>=1.0',
193 'cligj',
194 'Numpy>=1.7',
195 'snuggs>=1.3.1']
196
197 if sys.version_info < (3, 4):
198 inst_reqs.append('enum34')
199
200 setup_args = dict(
201 name='rasterio',
202 version=version,
203 description="Fast and direct raster I/O for use with Numpy and SciPy",
204 long_description=readme,
205 classifiers=[
206 'Development Status :: 4 - Beta',
207 'Intended Audience :: Developers',
208 'Intended Audience :: Information Technology',
209 'Intended Audience :: Science/Research',
210 'License :: OSI Approved :: BSD License',
211 'Programming Language :: C',
212 'Programming Language :: Python :: 2.6',
213 'Programming Language :: Python :: 2.7',
214 'Programming Language :: Python :: 3.3',
215 'Programming Language :: Python :: 3.4',
216 'Topic :: Multimedia :: Graphics :: Graphics Conversion',
217 'Topic :: Scientific/Engineering :: GIS'],
218 keywords='raster gdal',
219 author='Sean Gillies',
220 author_email='[email protected]',
221 url='https://github.com/mapbox/rasterio',
222 license='BSD',
223 package_dir={'': '.'},
224 packages=['rasterio', 'rasterio.rio'],
225 entry_points='''
226 [console_scripts]
227 rio=rasterio.rio.main:cli
228
229 [rasterio.rio_commands]
230 bounds=rasterio.rio.rio:bounds
231 calc=rasterio.rio.calc:calc
232 edit-info=rasterio.rio.info:edit
233 env=rasterio.rio.info:env
234 info=rasterio.rio.info:info
235 insp=rasterio.rio.rio:insp
236 mask=rasterio.rio.features:mask
237 merge=rasterio.rio.merge:merge
238 rasterize=rasterio.rio.features:rasterize
239 sample=rasterio.rio.sample:sample
240 shapes=rasterio.rio.features:shapes
241 stack=rasterio.rio.bands:stack
242 transform=rasterio.rio.rio:transform
243 ''',
244 include_package_data=True,
245 ext_modules=ext_modules,
246 zip_safe=False,
247 install_requires=inst_reqs)
248
249 if os.environ.get('PACKAGE_DATA'):
250 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
251
252 setup(**setup_args)
253
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -125,6 +125,7 @@
copy_data_tree(projdatadir, 'rasterio/proj_data')
ext_options = dict(
+ extra_compile_args=['-Wno-unused-parameter', '-Wno-unused-function'],
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -125,6 +125,7 @@\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n \n ext_options = dict(\n+ extra_compile_args=['-Wno-unused-parameter', '-Wno-unused-function'],\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n", "issue": "Migrate to Cython 0.22\nThe bandaid applied in #276 is falling off. I've tagged this for 1.0 but we could do it sooner.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: 
Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:cli\n \n [rasterio.rio_commands]\n bounds=rasterio.rio.rio:bounds\n calc=rasterio.rio.calc:calc\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.rio:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n transform=rasterio.rio.rio:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs)\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}]} | 3,379 | 93 |
gh_patches_debug_22951 | rasdani/github-patches | git_diff | ydataai__ydata-profiling-829 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Phi K correlation variable order
For me, all correlation plots show variables in the (domain-specific, sensible) order of the columns in my data frame.
Only Phi K shows them in some other order.
Is this a bug or a feature?
Is there a setting to get the "good" order?
This is with pandas 1.3 and pandas-profiling 3.0.0
<img width="879" alt="Screenshot 2021-09-05 at 21 43 55" src="https://user-images.githubusercontent.com/852409/132139566-ba92033b-98fb-4b3d-a869-6c096ed294a1.png">
<img width="907" alt="Screenshot 2021-09-05 at 21 43 45" src="https://user-images.githubusercontent.com/852409/132139567-22e2d9ce-cdc8-4b95-93b2-7445a78ed397.png">
</issue>
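The root of the reordering is visible in the check code further down: the selected columns (`selcols`) are collected into a Python `set`, whose iteration order need not follow `df.columns`. A minimal sketch with made-up column names shows the effect, and one way to restore the frame's own order by sorting on each column's index:

```python
import pandas as pd

# Made-up frame: the desired order is the DataFrame's own column order.
df = pd.DataFrame(columns=["age", "income", "city", "segment"])

selected = {"segment", "age", "city"}  # a set: iteration order is arbitrary
col_index = {name: i for i, name in enumerate(df.columns)}
ordered = sorted(selected, key=lambda name: col_index[name])

print(list(selected))  # arbitrary, e.g. ['city', 'segment', 'age']
print(ordered)         # ['age', 'city', 'segment'] -- follows df.columns
```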
<code>
[start of src/pandas_profiling/model/pandas/correlations_pandas.py]
1 """Correlations between variables."""
2 import itertools
3 import warnings
4 from typing import Optional
5
6 import numpy as np
7 import pandas as pd
8 from scipy import stats
9
10 from pandas_profiling.config import Settings
11 from pandas_profiling.model.correlations import (
12 Cramers,
13 Kendall,
14 Pearson,
15 PhiK,
16 Spearman,
17 )
18
19
20 @Spearman.compute.register(Settings, pd.DataFrame, dict)
21 def pandas_spearman_compute(
22 config: Settings, df: pd.DataFrame, summary: dict
23 ) -> Optional[pd.DataFrame]:
24 return df.corr(method="spearman")
25
26
27 @Pearson.compute.register(Settings, pd.DataFrame, dict)
28 def pandas_pearson_compute(
29 config: Settings, df: pd.DataFrame, summary: dict
30 ) -> Optional[pd.DataFrame]:
31 return df.corr(method="pearson")
32
33
34 @Kendall.compute.register(Settings, pd.DataFrame, dict)
35 def pandas_kendall_compute(
36 config: Settings, df: pd.DataFrame, summary: dict
37 ) -> Optional[pd.DataFrame]:
38 return df.corr(method="kendall")
39
40
41 def _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:
42 """Calculate the Cramer's V corrected stat for two variables.
43
44 Args:
45 confusion_matrix: Crosstab between two variables.
46 correction: Should the correction be applied?
47
48 Returns:
49 The Cramer's V corrected stat for the two variables.
50 """
51 chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]
52 n = confusion_matrix.sum().sum()
53 phi2 = chi2 / n
54 r = confusion_matrix.shape[0]
55 k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1
56
57 # Deal with NaNs later on
58 with np.errstate(divide="ignore", invalid="ignore"):
59 phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))
60 rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)
61 kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)
62 rkcorr = min((kcorr - 1.0), (rcorr - 1.0))
63 if rkcorr == 0.0:
64 corr = 1.0
65 else:
66 corr = np.sqrt(phi2corr / rkcorr)
67 return corr
68
69
70 @Cramers.compute.register(Settings, pd.DataFrame, dict)
71 def pandas_cramers_compute(
72 config: Settings, df: pd.DataFrame, summary: dict
73 ) -> Optional[pd.DataFrame]:
74 threshold = config.categorical_maximum_correlation_distinct
75
76 categoricals = {
77 key
78 for key, value in summary.items()
79 if value["type"] in {"Categorical", "Boolean"}
80 and value["n_distinct"] <= threshold
81 }
82
83 if len(categoricals) <= 1:
84 return None
85
86 matrix = np.zeros((len(categoricals), len(categoricals)))
87 np.fill_diagonal(matrix, 1.0)
88 correlation_matrix = pd.DataFrame(
89 matrix,
90 index=categoricals,
91 columns=categoricals,
92 )
93
94 for name1, name2 in itertools.combinations(categoricals, 2):
95 confusion_matrix = pd.crosstab(df[name1], df[name2])
96 correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(
97 confusion_matrix, correction=True
98 )
99 correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]
100 return correlation_matrix
101
102
103 @PhiK.compute.register(Settings, pd.DataFrame, dict)
104 def pandas_phik_compute(
105 config: Settings, df: pd.DataFrame, summary: dict
106 ) -> Optional[pd.DataFrame]:
107 intcols = {
108 key
109 for key, value in summary.items()
110 # DateTime currently excluded
111 # In some use cases, it makes sense to convert it to interval
112 # See https://github.com/KaveIO/PhiK/issues/7
113 if value["type"] == "Numeric" and 1 < value["n_distinct"]
114 }
115
116 selcols = {
117 key
118 for key, value in summary.items()
119 if value["type"] != "Unsupported"
120 and 1 < value["n_distinct"] <= config.categorical_maximum_correlation_distinct
121 }
122 selcols = selcols.union(intcols)
123
124 if len(selcols) <= 1:
125 return None
126
127 with warnings.catch_warnings():
128 warnings.simplefilter("ignore")
129 from phik import phik_matrix
130
131 correlation = phik_matrix(df[selcols], interval_cols=list(intcols))
132
133 return correlation
134
[end of src/pandas_profiling/model/pandas/correlations_pandas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py
--- a/src/pandas_profiling/model/pandas/correlations_pandas.py
+++ b/src/pandas_profiling/model/pandas/correlations_pandas.py
@@ -104,6 +104,8 @@
def pandas_phik_compute(
config: Settings, df: pd.DataFrame, summary: dict
) -> Optional[pd.DataFrame]:
+ df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}
+
intcols = {
key
for key, value in summary.items()
@@ -120,14 +122,15 @@
and 1 < value["n_distinct"] <= config.categorical_maximum_correlation_distinct
}
selcols = selcols.union(intcols)
+ selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])
- if len(selcols) <= 1:
+ if len(selected_cols) <= 1:
return None
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from phik import phik_matrix
- correlation = phik_matrix(df[selcols], interval_cols=list(intcols))
+ correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))
return correlation
| {"golden_diff": "diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py\n--- a/src/pandas_profiling/model/pandas/correlations_pandas.py\n+++ b/src/pandas_profiling/model/pandas/correlations_pandas.py\n@@ -104,6 +104,8 @@\n def pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n ) -> Optional[pd.DataFrame]:\n+ df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}\n+\n intcols = {\n key\n for key, value in summary.items()\n@@ -120,14 +122,15 @@\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n+ selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])\n \n- if len(selcols) <= 1:\n+ if len(selected_cols) <= 1:\n return None\n \n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n \n- correlation = phik_matrix(df[selcols], interval_cols=list(intcols))\n+ correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))\n \n return correlation\n", "issue": "Phi K correlation variable order\nFor me all correlation plots show variables in the (domain-specific sensible) order of the columns in my data frame.\r\n\r\nOnly Phi K shows them in some other order.\r\n\r\nIs this a bug or a feature?\r\n\r\nIs there a setting to get the \"good\" order?\r\n\r\nThis is with pandas 1.3 and pandas-profiling 3.0.0\r\n\r\n<img width=\"879\" alt=\"Screenshot 2021-09-05 at 21 43 55\" src=\"https://user-images.githubusercontent.com/852409/132139566-ba92033b-98fb-4b3d-a869-6c096ed294a1.png\">\r\n<img width=\"907\" alt=\"Screenshot 2021-09-05 at 21 43 45\" src=\"https://user-images.githubusercontent.com/852409/132139567-22e2d9ce-cdc8-4b95-93b2-7445a78ed397.png\">\r\n\n", "before_files": [{"content": "\"\"\"Correlations between variables.\"\"\"\nimport itertools\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import (\n Cramers,\n Kendall,\n Pearson,\n PhiK,\n Spearman,\n)\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_spearman_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"spearman\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_pearson_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"pearson\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_kendall_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"kendall\")\n\n\ndef _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:\n \"\"\"Calculate the Cramer's V corrected stat for two variables.\n\n Args:\n confusion_matrix: Crosstab between two variables.\n correction: Should the correction be applied?\n\n Returns:\n The Cramer's V corrected stat for the two variables.\n \"\"\"\n chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2 / n\n r = confusion_matrix.shape[0]\n k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1\n\n # Deal with NaNs later on\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))\n rcorr = r - ((r - 1.0) ** 2.0) / (n 
- 1.0)\n kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)\n rkcorr = min((kcorr - 1.0), (rcorr - 1.0))\n if rkcorr == 0.0:\n corr = 1.0\n else:\n corr = np.sqrt(phi2corr / rkcorr)\n return corr\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_cramers_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n threshold = config.categorical_maximum_correlation_distinct\n\n categoricals = {\n key\n for key, value in summary.items()\n if value[\"type\"] in {\"Categorical\", \"Boolean\"}\n and value[\"n_distinct\"] <= threshold\n }\n\n if len(categoricals) <= 1:\n return None\n\n matrix = np.zeros((len(categoricals), len(categoricals)))\n np.fill_diagonal(matrix, 1.0)\n correlation_matrix = pd.DataFrame(\n matrix,\n index=categoricals,\n columns=categoricals,\n )\n\n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n confusion_matrix, correction=True\n )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n intcols = {\n key\n for key, value in summary.items()\n # DateTime currently excluded\n # In some use cases, it makes sense to convert it to interval\n # See https://github.com/KaveIO/PhiK/issues/7\n if value[\"type\"] == \"Numeric\" and 1 < value[\"n_distinct\"]\n }\n\n selcols = {\n key\n for key, value in summary.items()\n if value[\"type\"] != \"Unsupported\"\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n\n if len(selcols) <= 1:\n return None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n\n correlation = phik_matrix(df[selcols], interval_cols=list(intcols))\n\n return correlation\n", "path": "src/pandas_profiling/model/pandas/correlations_pandas.py"}]} | 2,176 | 305 |
gh_patches_debug_439 | rasdani/github-patches | git_diff | localstack__localstack-1075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update code climate and badge
https://codeclimate.com/github/atlassian/localstack is the old repo. Is there a new Code Climate check for the new repo? The README is pointing to this old Code Climate project.
</issue>
<code>
[start of localstack/constants.py]
1 import os
2 import localstack_client.config
3
4 # LocalStack version
5 VERSION = '0.8.9'
6
7 # default AWS region
8 if 'DEFAULT_REGION' not in os.environ:
9 os.environ['DEFAULT_REGION'] = 'us-east-1'
10 DEFAULT_REGION = os.environ['DEFAULT_REGION']
11
12 # constant to represent the "local" region, i.e., local machine
13 REGION_LOCAL = 'local'
14
15 # dev environment
16 ENV_DEV = 'dev'
17
18 # backend service ports, for services that are behind a proxy (counting down from 4566)
19 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
20 DEFAULT_PORT_KINESIS_BACKEND = 4565
21 DEFAULT_PORT_DYNAMODB_BACKEND = 4564
22 DEFAULT_PORT_S3_BACKEND = 4563
23 DEFAULT_PORT_SNS_BACKEND = 4562
24 DEFAULT_PORT_SQS_BACKEND = 4561
25 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
26 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
27
28 DEFAULT_PORT_WEB_UI = 8080
29
30 LOCALHOST = 'localhost'
31
32 # version of the Maven dependency with Java utility code
33 LOCALSTACK_MAVEN_VERSION = '0.1.15'
34
35 # map of default service APIs and ports to be spun up (fetch map from localstack_client)
36 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
37
38 # host to bind to when starting the services
39 BIND_HOST = '0.0.0.0'
40
41 # AWS user account ID used for tests
42 TEST_AWS_ACCOUNT_ID = '000000000000'
43 os.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID
44
45 # root code folder
46 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
47
48 # virtualenv folder
49 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
50 if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
51 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
52 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
53
54 # API Gateway path to indicate a user request sent to the gateway
55 PATH_USER_REQUEST = '_user_request_'
56
57 # name of LocalStack Docker image
58 DOCKER_IMAGE_NAME = 'localstack/localstack'
59
60 # environment variable name to tag local test runs
61 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
62
63 # content types
64 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
65 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
66 APPLICATION_JSON = 'application/json'
67
68 # Lambda defaults
69 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
70
71 # installation constants
72 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'
73 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
74 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'
75 STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
76
77 # API endpoint for analytics events
78 API_ENDPOINT = 'https://api.localstack.cloud/v1'
79
[end of localstack/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/constants.py b/localstack/constants.py
--- a/localstack/constants.py
+++ b/localstack/constants.py
@@ -2,7 +2,7 @@
import localstack_client.config
# LocalStack version
-VERSION = '0.8.9'
+VERSION = '0.8.10'
# default AWS region
if 'DEFAULT_REGION' not in os.environ:
| {"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -2,7 +2,7 @@\n import localstack_client.config\n \n # LocalStack version\n-VERSION = '0.8.9'\n+VERSION = '0.8.10'\n \n # default AWS region\n if 'DEFAULT_REGION' not in os.environ:\n", "issue": "Update code climate and badge\nhttps://codeclimate.com/github/atlassian/localstack is the old repo, is there a new code climate check for the new repo? The README is pointing to this old code climate project.\n", "before_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.9'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}]} | 1,526 | 86 |
gh_patches_debug_11599 | rasdani/github-patches | git_diff | DataDog__integrations-core-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[couch] error "local variable 'db_stats' referenced before assignment"
I just started using Datadog and have an issue getting the couch integration to run (on macOS Sierra). 
`/usr/local/bin/datadog-agent info` reports this:
````
Checks
======
ntp
---
- Collected 0 metrics, 0 events & 1 service check
disk
----
- instance #0 [OK]
- Collected 44 metrics, 0 events & 1 service check
network
-------
- instance #0 [OK]
- Collected 27 metrics, 0 events & 1 service check
couch
-----
- instance #0 [ERROR]: "local variable 'db_stats' referenced before assignment"
- Collected 0 metrics, 0 events & 2 service checks
Emitters
========
- http_emitter [OK]
===================
Dogstatsd (v 5.8.0)
===================
Status date: 2017-02-22 17:11:34 (8s ago)
Pid: 85989
Platform: Darwin-16.4.0-x86_64-i386-64bit
Python Version: 2.7.11, 64bit
````
To me, `local variable 'db_stats' referenced before assignment` looks like an error in the couchdb integration library.
</issue>
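The message points at a name that is only bound inside a `try` block: in the `get_data` loop shown below, `db_stats` is assigned in the `try`, and an `HTTPError` whose status is neither 401 nor 403 is handled without either binding it or `continue`-ing, so the following `if db_stats is not None` check can hit an unbound local. A dependency-free sketch of that failure mode (names are made up for illustration):

```python
def check_one_db(db_name, fail=True):
    try:
        if fail:
            # stand-in for _get_stats() raising e.g. a 404 HTTPError
            raise ValueError("simulated HTTP 404 for %s" % db_name)
        db_stats = {"doc_count": 1}
    except ValueError:
        pass  # handled, but db_stats was never bound and we fall through
    if db_stats is not None:  # UnboundLocalError on the failure path
        return db_stats


try:
    check_one_db("_users")
except UnboundLocalError as exc:
    print(exc)  # local variable 'db_stats' referenced before assignment
```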
<code>
[start of couch/check.py]
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 from urlparse import urljoin
7
8 # 3rd party
9 import requests
10
11 # project
12 from checks import AgentCheck
13 from util import headers
14
15
16 class CouchDb(AgentCheck):
17 """Extracts stats from CouchDB via its REST API
18 http://wiki.apache.org/couchdb/Runtime_Statistics
19 """
20
21 MAX_DB = 50
22 SERVICE_CHECK_NAME = 'couchdb.can_connect'
23 SOURCE_TYPE_NAME = 'couchdb'
24 TIMEOUT = 5
25
26 def __init__(self, name, init_config, agentConfig, instances=None):
27 AgentCheck.__init__(self, name, init_config, agentConfig, instances)
28 self.db_blacklist = {}
29
30 def _create_metric(self, data, tags=None):
31 overall_stats = data.get('stats', {})
32 for key, stats in overall_stats.items():
33 for metric, val in stats.items():
34 if val['current'] is not None:
35 metric_name = '.'.join(['couchdb', key, metric])
36 self.gauge(metric_name, val['current'], tags=tags)
37
38 for db_name, db_stats in data.get('databases', {}).items():
39 for name, val in db_stats.items():
40 if name in ['doc_count', 'disk_size'] and val is not None:
41 metric_name = '.'.join(['couchdb', 'by_db', name])
42 metric_tags = list(tags)
43 metric_tags.append('db:%s' % db_name)
44 self.gauge(metric_name, val, tags=metric_tags, device_name=db_name)
45
46 def _get_stats(self, url, instance):
47 "Hit a given URL and return the parsed json"
48 self.log.debug('Fetching Couchdb stats at url: %s' % url)
49
50 auth = None
51 if 'user' in instance and 'password' in instance:
52 auth = (instance['user'], instance['password'])
53 # Override Accept request header so that failures are not redirected to the Futon web-ui
54 request_headers = headers(self.agentConfig)
55 request_headers['Accept'] = 'text/json'
56 r = requests.get(url, auth=auth, headers=request_headers,
57 timeout=int(instance.get('timeout', self.TIMEOUT)))
58 r.raise_for_status()
59 return r.json()
60
61 def check(self, instance):
62 server = instance.get('server', None)
63 if server is None:
64 raise Exception("A server must be specified")
65 data = self.get_data(server, instance)
66 self._create_metric(data, tags=['instance:%s' % server])
67
68 def get_data(self, server, instance):
69 # The dictionary to be returned.
70 couchdb = {'stats': None, 'databases': {}}
71
72 # First, get overall statistics.
73 endpoint = '/_stats/'
74
75 url = urljoin(server, endpoint)
76
77 # Fetch initial stats and capture a service check based on response.
78 service_check_tags = ['instance:%s' % server]
79 try:
80 overall_stats = self._get_stats(url, instance)
81 except requests.exceptions.Timeout as e:
82 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
83 tags=service_check_tags, message="Request timeout: {0}, {1}".format(url, e))
84 raise
85 except requests.exceptions.HTTPError as e:
86 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
87 tags=service_check_tags, message=str(e.message))
88 raise
89 except Exception as e:
90 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
91 tags=service_check_tags, message=str(e))
92 raise
93 else:
94 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
95 tags=service_check_tags,
96 message='Connection to %s was successful' % url)
97
98 # No overall stats? bail out now
99 if overall_stats is None:
100 raise Exception("No stats could be retrieved from %s" % url)
101
102 couchdb['stats'] = overall_stats
103
104 # Next, get all database names.
105 endpoint = '/_all_dbs/'
106
107 url = urljoin(server, endpoint)
108
109 # Get the list of whitelisted databases.
110 db_whitelist = instance.get('db_whitelist')
111 self.db_blacklist.setdefault(server,[])
112 self.db_blacklist[server].extend(instance.get('db_blacklist',[]))
113 whitelist = set(db_whitelist) if db_whitelist else None
114 databases = set(self._get_stats(url, instance)) - set(self.db_blacklist[server])
115 databases = databases.intersection(whitelist) if whitelist else databases
116
117 if len(databases) > self.MAX_DB:
118 self.warning('Too many databases, only the first %s will be checked.' % self.MAX_DB)
119 databases = list(databases)[:self.MAX_DB]
120
121 for dbName in databases:
122 url = urljoin(server, dbName)
123 try:
124 db_stats = self._get_stats(url, instance)
125 except requests.exceptions.HTTPError as e:
126 couchdb['databases'][dbName] = None
127 if (e.response.status_code == 403) or (e.response.status_code == 401):
128 self.db_blacklist[server].append(dbName)
129 self.warning('Database %s is not readable by the configured user. It will be added to the blacklist. Please restart the agent to clear.' % dbName)
130 del couchdb['databases'][dbName]
131 continue
132 if db_stats is not None:
133 couchdb['databases'][dbName] = db_stats
134 return couchdb
135
[end of couch/check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/couch/check.py b/couch/check.py
--- a/couch/check.py
+++ b/couch/check.py
@@ -4,6 +4,7 @@
# stdlib
from urlparse import urljoin
+from urllib import quote
# 3rd party
import requests
@@ -119,7 +120,7 @@
databases = list(databases)[:self.MAX_DB]
for dbName in databases:
- url = urljoin(server, dbName)
+ url = urljoin(server, quote(dbName, safe = ''))
try:
db_stats = self._get_stats(url, instance)
except requests.exceptions.HTTPError as e:
| {"golden_diff": "diff --git a/couch/check.py b/couch/check.py\n--- a/couch/check.py\n+++ b/couch/check.py\n@@ -4,6 +4,7 @@\n \n # stdlib\n from urlparse import urljoin\n+from urllib import quote\n \n # 3rd party\n import requests\n@@ -119,7 +120,7 @@\n databases = list(databases)[:self.MAX_DB]\n \n for dbName in databases:\n- url = urljoin(server, dbName)\n+ url = urljoin(server, quote(dbName, safe = ''))\n try:\n db_stats = self._get_stats(url, instance)\n except requests.exceptions.HTTPError as e:\n", "issue": "[couch] error \"local variable 'db_stats' referenced before assignment\"\nI just started using datadog and have an issue getting the couch integration to run (on MacOS Sierra). \r\n\r\n `/usr/local/bin/datadog-agent info` reports this:\r\n\r\n````\r\n Checks\r\n ======\r\n\r\n ntp\r\n ---\r\n - Collected 0 metrics, 0 events & 1 service check\r\n\r\n disk\r\n ----\r\n - instance #0 [OK]\r\n - Collected 44 metrics, 0 events & 1 service check\r\n\r\n network\r\n -------\r\n - instance #0 [OK]\r\n - Collected 27 metrics, 0 events & 1 service check\r\n\r\n couch\r\n -----\r\n - instance #0 [ERROR]: \"local variable 'db_stats' referenced before assignment\"\r\n - Collected 0 metrics, 0 events & 2 service checks\r\n\r\n\r\n Emitters\r\n ========\r\n\r\n - http_emitter [OK]\r\n\r\n===================\r\nDogstatsd (v 5.8.0)\r\n===================\r\n\r\n Status date: 2017-02-22 17:11:34 (8s ago)\r\n Pid: 85989\r\n Platform: Darwin-16.4.0-x86_64-i386-64bit\r\n Python Version: 2.7.11, 64bit\r\n````\r\n\r\nTo me, `local variable 'db_stats' referenced before assignment` looks like an error in the couchdb integration library.\n", "before_files": [{"content": "# (C) Datadog, Inc. 2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom urlparse import urljoin\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\nfrom util import headers\n\n\nclass CouchDb(AgentCheck):\n \"\"\"Extracts stats from CouchDB via its REST API\n http://wiki.apache.org/couchdb/Runtime_Statistics\n \"\"\"\n\n MAX_DB = 50\n SERVICE_CHECK_NAME = 'couchdb.can_connect'\n SOURCE_TYPE_NAME = 'couchdb'\n TIMEOUT = 5\n\n def __init__(self, name, init_config, agentConfig, instances=None):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n self.db_blacklist = {}\n\n def _create_metric(self, data, tags=None):\n overall_stats = data.get('stats', {})\n for key, stats in overall_stats.items():\n for metric, val in stats.items():\n if val['current'] is not None:\n metric_name = '.'.join(['couchdb', key, metric])\n self.gauge(metric_name, val['current'], tags=tags)\n\n for db_name, db_stats in data.get('databases', {}).items():\n for name, val in db_stats.items():\n if name in ['doc_count', 'disk_size'] and val is not None:\n metric_name = '.'.join(['couchdb', 'by_db', name])\n metric_tags = list(tags)\n metric_tags.append('db:%s' % db_name)\n self.gauge(metric_name, val, tags=metric_tags, device_name=db_name)\n\n def _get_stats(self, url, instance):\n \"Hit a given URL and return the parsed json\"\n self.log.debug('Fetching Couchdb stats at url: %s' % url)\n\n auth = None\n if 'user' in instance and 'password' in instance:\n auth = (instance['user'], instance['password'])\n # Override Accept request header so that failures are not redirected to the Futon web-ui\n request_headers = headers(self.agentConfig)\n request_headers['Accept'] = 'text/json'\n r = requests.get(url, auth=auth, headers=request_headers,\n 
timeout=int(instance.get('timeout', self.TIMEOUT)))\n r.raise_for_status()\n return r.json()\n\n def check(self, instance):\n server = instance.get('server', None)\n if server is None:\n raise Exception(\"A server must be specified\")\n data = self.get_data(server, instance)\n self._create_metric(data, tags=['instance:%s' % server])\n\n def get_data(self, server, instance):\n # The dictionary to be returned.\n couchdb = {'stats': None, 'databases': {}}\n\n # First, get overall statistics.\n endpoint = '/_stats/'\n\n url = urljoin(server, endpoint)\n\n # Fetch initial stats and capture a service check based on response.\n service_check_tags = ['instance:%s' % server]\n try:\n overall_stats = self._get_stats(url, instance)\n except requests.exceptions.Timeout as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=\"Request timeout: {0}, {1}\".format(url, e))\n raise\n except requests.exceptions.HTTPError as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e.message))\n raise\n except Exception as e:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n tags=service_check_tags, message=str(e))\n raise\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags=service_check_tags,\n message='Connection to %s was successful' % url)\n\n # No overall stats? bail out now\n if overall_stats is None:\n raise Exception(\"No stats could be retrieved from %s\" % url)\n\n couchdb['stats'] = overall_stats\n\n # Next, get all database names.\n endpoint = '/_all_dbs/'\n\n url = urljoin(server, endpoint)\n\n # Get the list of whitelisted databases.\n db_whitelist = instance.get('db_whitelist')\n self.db_blacklist.setdefault(server,[])\n self.db_blacklist[server].extend(instance.get('db_blacklist',[]))\n whitelist = set(db_whitelist) if db_whitelist else None\n databases = set(self._get_stats(url, instance)) - set(self.db_blacklist[server])\n databases = databases.intersection(whitelist) if whitelist else databases\n\n if len(databases) > self.MAX_DB:\n self.warning('Too many databases, only the first %s will be checked.' % self.MAX_DB)\n databases = list(databases)[:self.MAX_DB]\n\n for dbName in databases:\n url = urljoin(server, dbName)\n try:\n db_stats = self._get_stats(url, instance)\n except requests.exceptions.HTTPError as e:\n couchdb['databases'][dbName] = None\n if (e.response.status_code == 403) or (e.response.status_code == 401):\n self.db_blacklist[server].append(dbName)\n self.warning('Database %s is not readable by the configured user. It will be added to the blacklist. Please restart the agent to clear.' % dbName)\n del couchdb['databases'][dbName]\n continue\n if db_stats is not None:\n couchdb['databases'][dbName] = db_stats\n return couchdb\n", "path": "couch/check.py"}]} | 2,395 | 146 |
gh_patches_debug_1333 | rasdani/github-patches | git_diff | pypa__virtualenv-2107 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
site.getsitepackages() doesn't respect --system-site-packages on python2
**Issue**
site.getsitepackages() doesn't respect --system-site-packages being set on python2. System site-package paths are never included.
I came across this while working on #2105. In contrast to #2105, this is not specific to Debian, which is why I am creating a separate issue for it.
```
python -m virtualenv --system-site-packages -ppython2 testenv
. testenv/bin/activate
python -c "import site; print(site.getsitepackages())"
['/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/python2.7/site-packages', '/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/site-python']
```
**Environment**
Provide at least:
- OS: Tested on both Ubuntu 18.04.3 LTS and Arch Linux
- ``pip list`` of the host python where ``virtualenv`` is installed:
Same as #2105
**Output of the virtual environment creation**
Make sure to run the creation with `-vvv --with-traceback`:
Same as #2105
</issue>
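A small diagnostic sketch for the activated environment, comparing what `site.getsitepackages()` reports with the site directories that actually made it onto `sys.path`; the `getattr` guard is only there because some Python 2 environments do not define `getsitepackages` at all:

```python
from __future__ import print_function

import site
import sys

reported = set(getattr(site, "getsitepackages", lambda: [])())
on_path = set(
    p for p in sys.path
    if p.rstrip("/").endswith(("site-packages", "dist-packages"))
)

print("reported by getsitepackages():", sorted(reported))
print("on sys.path but not reported: ", sorted(on_path - reported))
```

With `include-system-site-packages = true` in `pyvenv.cfg`, the second line is where the missing system `site-packages`/`dist-packages` paths would be expected to show up.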
<code>
[start of src/virtualenv/create/via_global_ref/builtin/python2/site.py]
1 # -*- coding: utf-8 -*-
2 """
3 A simple shim module to fix up things on Python 2 only.
4
5 Note: until we setup correctly the paths we can only import built-ins.
6 """
7 import sys
8
9
10 def main():
11 """Patch what needed, and invoke the original site.py"""
12 config = read_pyvenv()
13 sys.real_prefix = sys.base_prefix = config["base-prefix"]
14 sys.base_exec_prefix = config["base-exec-prefix"]
15 sys.base_executable = config["base-executable"]
16 global_site_package_enabled = config.get("include-system-site-packages", False) == "true"
17 rewrite_standard_library_sys_path()
18 disable_user_site_package()
19 load_host_site()
20 if global_site_package_enabled:
21 add_global_site_package()
22
23
24 def load_host_site():
25 """trigger reload of site.py - now it will use the standard library instance that will take care of init"""
26 # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies
27 # because this is what pip will be using; the host site.py though may contain it's own pattern for where the
28 # platform and pure library paths should exist
29
30 # notably on Ubuntu there's a patch for getsitepackages to point to
31 # - prefix + local/lib/pythonx.y/dist-packages
32 # - prefix + lib/pythonx.y/dist-packages
33 # while distutils.install.cmd still points both of these to
34 # - prefix + lib/python2.7/site-packages
35
36 # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,
37 # as this will ensure that initialization code within host site.py runs
38
39 here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here
40
41 # ___RELOAD_CODE___
42
43 # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add
44 # them by invoking add_site_dir to trigger the processing of pth files
45 import os
46
47 site_packages = r"""
48 ___EXPECTED_SITE_PACKAGES___
49 """
50 import json
51
52 add_site_dir = sys.modules["site"].addsitedir
53 for path in json.loads(site_packages):
54 full_path = os.path.abspath(os.path.join(here, path.encode("utf-8")))
55 add_site_dir(full_path)
56
57
58 sep = "\\" if sys.platform == "win32" else "/" # no os module here yet - poor mans version
59
60
61 def read_pyvenv():
62 """read pyvenv.cfg"""
63 config_file = "{}{}pyvenv.cfg".format(sys.prefix, sep)
64 with open(config_file) as file_handler:
65 lines = file_handler.readlines()
66 config = {}
67 for line in lines:
68 try:
69 split_at = line.index("=")
70 except ValueError:
71 continue # ignore bad/empty lines
72 else:
73 config[line[:split_at].strip()] = line[split_at + 1 :].strip()
74 return config
75
76
77 def rewrite_standard_library_sys_path():
78 """Once this site file is loaded the standard library paths have already been set, fix them up"""
79 exe, prefix, exec_prefix = get_exe_prefixes(base=False)
80 base_exe, base_prefix, base_exec = get_exe_prefixes(base=True)
81 exe_dir = exe[: exe.rfind(sep)]
82 for at, path in enumerate(sys.path):
83 path = abs_path(path) # replace old sys prefix path starts with new
84 skip_rewrite = path == exe_dir # don't fix the current executable location, notably on Windows this gets added
85 skip_rewrite = skip_rewrite # ___SKIP_REWRITE____
86 if not skip_rewrite:
87 sys.path[at] = map_path(path, base_exe, exe_dir, exec_prefix, base_prefix, prefix, base_exec)
88
89 # the rewrite above may have changed elements from PYTHONPATH, revert these if on
90 if sys.flags.ignore_environment:
91 return
92 import os
93
94 python_paths = []
95 if "PYTHONPATH" in os.environ and os.environ["PYTHONPATH"]:
96 for path in os.environ["PYTHONPATH"].split(os.pathsep):
97 if path not in python_paths:
98 python_paths.append(path)
99 sys.path[: len(python_paths)] = python_paths
100
101
102 def get_exe_prefixes(base=False):
103 return tuple(abs_path(getattr(sys, ("base_" if base else "") + i)) for i in ("executable", "prefix", "exec_prefix"))
104
105
106 def abs_path(value):
107 values, keep = value.split(sep), []
108 at = len(values) - 1
109 while at >= 0:
110 if values[at] == "..":
111 at -= 1
112 else:
113 keep.append(values[at])
114 at -= 1
115 return sep.join(keep[::-1])
116
117
118 def map_path(path, base_executable, exe_dir, exec_prefix, base_prefix, prefix, base_exec_prefix):
119 if path_starts_with(path, exe_dir):
120 # content inside the exe folder needs to remap to original executables folder
121 orig_exe_folder = base_executable[: base_executable.rfind(sep)]
122 return "{}{}".format(orig_exe_folder, path[len(exe_dir) :])
123 elif path_starts_with(path, prefix):
124 return "{}{}".format(base_prefix, path[len(prefix) :])
125 elif path_starts_with(path, exec_prefix):
126 return "{}{}".format(base_exec_prefix, path[len(exec_prefix) :])
127 return path
128
129
130 def path_starts_with(directory, value):
131 return directory.startswith(value if value[-1] == sep else value + sep)
132
133
134 def disable_user_site_package():
135 """Flip the switch on enable user site package"""
136 # sys.flags is a c-extension type, so we cannot monkeypatch it, replace it with a python class to flip it
137 sys.original_flags = sys.flags
138
139 class Flags(object):
140 def __init__(self):
141 self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith("_")}
142
143 sys.flags = Flags()
144 sys.flags.no_user_site = 1
145
146
147 def add_global_site_package():
148 """add the global site package"""
149 import site
150
151 # add user site package
152 sys.flags = sys.original_flags # restore original
153 site.ENABLE_USER_SITE = None # reset user site check
154 # add the global site package to the path - use new prefix and delegate to site.py
155 orig_prefixes = None
156 try:
157 orig_prefixes = site.PREFIXES
158 site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]
159 site.main()
160 finally:
161 site.PREFIXES = orig_prefixes
162
163
164 main()
165
[end of src/virtualenv/create/via_global_ref/builtin/python2/site.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py
--- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py
+++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py
@@ -158,7 +158,7 @@
site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]
site.main()
finally:
- site.PREFIXES = orig_prefixes
+ site.PREFIXES = orig_prefixes + site.PREFIXES
main()
| {"golden_diff": "diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n--- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n+++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n@@ -158,7 +158,7 @@\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n- site.PREFIXES = orig_prefixes\n+ site.PREFIXES = orig_prefixes + site.PREFIXES\n \n \n main()\n", "issue": "site.getsitepackages() doesn't respect --system-site-packages on python2\n**Issue**\r\n\r\nsite.getsitepackages() doesn't respect --system-site-packages being set on python2. System site-package paths are never included. \r\nI came across this while working on #2105. In contrast to #2105 this is not specific to debian, which is why I am creating a separate issue for it.\r\n\r\n```\r\npython -m virtualenv --system-site-packages -ppython2 testenv\r\n. testenv/bin/activate\r\npython -c \"import site; print(site.getsitepackages())\"\r\n['/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/python2.7/site-packages', '/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/site-python']\r\n```\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Tested on both Ubuntu 18.04.3 LTS and Arch Linux\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\nSame as #2105 \r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\nSame as #2105 \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n # platform and pure library paths should exist\n\n # notably on Ubuntu there's a patch for getsitepackages to point to\n # - prefix + local/lib/pythonx.y/dist-packages\n # - prefix + lib/pythonx.y/dist-packages\n # while distutils.install.cmd still points both of these to\n # - prefix + lib/python2.7/site-packages\n\n # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n # as this will ensure that initialization code within host site.py runs\n\n here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n\n # ___RELOAD_CODE___\n\n # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n # them by invoking add_site_dir to trigger the processing of pth files\n import os\n\n site_packages = r\"\"\"\n 
___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n add_site_dir(full_path)\n\n\nsep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n exe, prefix, exec_prefix = get_exe_prefixes(base=False)\n base_exe, base_prefix, base_exec = get_exe_prefixes(base=True)\n exe_dir = exe[: exe.rfind(sep)]\n for at, path in enumerate(sys.path):\n path = abs_path(path) # replace old sys prefix path starts with new\n skip_rewrite = path == exe_dir # don't fix the current executable location, notably on Windows this gets added\n skip_rewrite = skip_rewrite # ___SKIP_REWRITE____\n if not skip_rewrite:\n sys.path[at] = map_path(path, base_exe, exe_dir, exec_prefix, base_prefix, prefix, base_exec)\n\n # the rewrite above may have changed elements from PYTHONPATH, revert these if on\n if sys.flags.ignore_environment:\n return\n import os\n\n python_paths = []\n if \"PYTHONPATH\" in os.environ and os.environ[\"PYTHONPATH\"]:\n for path in os.environ[\"PYTHONPATH\"].split(os.pathsep):\n if path not in python_paths:\n python_paths.append(path)\n sys.path[: len(python_paths)] = python_paths\n\n\ndef get_exe_prefixes(base=False):\n return tuple(abs_path(getattr(sys, (\"base_\" if base else \"\") + i)) for i in (\"executable\", \"prefix\", \"exec_prefix\"))\n\n\ndef abs_path(value):\n values, keep = value.split(sep), []\n at = len(values) - 1\n while at >= 0:\n if values[at] == \"..\":\n at -= 1\n else:\n keep.append(values[at])\n at -= 1\n return sep.join(keep[::-1])\n\n\ndef map_path(path, base_executable, exe_dir, exec_prefix, base_prefix, prefix, base_exec_prefix):\n if path_starts_with(path, exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = base_executable[: base_executable.rfind(sep)]\n return \"{}{}\".format(orig_exe_folder, path[len(exe_dir) :])\n elif path_starts_with(path, prefix):\n return \"{}{}\".format(base_prefix, path[len(prefix) :])\n elif path_starts_with(path, exec_prefix):\n return \"{}{}\".format(base_exec_prefix, path[len(exec_prefix) :])\n return path\n\n\ndef path_starts_with(directory, value):\n return directory.startswith(value if value[-1] == sep else value + sep)\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkeypatch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n 
site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py"}]} | 2,659 | 139 |
gh_patches_debug_8582 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wildcard imports in `monai.losses`
**Is your feature request related to a problem? Please describe.**
`monai.losses` uses wildcard imports to import the losses from the individual files (e.g.
`monai.losses.dice`) This will also import quite a lot of unnecessary things which are imported in those files.
**Describe the solution you'd like**
Use explicit imports like in `monai.metrics` OR add `__all__` to the individual files to specify which functions should be imported by the wildcard import.
**Describe alternatives you've considered**
See above.
**Additional context**
//
</issue>
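For orientation only, here is a minimal sketch of the two options the issue proposes; it is not the repository's actual patch, and the class names are simply taken from the files reproduced below:

    # Option 1: explicit imports in monai/losses/__init__.py
    from .dice import DiceLoss, GeneralizedDiceLoss
    from .focal_loss import FocalLoss
    from .tversky import TverskyLoss

    # Option 2: keep `from .dice import *` but declare, e.g. in monai/losses/dice.py:
    __all__ = ["DiceLoss", "GeneralizedDiceLoss", "dice", "generalized_dice"]

Either approach keeps the public loss classes importable while preventing helper imports such as `torch` or `warnings` from leaking out of the package namespace.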
<code>
[start of monai/losses/dice.py]
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import warnings
13 from typing import Callable
14
15 import torch
16 from torch.nn.modules.loss import _Loss
17
18 from monai.networks.utils import one_hot
19
20
21 class DiceLoss(_Loss):
22 """
23 Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
24 Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
25 Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
26 while the same axis of `target` can be 1 or N (one-hot format). The `smooth` parameter is a value added to the
27 intersection and union components of the inter-over-union calculation to smooth results and prevent divide by 0,
28 this value should be small. The `include_background` class attribute can be set to False for an instance of
29 DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background.
30 If the non-background segmentations are small compared to the total image size they can get overwhelmed by
31 the signal from the background so excluding it in such cases helps convergence.
32 """
33
34 def __init__(
35 self,
36 include_background: bool = True,
37 to_onehot_y: bool = False,
38 do_sigmoid: bool = False,
39 do_softmax: bool = False,
40 squared_pred: bool = False,
41 jaccard: bool = False,
42 reduction: str = "mean",
43 ):
44 """
45 Args:
46 include_background (bool): If False channel index 0 (background category) is excluded from the calculation.
47 to_onehot_y (bool): whether to convert `y` into the one-hot format. Defaults to False.
48 do_sigmoid (bool): If True, apply a sigmoid function to the prediction.
49 do_softmax (bool): If True, apply a softmax function to the prediction.
50 squared_pred (bool): use squared versions of targets and predictions in the denominator or not.
51 jaccard (bool): compute Jaccard Index (soft IoU) instead of dice or not.
52 reduction (`none|mean|sum`): Specifies the reduction to apply to the output:
53 ``'none'``: no reduction will be applied,
54 ``'mean'``: the sum of the output will be divided by the number of elements in the output,
55 ``'sum'``: the output will be summed.
56 Default: ``'mean'``.
57 """
58 super().__init__(reduction=reduction)
59 self.include_background = include_background
60 self.to_onehot_y = to_onehot_y
61 if do_sigmoid and do_softmax:
62 raise ValueError("do_sigmoid=True and do_softmax=True are not compatible.")
63 self.do_sigmoid = do_sigmoid
64 self.do_softmax = do_softmax
65 self.squared_pred = squared_pred
66 self.jaccard = jaccard
67
68 def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):
69 """
70 Args:
71 input (tensor): the shape should be BNH[WD].
72 target (tensor): the shape should be BNH[WD].
73 smooth (float): a small constant to avoid nan.
74 """
75 if self.do_sigmoid:
76 input = torch.sigmoid(input)
77 n_pred_ch = input.shape[1]
78 if n_pred_ch == 1:
79 if self.do_softmax:
80 warnings.warn("single channel prediction, `do_softmax=True` ignored.")
81 if self.to_onehot_y:
82 warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
83 if not self.include_background:
84 warnings.warn("single channel prediction, `include_background=False` ignored.")
85 else:
86 if self.do_softmax:
87 input = torch.softmax(input, 1)
88 if self.to_onehot_y:
89 target = one_hot(target, n_pred_ch)
90 if not self.include_background:
91 # if skipping background, removing first channel
92 target = target[:, 1:]
93 input = input[:, 1:]
94 assert (
95 target.shape == input.shape
96 ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
97
98 # reducing only spatial dimensions (not batch nor channels)
99 reduce_axis = list(range(2, len(input.shape)))
100 intersection = torch.sum(target * input, reduce_axis)
101
102 if self.squared_pred:
103 target = torch.pow(target, 2)
104 input = torch.pow(input, 2)
105
106 ground_o = torch.sum(target, reduce_axis)
107 pred_o = torch.sum(input, reduce_axis)
108
109 denominator = ground_o + pred_o
110
111 if self.jaccard:
112 denominator -= intersection
113
114 f = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)
115 if self.reduction == "sum":
116 return f.sum() # sum over the batch and channel dims
117 if self.reduction == "none":
118 return f # returns [N, n_classes] losses
119 if self.reduction == "mean":
120 return f.mean() # the batch and channel average
121 raise ValueError(f"reduction={self.reduction} is invalid.")
122
123
124 class GeneralizedDiceLoss(_Loss):
125 """
126 Compute the generalised Dice loss defined in:
127
128 Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
129 loss function for highly unbalanced segmentations. DLMIA 2017.
130
131 Adapted from:
132 https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
133 """
134
135 def __init__(
136 self,
137 include_background: bool = True,
138 to_onehot_y: bool = False,
139 do_sigmoid: bool = False,
140 do_softmax: bool = False,
141 w_type: str = "square",
142 reduction: str = "mean",
143 ):
144 """
145 Args:
146 include_background (bool): If False channel index 0 (background category) is excluded from the calculation.
147 to_onehot_y (bool): whether to convert `y` into the one-hot format. Defaults to False.
148 do_sigmoid (bool): If True, apply a sigmoid function to the prediction.
149 do_softmax (bool): If True, apply a softmax function to the prediction.
150 w_type ('square'|'simple'|'uniform'): type of function to transform ground truth volume to a weight factor.
151 Default: `'square'`
152 reduction (`none|mean|sum`): Specifies the reduction to apply to the output:
153 ``'none'``: no reduction will be applied,
154 ``'mean'``: the sum of the output will be divided by the batch size in the output,
155 ``'sum'``: the output will be summed over the batch dim.
156 Default: ``'mean'``.
157 """
158 super().__init__(reduction=reduction)
159 self.include_background = include_background
160 self.to_onehot_y = to_onehot_y
161 if do_sigmoid and do_softmax:
162 raise ValueError("do_sigmoid=True and do_softmax=True are not compatible.")
163 self.do_sigmoid = do_sigmoid
164 self.do_softmax = do_softmax
165 self.w_func: Callable = torch.ones_like
166 if w_type == "simple":
167 self.w_func = torch.reciprocal
168 elif w_type == "square":
169 self.w_func = lambda x: torch.reciprocal(x * x)
170
171 def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):
172 """
173 Args:
174 input (tensor): the shape should be BNH[WD].
175 target (tensor): the shape should be BNH[WD].
176 smooth (float): a small constant to avoid nan.
177 """
178 if self.do_sigmoid:
179 input = torch.sigmoid(input)
180 n_pred_ch = input.shape[1]
181 if n_pred_ch == 1:
182 if self.do_softmax:
183 warnings.warn("single channel prediction, `do_softmax=True` ignored.")
184 if self.to_onehot_y:
185 warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
186 if not self.include_background:
187 warnings.warn("single channel prediction, `include_background=False` ignored.")
188 else:
189 if self.do_softmax:
190 input = torch.softmax(input, 1)
191 if self.to_onehot_y:
192 target = one_hot(target, n_pred_ch)
193 if not self.include_background:
194 # if skipping background, removing first channel
195 target = target[:, 1:]
196 input = input[:, 1:]
197 assert (
198 target.shape == input.shape
199 ), f"ground truth has differing shape ({target.shape}) from input ({input.shape})"
200
201 # reducing only spatial dimensions (not batch nor channels)
202 reduce_axis = list(range(2, len(input.shape)))
203 intersection = torch.sum(target * input, reduce_axis)
204
205 ground_o = torch.sum(target, reduce_axis)
206 pred_o = torch.sum(input, reduce_axis)
207
208 denominator = ground_o + pred_o
209
210 w = self.w_func(ground_o.float())
211 for b in w:
212 infs = torch.isinf(b)
213 b[infs] = 0.0
214 b[infs] = torch.max(b)
215
216 f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / ((denominator * w).sum(1) + smooth)
217 if self.reduction == "sum":
218 return f.sum() # sum over the batch dim
219 if self.reduction == "none":
220 return f # returns [N] losses
221 if self.reduction == "mean":
222 return f.mean() # the batch and channel average
223 raise ValueError(f"reduction={self.reduction} is invalid.")
224
225
226 dice = Dice = DiceLoss
227 generalized_dice = generalised_dice = GeneralizedDiceLoss
228
[end of monai/losses/dice.py]
[start of monai/losses/__init__.py]
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from .dice import *
13 from .focal_loss import *
14 from .tversky import *
15
[end of monai/losses/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/losses/__init__.py b/monai/losses/__init__.py
--- a/monai/losses/__init__.py
+++ b/monai/losses/__init__.py
@@ -9,6 +9,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from .dice import *
-from .focal_loss import *
-from .tversky import *
+from .dice import Dice, DiceLoss, GeneralizedDiceLoss, dice, generalized_dice
+from .focal_loss import FocalLoss
+from .tversky import TverskyLoss
diff --git a/monai/losses/dice.py b/monai/losses/dice.py
--- a/monai/losses/dice.py
+++ b/monai/losses/dice.py
@@ -224,4 +224,4 @@
dice = Dice = DiceLoss
-generalized_dice = generalised_dice = GeneralizedDiceLoss
+generalized_dice = GeneralizedDiceLoss
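In practice the change above only affects what a wildcard import of `monai.losses` exposes (and drops the British-spelling alias `generalised_dice`); normal usage is unchanged, as in this illustrative snippet:

    from monai.losses import DiceLoss, FocalLoss, TverskyLoss

    loss_fn = DiceLoss(to_onehot_y=True, do_softmax=True)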
| {"golden_diff": "diff --git a/monai/losses/__init__.py b/monai/losses/__init__.py\n--- a/monai/losses/__init__.py\n+++ b/monai/losses/__init__.py\n@@ -9,6 +9,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from .dice import *\n-from .focal_loss import *\n-from .tversky import *\n+from .dice import Dice, DiceLoss, GeneralizedDiceLoss, dice, generalized_dice\n+from .focal_loss import FocalLoss\n+from .tversky import TverskyLoss\ndiff --git a/monai/losses/dice.py b/monai/losses/dice.py\n--- a/monai/losses/dice.py\n+++ b/monai/losses/dice.py\n@@ -224,4 +224,4 @@\n \n \n dice = Dice = DiceLoss\n-generalized_dice = generalised_dice = GeneralizedDiceLoss\n+generalized_dice = GeneralizedDiceLoss\n", "issue": "Wildcard imports in `monai.losses`\n**Is your feature request related to a problem? Please describe.**\r\n`monai.losses` uses wildcard imports to import the losses from the individual files (e.g. \r\n `monai.losses.dice`) This will also import quite a lot of unnecessary things which are imported in those files.\r\n\r\n**Describe the solution you'd like**\r\nUse explicit imports like in `monai.metrics` OR add `__all__` to the individual files to specify which functions should be imported by the wildcard import.\r\n\r\n**Describe alternatives you've considered**\r\nSee above.\r\n\r\n**Additional context**\r\n//\r\n\nWildcard imports in `monai.losses`\n**Is your feature request related to a problem? Please describe.**\r\n`monai.losses` uses wildcard imports to import the losses from the individual files (e.g. \r\n `monai.losses.dice`) This will also import quite a lot of unnecessary things which are imported in those files.\r\n\r\n**Describe the solution you'd like**\r\nUse explicit imports like in `monai.metrics` OR add `__all__` to the individual files to specify which functions should be imported by the wildcard import.\r\n\r\n**Describe alternatives you've considered**\r\nSee above.\r\n\r\n**Additional context**\r\n//\r\n\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Callable\n\nimport torch\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.networks.utils import one_hot\n\n\nclass DiceLoss(_Loss):\n \"\"\"\n Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.\n Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).\n Axis N of `input` is expected to have logit predictions for each class rather than being image channels,\n while the same axis of `target` can be 1 or N (one-hot format). The `smooth` parameter is a value added to the\n intersection and union components of the inter-over-union calculation to smooth results and prevent divide by 0,\n this value should be small. 
The `include_background` class attribute can be set to False for an instance of\n DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background.\n If the non-background segmentations are small compared to the total image size they can get overwhelmed by\n the signal from the background so excluding it in such cases helps convergence.\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n do_sigmoid: bool = False,\n do_softmax: bool = False,\n squared_pred: bool = False,\n jaccard: bool = False,\n reduction: str = \"mean\",\n ):\n \"\"\"\n Args:\n include_background (bool): If False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y (bool): whether to convert `y` into the one-hot format. Defaults to False.\n do_sigmoid (bool): If True, apply a sigmoid function to the prediction.\n do_softmax (bool): If True, apply a softmax function to the prediction.\n squared_pred (bool): use squared versions of targets and predictions in the denominator or not.\n jaccard (bool): compute Jaccard Index (soft IoU) instead of dice or not.\n reduction (`none|mean|sum`): Specifies the reduction to apply to the output:\n ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of elements in the output,\n ``'sum'``: the output will be summed.\n Default: ``'mean'``.\n \"\"\"\n super().__init__(reduction=reduction)\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n if do_sigmoid and do_softmax:\n raise ValueError(\"do_sigmoid=True and do_softmax=True are not compatible.\")\n self.do_sigmoid = do_sigmoid\n self.do_softmax = do_softmax\n self.squared_pred = squared_pred\n self.jaccard = jaccard\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input (tensor): the shape should be BNH[WD].\n target (tensor): the shape should be BNH[WD].\n smooth (float): a small constant to avoid nan.\n \"\"\"\n if self.do_sigmoid:\n input = torch.sigmoid(input)\n n_pred_ch = input.shape[1]\n if n_pred_ch == 1:\n if self.do_softmax:\n warnings.warn(\"single channel prediction, `do_softmax=True` ignored.\")\n if self.to_onehot_y:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n if not self.include_background:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n if self.do_softmax:\n input = torch.softmax(input, 1)\n if self.to_onehot_y:\n target = one_hot(target, n_pred_ch)\n if not self.include_background:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, reduce_axis)\n\n if self.squared_pred:\n target = torch.pow(target, 2)\n input = torch.pow(input, 2)\n\n ground_o = torch.sum(target, reduce_axis)\n pred_o = torch.sum(input, reduce_axis)\n\n denominator = ground_o + pred_o\n\n if self.jaccard:\n denominator -= intersection\n\n f = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)\n if self.reduction == \"sum\":\n return f.sum() # sum over the batch and channel dims\n if self.reduction == \"none\":\n return f # returns [N, n_classes] losses\n if self.reduction == 
\"mean\":\n return f.mean() # the batch and channel average\n raise ValueError(f\"reduction={self.reduction} is invalid.\")\n\n\nclass GeneralizedDiceLoss(_Loss):\n \"\"\"\n Compute the generalised Dice loss defined in:\n\n Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning\n loss function for highly unbalanced segmentations. DLMIA 2017.\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n do_sigmoid: bool = False,\n do_softmax: bool = False,\n w_type: str = \"square\",\n reduction: str = \"mean\",\n ):\n \"\"\"\n Args:\n include_background (bool): If False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y (bool): whether to convert `y` into the one-hot format. Defaults to False.\n do_sigmoid (bool): If True, apply a sigmoid function to the prediction.\n do_softmax (bool): If True, apply a softmax function to the prediction.\n w_type ('square'|'simple'|'uniform'): type of function to transform ground truth volume to a weight factor.\n Default: `'square'`\n reduction (`none|mean|sum`): Specifies the reduction to apply to the output:\n ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the batch size in the output,\n ``'sum'``: the output will be summed over the batch dim.\n Default: ``'mean'``.\n \"\"\"\n super().__init__(reduction=reduction)\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n if do_sigmoid and do_softmax:\n raise ValueError(\"do_sigmoid=True and do_softmax=True are not compatible.\")\n self.do_sigmoid = do_sigmoid\n self.do_softmax = do_softmax\n self.w_func: Callable = torch.ones_like\n if w_type == \"simple\":\n self.w_func = torch.reciprocal\n elif w_type == \"square\":\n self.w_func = lambda x: torch.reciprocal(x * x)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, smooth: float = 1e-5):\n \"\"\"\n Args:\n input (tensor): the shape should be BNH[WD].\n target (tensor): the shape should be BNH[WD].\n smooth (float): a small constant to avoid nan.\n \"\"\"\n if self.do_sigmoid:\n input = torch.sigmoid(input)\n n_pred_ch = input.shape[1]\n if n_pred_ch == 1:\n if self.do_softmax:\n warnings.warn(\"single channel prediction, `do_softmax=True` ignored.\")\n if self.to_onehot_y:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n if not self.include_background:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n if self.do_softmax:\n input = torch.softmax(input, 1)\n if self.to_onehot_y:\n target = one_hot(target, n_pred_ch)\n if not self.include_background:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n assert (\n target.shape == input.shape\n ), f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\"\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis = list(range(2, len(input.shape)))\n intersection = torch.sum(target * input, reduce_axis)\n\n ground_o = torch.sum(target, reduce_axis)\n pred_o = torch.sum(input, reduce_axis)\n\n denominator = ground_o + pred_o\n\n w = self.w_func(ground_o.float())\n for b in w:\n infs = torch.isinf(b)\n b[infs] = 0.0\n b[infs] = torch.max(b)\n\n f = 1.0 - (2.0 * (intersection * w).sum(1) + smooth) / ((denominator * w).sum(1) + smooth)\n if self.reduction == \"sum\":\n 
return f.sum() # sum over the batch dim\n if self.reduction == \"none\":\n return f # returns [N] losses\n if self.reduction == \"mean\":\n return f.mean() # the batch and channel average\n raise ValueError(f\"reduction={self.reduction} is invalid.\")\n\n\ndice = Dice = DiceLoss\ngeneralized_dice = generalised_dice = GeneralizedDiceLoss\n", "path": "monai/losses/dice.py"}, {"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .dice import *\nfrom .focal_loss import *\nfrom .tversky import *\n", "path": "monai/losses/__init__.py"}]} | 3,850 | 236 |
gh_patches_debug_56671 | rasdani/github-patches | git_diff | talonhub__community-978 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
please map both comma and coma to , for now in keys.py
"coma" being recognized instead of "comma" comes up a lot with the conformer b-series models.
I think for now we should bind both "comma" and "coma" to , in keys.py,
and revisit this if a future model sorts out the difference better.
</issue>
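For orientation, the request boils down to giving the same symbol a second spoken form in the `punctuation_words` mapping of the file below; a minimal sketch of that idea (not necessarily the exact patch that was merged):

    # inside punctuation_words in code/keys.py
    "comma": ",",
    "coma": ",",  # temporary alias while conformer b-series models confuse the two

Because `symbol_key_words.update(punctuation_words)` runs afterwards, such an alias would automatically become available in command mode as well.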
<code>
[start of code/keys.py]
1 from talon import Context, Module, actions, app
2
3 default_alphabet = "air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip".split(
4 " "
5 )
6 letters_string = "abcdefghijklmnopqrstuvwxyz"
7
8 default_digits = "zero one two three four five six seven eight nine".split(" ")
9 numbers = [str(i) for i in range(10)]
10 default_f_digits = (
11 "one two three four five six seven eight nine ten eleven twelve".split(" ")
12 )
13
14 mod = Module()
15 mod.list("letter", desc="The spoken phonetic alphabet")
16 mod.list("symbol_key", desc="All symbols from the keyboard")
17 mod.list("arrow_key", desc="All arrow keys")
18 mod.list("number_key", desc="All number keys")
19 mod.list("modifier_key", desc="All modifier keys")
20 mod.list("function_key", desc="All function keys")
21 mod.list("special_key", desc="All special keys")
22 mod.list("punctuation", desc="words for inserting punctuation into text")
23
24
25 @mod.capture(rule="{self.modifier_key}+")
26 def modifiers(m) -> str:
27 "One or more modifier keys"
28 return "-".join(m.modifier_key_list)
29
30
31 @mod.capture(rule="{self.arrow_key}")
32 def arrow_key(m) -> str:
33 "One directional arrow key"
34 return m.arrow_key
35
36
37 @mod.capture(rule="<self.arrow_key>+")
38 def arrow_keys(m) -> str:
39 "One or more arrow keys separated by a space"
40 return str(m)
41
42
43 @mod.capture(rule="{self.number_key}")
44 def number_key(m) -> str:
45 "One number key"
46 return m.number_key
47
48
49 @mod.capture(rule="{self.letter}")
50 def letter(m) -> str:
51 "One letter key"
52 return m.letter
53
54
55 @mod.capture(rule="{self.special_key}")
56 def special_key(m) -> str:
57 "One special key"
58 return m.special_key
59
60
61 @mod.capture(rule="{self.symbol_key}")
62 def symbol_key(m) -> str:
63 "One symbol key"
64 return m.symbol_key
65
66
67 @mod.capture(rule="{self.function_key}")
68 def function_key(m) -> str:
69 "One function key"
70 return m.function_key
71
72
73 @mod.capture(rule="( <self.letter> | <self.number_key> | <self.symbol_key> )")
74 def any_alphanumeric_key(m) -> str:
75 "any alphanumeric key"
76 return str(m)
77
78
79 @mod.capture(
80 rule="( <self.letter> | <self.number_key> | <self.symbol_key> "
81 "| <self.arrow_key> | <self.function_key> | <self.special_key> )"
82 )
83 def unmodified_key(m) -> str:
84 "A single key with no modifiers"
85 return str(m)
86
87
88 @mod.capture(rule="{self.modifier_key}* <self.unmodified_key>")
89 def key(m) -> str:
90 "A single key with optional modifiers"
91 try:
92 mods = m.modifier_key_list
93 except AttributeError:
94 mods = []
95 return "-".join(mods + [m.unmodified_key])
96
97
98 @mod.capture(rule="<self.key>+")
99 def keys(m) -> str:
100 "A sequence of one or more keys with optional modifiers"
101 return " ".join(m.key_list)
102
103
104 @mod.capture(rule="{self.letter}+")
105 def letters(m) -> str:
106 "Multiple letter keys"
107 return "".join(m.letter_list)
108
109
110 ctx = Context()
111 modifier_keys = {
112 # If you find 'alt' is often misrecognized, try using 'alter'.
113 "alt": "alt", #'alter': 'alt',
114 "control": "ctrl", #'troll': 'ctrl',
115 "shift": "shift", #'sky': 'shift',
116 "super": "super",
117 }
118 if app.platform == "mac":
119 modifier_keys["command"] = "cmd"
120 modifier_keys["option"] = "alt"
121 ctx.lists["self.modifier_key"] = modifier_keys
122 alphabet = dict(zip(default_alphabet, letters_string))
123 ctx.lists["self.letter"] = alphabet
124
125 # `punctuation_words` is for words you want available BOTH in dictation and as key names in command mode.
126 # `symbol_key_words` is for key names that should be available in command mode, but NOT during dictation.
127 punctuation_words = {
128 # TODO: I'm not sure why we need these, I think it has something to do with
129 # Dragon. Possibly it has been fixed by later improvements to talon? -rntz
130 "`": "`",
131 ",": ",", # <== these things
132 "back tick": "`",
133 "grave": "`",
134 "comma": ",",
135 "period": ".",
136 "full stop": ".",
137 "semicolon": ";",
138 "colon": ":",
139 "forward slash": "/",
140 "question mark": "?",
141 "exclamation mark": "!",
142 "exclamation point": "!",
143 "asterisk": "*",
144 "hash sign": "#",
145 "number sign": "#",
146 "percent sign": "%",
147 "at sign": "@",
148 "and sign": "&",
149 "ampersand": "&",
150 # Currencies
151 "dollar sign": "$",
152 "pound sign": "£",
153 }
154 symbol_key_words = {
155 "dot": ".",
156 "point": ".",
157 "quote": "'",
158 "question": "?",
159 "apostrophe": "'",
160 "L square": "[",
161 "left square": "[",
162 "square": "[",
163 "R square": "]",
164 "right square": "]",
165 "slash": "/",
166 "backslash": "\\",
167 "minus": "-",
168 "dash": "-",
169 "equals": "=",
170 "plus": "+",
171 "tilde": "~",
172 "bang": "!",
173 "down score": "_",
174 "underscore": "_",
175 "paren": "(",
176 "L paren": "(",
177 "left paren": "(",
178 "R paren": ")",
179 "right paren": ")",
180 "brace": "{",
181 "left brace": "{",
182 "brack": "{",
183 "bracket": "{",
184 "left bracket": "{",
185 "r brace": "}",
186 "right brace": "}",
187 "r brack": "}",
188 "r bracket": "}",
189 "right bracket": "}",
190 "angle": "<",
191 "left angle": "<",
192 "less than": "<",
193 "rangle": ">",
194 "R angle": ">",
195 "right angle": ">",
196 "greater than": ">",
197 "star": "*",
198 "hash": "#",
199 "percent": "%",
200 "caret": "^",
201 "amper": "&",
202 "pipe": "|",
203 "dubquote": '"',
204 "double quote": '"',
205 # Currencies
206 "dollar": "$",
207 "pound": "£",
208 }
209
210 # make punctuation words also included in {user.symbol_keys}
211 symbol_key_words.update(punctuation_words)
212 ctx.lists["self.punctuation"] = punctuation_words
213 ctx.lists["self.symbol_key"] = symbol_key_words
214 ctx.lists["self.number_key"] = dict(zip(default_digits, numbers))
215 ctx.lists["self.arrow_key"] = {
216 "down": "down",
217 "left": "left",
218 "right": "right",
219 "up": "up",
220 }
221
222 simple_keys = [
223 "end",
224 "enter",
225 "escape",
226 "home",
227 "insert",
228 "pagedown",
229 "pageup",
230 "space",
231 "tab",
232 ]
233
234 alternate_keys = {
235 "wipe": "backspace",
236 "delete": "backspace",
237 #'junk': 'backspace',
238 "forward delete": "delete",
239 "page up": "pageup",
240 "page down": "pagedown",
241 }
242 # mac apparently doesn't have the menu key.
243 if app.platform in ("windows", "linux"):
244 alternate_keys["menu key"] = "menu"
245 alternate_keys["print screen"] = "printscr"
246
247 special_keys = {k: k for k in simple_keys}
248 special_keys.update(alternate_keys)
249 ctx.lists["self.special_key"] = special_keys
250 ctx.lists["self.function_key"] = {
251 f"F {default_f_digits[i]}": f"f{i + 1}" for i in range(12)
252 }
253
254
255 @mod.action_class
256 class Actions:
257 def move_cursor(s: str):
258 """Given a sequence of directions, eg. 'left left up', moves the cursor accordingly using edit.{left,right,up,down}."""
259 for d in s.split():
260 if d in ("left", "right", "up", "down"):
261 getattr(actions.edit, d)()
262 else:
263 raise RuntimeError(f"invalid arrow key: {d}")
264
[end of code/keys.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/code/keys.py b/code/keys.py
--- a/code/keys.py
+++ b/code/keys.py
@@ -132,6 +132,8 @@
"back tick": "`",
"grave": "`",
"comma": ",",
+ # Workaround for issue with conformer b-series; see #946
+ "coma": ",",
"period": ".",
"full stop": ".",
"semicolon": ";",
| {"golden_diff": "diff --git a/code/keys.py b/code/keys.py\n--- a/code/keys.py\n+++ b/code/keys.py\n@@ -132,6 +132,8 @@\n \"back tick\": \"`\",\n \"grave\": \"`\",\n \"comma\": \",\",\n+ # Workaround for issue with conformer b-series; see #946\n+ \"coma\": \",\",\n \"period\": \".\",\n \"full stop\": \".\",\n \"semicolon\": \";\",\n", "issue": "please map both comma and coma to , for now in keys.py\ncoma being recognized instead of comma comes up a lot for conformer b-series\r\nI think for now we should bind both comma and coma as , in keys.py\r\nand revisit if a future model sorts out the difference better\n", "before_files": [{"content": "from talon import Context, Module, actions, app\n\ndefault_alphabet = \"air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip\".split(\n \" \"\n)\nletters_string = \"abcdefghijklmnopqrstuvwxyz\"\n\ndefault_digits = \"zero one two three four five six seven eight nine\".split(\" \")\nnumbers = [str(i) for i in range(10)]\ndefault_f_digits = (\n \"one two three four five six seven eight nine ten eleven twelve\".split(\" \")\n)\n\nmod = Module()\nmod.list(\"letter\", desc=\"The spoken phonetic alphabet\")\nmod.list(\"symbol_key\", desc=\"All symbols from the keyboard\")\nmod.list(\"arrow_key\", desc=\"All arrow keys\")\nmod.list(\"number_key\", desc=\"All number keys\")\nmod.list(\"modifier_key\", desc=\"All modifier keys\")\nmod.list(\"function_key\", desc=\"All function keys\")\nmod.list(\"special_key\", desc=\"All special keys\")\nmod.list(\"punctuation\", desc=\"words for inserting punctuation into text\")\n\n\[email protected](rule=\"{self.modifier_key}+\")\ndef modifiers(m) -> str:\n \"One or more modifier keys\"\n return \"-\".join(m.modifier_key_list)\n\n\[email protected](rule=\"{self.arrow_key}\")\ndef arrow_key(m) -> str:\n \"One directional arrow key\"\n return m.arrow_key\n\n\[email protected](rule=\"<self.arrow_key>+\")\ndef arrow_keys(m) -> str:\n \"One or more arrow keys separated by a space\"\n return str(m)\n\n\[email protected](rule=\"{self.number_key}\")\ndef number_key(m) -> str:\n \"One number key\"\n return m.number_key\n\n\[email protected](rule=\"{self.letter}\")\ndef letter(m) -> str:\n \"One letter key\"\n return m.letter\n\n\[email protected](rule=\"{self.special_key}\")\ndef special_key(m) -> str:\n \"One special key\"\n return m.special_key\n\n\[email protected](rule=\"{self.symbol_key}\")\ndef symbol_key(m) -> str:\n \"One symbol key\"\n return m.symbol_key\n\n\[email protected](rule=\"{self.function_key}\")\ndef function_key(m) -> str:\n \"One function key\"\n return m.function_key\n\n\[email protected](rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> )\")\ndef any_alphanumeric_key(m) -> str:\n \"any alphanumeric key\"\n return str(m)\n\n\[email protected](\n rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> \"\n \"| <self.arrow_key> | <self.function_key> | <self.special_key> )\"\n)\ndef unmodified_key(m) -> str:\n \"A single key with no modifiers\"\n return str(m)\n\n\[email protected](rule=\"{self.modifier_key}* <self.unmodified_key>\")\ndef key(m) -> str:\n \"A single key with optional modifiers\"\n try:\n mods = m.modifier_key_list\n except AttributeError:\n mods = []\n return \"-\".join(mods + [m.unmodified_key])\n\n\[email protected](rule=\"<self.key>+\")\ndef keys(m) -> str:\n \"A sequence of one or more keys with optional modifiers\"\n return \" \".join(m.key_list)\n\n\[email protected](rule=\"{self.letter}+\")\ndef 
letters(m) -> str:\n \"Multiple letter keys\"\n return \"\".join(m.letter_list)\n\n\nctx = Context()\nmodifier_keys = {\n # If you find 'alt' is often misrecognized, try using 'alter'.\n \"alt\": \"alt\", #'alter': 'alt',\n \"control\": \"ctrl\", #'troll': 'ctrl',\n \"shift\": \"shift\", #'sky': 'shift',\n \"super\": \"super\",\n}\nif app.platform == \"mac\":\n modifier_keys[\"command\"] = \"cmd\"\n modifier_keys[\"option\"] = \"alt\"\nctx.lists[\"self.modifier_key\"] = modifier_keys\nalphabet = dict(zip(default_alphabet, letters_string))\nctx.lists[\"self.letter\"] = alphabet\n\n# `punctuation_words` is for words you want available BOTH in dictation and as key names in command mode.\n# `symbol_key_words` is for key names that should be available in command mode, but NOT during dictation.\npunctuation_words = {\n # TODO: I'm not sure why we need these, I think it has something to do with\n # Dragon. Possibly it has been fixed by later improvements to talon? -rntz\n \"`\": \"`\",\n \",\": \",\", # <== these things\n \"back tick\": \"`\",\n \"grave\": \"`\",\n \"comma\": \",\",\n \"period\": \".\",\n \"full stop\": \".\",\n \"semicolon\": \";\",\n \"colon\": \":\",\n \"forward slash\": \"/\",\n \"question mark\": \"?\",\n \"exclamation mark\": \"!\",\n \"exclamation point\": \"!\",\n \"asterisk\": \"*\",\n \"hash sign\": \"#\",\n \"number sign\": \"#\",\n \"percent sign\": \"%\",\n \"at sign\": \"@\",\n \"and sign\": \"&\",\n \"ampersand\": \"&\",\n # Currencies\n \"dollar sign\": \"$\",\n \"pound sign\": \"\u00a3\",\n}\nsymbol_key_words = {\n \"dot\": \".\",\n \"point\": \".\",\n \"quote\": \"'\",\n \"question\": \"?\",\n \"apostrophe\": \"'\",\n \"L square\": \"[\",\n \"left square\": \"[\",\n \"square\": \"[\",\n \"R square\": \"]\",\n \"right square\": \"]\",\n \"slash\": \"/\",\n \"backslash\": \"\\\\\",\n \"minus\": \"-\",\n \"dash\": \"-\",\n \"equals\": \"=\",\n \"plus\": \"+\",\n \"tilde\": \"~\",\n \"bang\": \"!\",\n \"down score\": \"_\",\n \"underscore\": \"_\",\n \"paren\": \"(\",\n \"L paren\": \"(\",\n \"left paren\": \"(\",\n \"R paren\": \")\",\n \"right paren\": \")\",\n \"brace\": \"{\",\n \"left brace\": \"{\",\n \"brack\": \"{\",\n \"bracket\": \"{\",\n \"left bracket\": \"{\",\n \"r brace\": \"}\",\n \"right brace\": \"}\",\n \"r brack\": \"}\",\n \"r bracket\": \"}\",\n \"right bracket\": \"}\",\n \"angle\": \"<\",\n \"left angle\": \"<\",\n \"less than\": \"<\",\n \"rangle\": \">\",\n \"R angle\": \">\",\n \"right angle\": \">\",\n \"greater than\": \">\",\n \"star\": \"*\",\n \"hash\": \"#\",\n \"percent\": \"%\",\n \"caret\": \"^\",\n \"amper\": \"&\",\n \"pipe\": \"|\",\n \"dubquote\": '\"',\n \"double quote\": '\"',\n # Currencies\n \"dollar\": \"$\",\n \"pound\": \"\u00a3\",\n}\n\n# make punctuation words also included in {user.symbol_keys}\nsymbol_key_words.update(punctuation_words)\nctx.lists[\"self.punctuation\"] = punctuation_words\nctx.lists[\"self.symbol_key\"] = symbol_key_words\nctx.lists[\"self.number_key\"] = dict(zip(default_digits, numbers))\nctx.lists[\"self.arrow_key\"] = {\n \"down\": \"down\",\n \"left\": \"left\",\n \"right\": \"right\",\n \"up\": \"up\",\n}\n\nsimple_keys = [\n \"end\",\n \"enter\",\n \"escape\",\n \"home\",\n \"insert\",\n \"pagedown\",\n \"pageup\",\n \"space\",\n \"tab\",\n]\n\nalternate_keys = {\n \"wipe\": \"backspace\",\n \"delete\": \"backspace\",\n #'junk': 'backspace',\n \"forward delete\": \"delete\",\n \"page up\": \"pageup\",\n \"page down\": \"pagedown\",\n}\n# mac apparently doesn't have the menu key.\nif 
app.platform in (\"windows\", \"linux\"):\n alternate_keys[\"menu key\"] = \"menu\"\n alternate_keys[\"print screen\"] = \"printscr\"\n\nspecial_keys = {k: k for k in simple_keys}\nspecial_keys.update(alternate_keys)\nctx.lists[\"self.special_key\"] = special_keys\nctx.lists[\"self.function_key\"] = {\n f\"F {default_f_digits[i]}\": f\"f{i + 1}\" for i in range(12)\n}\n\n\[email protected]_class\nclass Actions:\n def move_cursor(s: str):\n \"\"\"Given a sequence of directions, eg. 'left left up', moves the cursor accordingly using edit.{left,right,up,down}.\"\"\"\n for d in s.split():\n if d in (\"left\", \"right\", \"up\", \"down\"):\n getattr(actions.edit, d)()\n else:\n raise RuntimeError(f\"invalid arrow key: {d}\")\n", "path": "code/keys.py"}]} | 3,187 | 106 |
gh_patches_debug_13138 | rasdani/github-patches | git_diff | ivy-llc__ivy-15454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cosh
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/math.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
10 @to_ivy_arrays_and_back
11 def sin(x, name=None):
12 return ivy.sin(x)
13
14
15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
16 @to_ivy_arrays_and_back
17 def cos(x, name=None):
18 return ivy.cos(x)
19
20
21
22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
23 @to_ivy_arrays_and_back
24 def acos(x, name=None):
25 return ivy.acos(x)
26
27
28
29 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
30 @to_ivy_arrays_and_back
31 def tanh(x, name=None):
32 return ivy.tanh(x)
33
34
[end of ivy/functional/frontends/paddle/tensor/math.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py
--- a/ivy/functional/frontends/paddle/tensor/math.py
+++ b/ivy/functional/frontends/paddle/tensor/math.py
@@ -18,16 +18,19 @@
return ivy.cos(x)
-
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def acos(x, name=None):
return ivy.acos(x)
+@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+@to_ivy_arrays_and_back
+def cosh(x, name=None):
+ return ivy.cosh(x)
+
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def tanh(x, name=None):
return ivy.tanh(x)
-
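As a quick, purely illustrative check of the addition above: the new `cosh` wrapper follows the same pattern as the neighbouring `sin`/`cos`/`tanh` functions and simply delegates to `ivy.cosh`:

    import ivy
    from ivy.functional.frontends.paddle.tensor import math as paddle_math

    paddle_math.cosh(ivy.array([0.0, 1.0]))  # same values as ivy.cosh(ivy.array([0.0, 1.0]))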
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -18,16 +18,19 @@\n return ivy.cos(x)\n \n \n-\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def acos(x, name=None):\n return ivy.acos(x)\n \n \n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def cosh(x, name=None):\n+ return ivy.cosh(x)\n+\n \n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def tanh(x, name=None):\n return ivy.tanh(x)\n-\n", "issue": "cosh\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]} | 877 | 255 |
gh_patches_debug_61251 | rasdani/github-patches | git_diff | ultrabug__py3status-1378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better handling of transient interfaces in net_rate
I use net_rate for a couple of tun interfaces that are transient depending on VPN connections. It would be nice if net_rate had some additional configuration on what to do for non-existent interfaces. Currently, the hardcoded text "net_rate" shows up, where ideally I'd just have no output from net_rate.
I saw "format_no_connection" - it doesn't help with this case but seems like a potential candidate to tie in to this (where no connection and no device are arguably similar).
I'll look into implementing this, but figured I'd park the feature request here for now.
</issue>
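A minimal sketch of one way to address the request above: skip output entirely when none of the tracked interfaces currently exist, falling back to a dedicated string. The `format_no_device` option name and the helper are assumptions for illustration only, not part of the py3status module.

```
# Hypothetical sketch: fall back to a dedicated string when none of the
# configured (possibly transient) interfaces are present in /proc/net/dev.
format_no_device = ''  # assumed new option, analogous to format_no_connection

def _present_interfaces(self):
    # Interface names currently listed in the dev file (skip the two headers).
    with open(self.devfile) as f:
        lines = f.readlines()[2:]
    return {line.strip().split(':')[0] for line in lines}

def currentSpeed(self):
    present = self._present_interfaces()
    if self.interfaces and not (set(self.interfaces) & present):
        # None of the tracked interfaces (e.g. tun devices) exist right now.
        return {
            'cached_until': self.py3.time_in(self.cache_timeout),
            'full_text': self.format_no_device,
        }
    # ... the existing rate calculation would continue here ...
```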
<code>
[start of py3status/modules/net_rate.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display network transfer rate.
4
5 Configuration parameters:
6 all_interfaces: ignore self.interfaces, but not self.interfaces_blacklist
7 (default True)
8 cache_timeout: how often we refresh this module in seconds
9 (default 2)
10 devfile: location of dev file under /proc
11 (default '/proc/net/dev')
12 format: format of the module output
13 (default '{interface}: {total}')
14 format_no_connection: when there is no data transmitted from the start of the plugin
15 (default '')
16 format_value: format to use for values
17 (default "[\?min_length=11 {value:.1f} {unit}]")
18 hide_if_zero: hide indicator if rate == 0
19 (default False)
20 interfaces: comma separated list of interfaces to track
21 (default [])
22 interfaces_blacklist: comma separated list of interfaces to ignore
23 (default 'lo')
24 si_units: use SI units
25 (default False)
26 sum_values: sum values of each interface instead of taking the top one
27 (default False)
28 thresholds: thresholds to use for colors
29 (default [(0, 'bad'), (1024, 'degraded'), (1024 * 1024, 'good')])
30 unit: unit to use. If the unit contains a multiplier prefix, only this
31 exact unit will ever be used
32 (default "B/s")
33
34 Format placeholders:
35 {down} download rate
36 {interface} name of interface
37 {total} total rate
38 {up} upload rate
39
40 format_value placeholders:
41 {unit} current unit
42 {value} numeric value
43
44 Color thresholds:
45 {down} Change color based on the value of down
46 {total} Change color based on the value of total
47 {up} Change color based on the value of up
48
49 @author shadowprince
50 @license Eclipse Public License
51
52 SAMPLE OUTPUT
53 {'full_text': 'eno1: 852.2 KiB/s'}
54 """
55
56 from __future__ import division # python2 compatibility
57 from time import time
58
59
60 class Py3status:
61 """
62 """
63 # available configuration parameters
64 all_interfaces = True
65 cache_timeout = 2
66 devfile = '/proc/net/dev'
67 format = "{interface}: {total}"
68 format_no_connection = ''
69 format_value = "[\?min_length=11 {value:.1f} {unit}]"
70 hide_if_zero = False
71 interfaces = []
72 interfaces_blacklist = 'lo'
73 si_units = False
74 sum_values = False
75 thresholds = [(0, "bad"), (1024, "degraded"), (1024 * 1024, "good")]
76 unit = "B/s"
77
78 class Meta:
79
80 def deprecate_function(config):
81 # support old thresholds
82 precision = config.get('precision', 1)
83 padding = 3 + 1 + precision + 1 + 5
84 format_value = "[\?min_length={padding} {{value:.{precision}f}} {{unit}}]".format(
85 padding=padding, precision=precision
86 )
87 return {'format_value': format_value}
88
89 deprecated = {
90 'function': [
91 {'function': deprecate_function},
92 ],
93 'remove': [
94 {
95 'param': 'precision',
96 'msg': 'obsolete, use format_value instead',
97 },
98 ]
99 }
100
101 def post_config_hook(self):
102 # parse some configuration parameters
103 if not isinstance(self.interfaces, list):
104 self.interfaces = self.interfaces.split(',')
105 if not isinstance(self.interfaces_blacklist, list):
106 self.interfaces_blacklist = self.interfaces_blacklist.split(',')
107 placeholders = self.py3.get_placeholder_formats_list(self.format_value)
108 values = ['{%s}' % x[1] for x in placeholders if x[0] == 'value']
109 self._value_formats = values
110 # last
111 self.last_interface = None
112 self.last_stat = self._get_stat()
113 self.last_time = time()
114
115 def currentSpeed(self):
116 ns = self._get_stat()
117 deltas = {}
118 try:
119 # time from previous check
120 timedelta = time() - self.last_time
121
122 # calculate deltas for all interfaces
123 for old, new in zip(self.last_stat, ns):
124 down = int(new[1]) - int(old[1])
125 up = int(new[9]) - int(old[9])
126
127 down /= timedelta
128 up /= timedelta
129
130 deltas[new[0]] = {'total': up + down, 'up': up, 'down': down, }
131
132 # update last_ info
133 self.last_stat = self._get_stat()
134 self.last_time = time()
135
136 # get the interface with max rate
137 if self.sum_values:
138 interface = 'sum'
139 sum_up = sum([itm['up'] for _, itm in deltas.items()])
140 sum_down = sum([itm['down'] for _, itm in deltas.items()])
141 deltas[interface] = {'total': sum_up + sum_down, 'up': sum_up, 'down': sum_down}
142 else:
143 interface = max(deltas, key=lambda x: deltas[x]['total'])
144
145 # if there is no rate - show last active interface, or hide
146
147 # we need to check if it will be zero after it is formatted
148 # with the desired unit eg MB/s
149 total, _ = self.py3.format_units(
150 deltas[interface]['total'], unit=self.unit, si=self.si_units
151 )
152 values = [float(x.format(total)) for x in self._value_formats]
153 if max(values) == 0:
154 interface = self.last_interface
155 hide = self.hide_if_zero
156 # if there is - update last_interface
157 else:
158 self.last_interface = interface
159 hide = False
160
161 # get the deltas into variable
162 delta = deltas[interface] if interface else None
163
164 except TypeError:
165 delta = None
166 interface = None
167 hide = self.hide_if_zero
168
169 response = {'cached_until': self.py3.time_in(self.cache_timeout)}
170
171 if hide:
172 response['full_text'] = ""
173 elif not interface:
174 response['full_text'] = self.format_no_connection
175 else:
176 self.py3.threshold_get_color(delta['down'], 'down')
177 self.py3.threshold_get_color(delta['total'], 'total')
178 self.py3.threshold_get_color(delta['up'], 'up')
179 response['full_text'] = self.py3.safe_format(self.format, {
180 'down': self._format_value(delta['down']),
181 'total': self._format_value(delta['total']),
182 'up': self._format_value(delta['up']),
183 'interface': interface[:-1],
184 })
185
186 return response
187
188 def _get_stat(self):
189 """
190 Get statistics from devfile in list of lists of words
191 """
192 def dev_filter(x):
193 # get first word and remove trailing interface number
194 x = x.strip().split(" ")[0][:-1]
195
196 if x in self.interfaces_blacklist:
197 return False
198
199 if self.all_interfaces:
200 return True
201
202 if x in self.interfaces:
203 return True
204
205 return False
206
207 # read devfile, skip two header files
208 x = filter(dev_filter, open(self.devfile).readlines()[2:])
209
210 try:
211 # split info into words, filter empty ones
212 return [list(filter(lambda x: x, _x.split(" "))) for _x in x]
213
214 except StopIteration:
215 return None
216
217 def _format_value(self, value):
218 """
219 Return formatted string
220 """
221 value, unit = self.py3.format_units(value, unit=self.unit, si=self.si_units)
222 return self.py3.safe_format(self.format_value, {'value': value, 'unit': unit})
223
224
225 if __name__ == "__main__":
226 """
227 Run module in test mode.
228 """
229 from py3status.module_test import module_test
230 module_test(Py3status)
231
[end of py3status/modules/net_rate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/modules/net_rate.py b/py3status/modules/net_rate.py
--- a/py3status/modules/net_rate.py
+++ b/py3status/modules/net_rate.py
@@ -161,7 +161,7 @@
# get the deltas into variable
delta = deltas[interface] if interface else None
- except TypeError:
+ except (TypeError, ValueError):
delta = None
interface = None
hide = self.hide_if_zero
| {"golden_diff": "diff --git a/py3status/modules/net_rate.py b/py3status/modules/net_rate.py\n--- a/py3status/modules/net_rate.py\n+++ b/py3status/modules/net_rate.py\n@@ -161,7 +161,7 @@\n # get the deltas into variable\n delta = deltas[interface] if interface else None\n \n- except TypeError:\n+ except (TypeError, ValueError):\n delta = None\n interface = None\n hide = self.hide_if_zero\n", "issue": "Better handling of transient interfaces in net_rate\nI use net_rate for a couple of tun interfaces that are transient depending on VPN connections. It would be nice if net_rate had some additional configuration on what to do for non-existent interfaces. Currently, the hardcoded text \"net_rate\" shows up, where ideally I'd just have no output from net_rate.\r\n\r\nI saw \"format_no_connection\" - it doesn't help with this case but seems like a potential candidate to tie in to this (where no connection and no device are arguably similar).\r\n\r\nI'll look into implementing this, but figured I'd park the feature request here for now.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay network transfer rate.\n\nConfiguration parameters:\n all_interfaces: ignore self.interfaces, but not self.interfaces_blacklist\n (default True)\n cache_timeout: how often we refresh this module in seconds\n (default 2)\n devfile: location of dev file under /proc\n (default '/proc/net/dev')\n format: format of the module output\n (default '{interface}: {total}')\n format_no_connection: when there is no data transmitted from the start of the plugin\n (default '')\n format_value: format to use for values\n (default \"[\\?min_length=11 {value:.1f} {unit}]\")\n hide_if_zero: hide indicator if rate == 0\n (default False)\n interfaces: comma separated list of interfaces to track\n (default [])\n interfaces_blacklist: comma separated list of interfaces to ignore\n (default 'lo')\n si_units: use SI units\n (default False)\n sum_values: sum values of each interface instead of taking the top one\n (default False)\n thresholds: thresholds to use for colors\n (default [(0, 'bad'), (1024, 'degraded'), (1024 * 1024, 'good')])\n unit: unit to use. 
If the unit contains a multiplier prefix, only this\n exact unit will ever be used\n (default \"B/s\")\n\nFormat placeholders:\n {down} download rate\n {interface} name of interface\n {total} total rate\n {up} upload rate\n\nformat_value placeholders:\n {unit} current unit\n {value} numeric value\n\nColor thresholds:\n {down} Change color based on the value of down\n {total} Change color based on the value of total\n {up} Change color based on the value of up\n\n@author shadowprince\n@license Eclipse Public License\n\nSAMPLE OUTPUT\n{'full_text': 'eno1: 852.2 KiB/s'}\n\"\"\"\n\nfrom __future__ import division # python2 compatibility\nfrom time import time\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n all_interfaces = True\n cache_timeout = 2\n devfile = '/proc/net/dev'\n format = \"{interface}: {total}\"\n format_no_connection = ''\n format_value = \"[\\?min_length=11 {value:.1f} {unit}]\"\n hide_if_zero = False\n interfaces = []\n interfaces_blacklist = 'lo'\n si_units = False\n sum_values = False\n thresholds = [(0, \"bad\"), (1024, \"degraded\"), (1024 * 1024, \"good\")]\n unit = \"B/s\"\n\n class Meta:\n\n def deprecate_function(config):\n # support old thresholds\n precision = config.get('precision', 1)\n padding = 3 + 1 + precision + 1 + 5\n format_value = \"[\\?min_length={padding} {{value:.{precision}f}} {{unit}}]\".format(\n padding=padding, precision=precision\n )\n return {'format_value': format_value}\n\n deprecated = {\n 'function': [\n {'function': deprecate_function},\n ],\n 'remove': [\n {\n 'param': 'precision',\n 'msg': 'obsolete, use format_value instead',\n },\n ]\n }\n\n def post_config_hook(self):\n # parse some configuration parameters\n if not isinstance(self.interfaces, list):\n self.interfaces = self.interfaces.split(',')\n if not isinstance(self.interfaces_blacklist, list):\n self.interfaces_blacklist = self.interfaces_blacklist.split(',')\n placeholders = self.py3.get_placeholder_formats_list(self.format_value)\n values = ['{%s}' % x[1] for x in placeholders if x[0] == 'value']\n self._value_formats = values\n # last\n self.last_interface = None\n self.last_stat = self._get_stat()\n self.last_time = time()\n\n def currentSpeed(self):\n ns = self._get_stat()\n deltas = {}\n try:\n # time from previous check\n timedelta = time() - self.last_time\n\n # calculate deltas for all interfaces\n for old, new in zip(self.last_stat, ns):\n down = int(new[1]) - int(old[1])\n up = int(new[9]) - int(old[9])\n\n down /= timedelta\n up /= timedelta\n\n deltas[new[0]] = {'total': up + down, 'up': up, 'down': down, }\n\n # update last_ info\n self.last_stat = self._get_stat()\n self.last_time = time()\n\n # get the interface with max rate\n if self.sum_values:\n interface = 'sum'\n sum_up = sum([itm['up'] for _, itm in deltas.items()])\n sum_down = sum([itm['down'] for _, itm in deltas.items()])\n deltas[interface] = {'total': sum_up + sum_down, 'up': sum_up, 'down': sum_down}\n else:\n interface = max(deltas, key=lambda x: deltas[x]['total'])\n\n # if there is no rate - show last active interface, or hide\n\n # we need to check if it will be zero after it is formatted\n # with the desired unit eg MB/s\n total, _ = self.py3.format_units(\n deltas[interface]['total'], unit=self.unit, si=self.si_units\n )\n values = [float(x.format(total)) for x in self._value_formats]\n if max(values) == 0:\n interface = self.last_interface\n hide = self.hide_if_zero\n # if there is - update last_interface\n else:\n self.last_interface = interface\n hide = 
False\n\n # get the deltas into variable\n delta = deltas[interface] if interface else None\n\n except TypeError:\n delta = None\n interface = None\n hide = self.hide_if_zero\n\n response = {'cached_until': self.py3.time_in(self.cache_timeout)}\n\n if hide:\n response['full_text'] = \"\"\n elif not interface:\n response['full_text'] = self.format_no_connection\n else:\n self.py3.threshold_get_color(delta['down'], 'down')\n self.py3.threshold_get_color(delta['total'], 'total')\n self.py3.threshold_get_color(delta['up'], 'up')\n response['full_text'] = self.py3.safe_format(self.format, {\n 'down': self._format_value(delta['down']),\n 'total': self._format_value(delta['total']),\n 'up': self._format_value(delta['up']),\n 'interface': interface[:-1],\n })\n\n return response\n\n def _get_stat(self):\n \"\"\"\n Get statistics from devfile in list of lists of words\n \"\"\"\n def dev_filter(x):\n # get first word and remove trailing interface number\n x = x.strip().split(\" \")[0][:-1]\n\n if x in self.interfaces_blacklist:\n return False\n\n if self.all_interfaces:\n return True\n\n if x in self.interfaces:\n return True\n\n return False\n\n # read devfile, skip two header files\n x = filter(dev_filter, open(self.devfile).readlines()[2:])\n\n try:\n # split info into words, filter empty ones\n return [list(filter(lambda x: x, _x.split(\" \"))) for _x in x]\n\n except StopIteration:\n return None\n\n def _format_value(self, value):\n \"\"\"\n Return formatted string\n \"\"\"\n value, unit = self.py3.format_units(value, unit=self.unit, si=self.si_units)\n return self.py3.safe_format(self.format_value, {'value': value, 'unit': unit})\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/net_rate.py"}]} | 3,007 | 105 |
gh_patches_debug_569 | rasdani/github-patches | git_diff | pex-tool__pex-945 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.8
On the docket:
+ [x] Cache pip.pex. #937
+ [x] Ensure the interpreter path is a file #938
+ [x] Support an unzip toggle for PEXes. #939
+ [x] Better support unzip mode PEXes. #941
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.7'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.7'
+__version__ = '2.1.8'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.7'\n+__version__ = '2.1.8'\n", "issue": "Release 2.1.8\nOn the docket:\r\n+ [x] Cache pip.pex. #937\r\n+ [x] Ensure the interpreter path is a file #938\r\n+ [x] Support an unzip toggle for PEXes. #939\r\n+ [x] Better support unzip mode PEXes. #941\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.7'\n", "path": "pex/version.py"}]} | 659 | 94 |
gh_patches_debug_8594 | rasdani/github-patches | git_diff | mozilla__bugbug-1094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support multiclass classifiers in bug_classifier script
The bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask).
In particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed.
To test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask`
</issue>
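A minimal sketch of generalising the binary-only block referenced above. The names `np`, `model` and `bug` refer to the objects in the script shown below; `model.le` (a scikit-learn style label encoder) is taken from the accompanying patch and should be treated as an assumption about the model class.

```
# Hypothetical sketch for printing multiclass results instead of the
# hard-coded Positive/Negative pair.
probas = model.classify(bug, probabilities=True, importances=False)
probability = probas[0]
pred_index = np.argmax(probability)
if len(probability) > 2:
    # Multiclass: map the argmax index back to a human-readable class label.
    pred_class = model.le.inverse_transform([pred_index])[0]
else:
    pred_class = "Positive" if pred_index == 1 else "Negative"
print(f"{pred_class} {probability}")
```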
<code>
[start of scripts/bug_classifier.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import os
5 from logging import INFO, basicConfig, getLogger
6
7 import numpy as np
8 import requests
9
10 from bugbug import bugzilla
11 from bugbug.models import get_model_class
12 from bugbug.utils import download_check_etag, zstd_decompress
13
14 MODELS_WITH_TYPE = ("component",)
15
16 basicConfig(level=INFO)
17 logger = getLogger(__name__)
18
19
20 def classify_bugs(model_name, classifier, bug_id):
21 if classifier != "default":
22 assert (
23 model_name in MODELS_WITH_TYPE
24 ), f"{classifier} is not a valid classifier type for {model_name}"
25
26 model_file_name = f"{model_name}{classifier}model"
27 model_name = f"{model_name}_{classifier}"
28 else:
29 model_file_name = f"{model_name}model"
30
31 if not os.path.exists(model_file_name):
32 logger.info(f"{model_file_name} does not exist. Downloading the model....")
33 try:
34 download_check_etag(
35 f"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst",
36 f"{model_file_name}.zst",
37 )
38 except requests.HTTPError:
39 logger.error(
40 f"A pre-trained model is not available, you will need to train it yourself using the trainer script"
41 )
42 raise SystemExit(1)
43
44 zstd_decompress(model_file_name)
45 assert os.path.exists(model_file_name), "Decompressed file doesn't exist"
46
47 model_class = get_model_class(model_name)
48 model = model_class.load(model_file_name)
49
50 if bug_id:
51 bugs = bugzilla.get(bug_id).values()
52 assert bugs, f"A bug with a bug id of {bug_id} was not found"
53 else:
54 bugs = bugzilla.get_bugs()
55
56 for bug in bugs:
57 print(
58 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} '
59 )
60
61 if model.calculate_importance:
62 probas, importance = model.classify(
63 bug, probabilities=True, importances=True
64 )
65
66 model.print_feature_importances(
67 importance["importances"], class_probabilities=probas
68 )
69 else:
70 probas = model.classify(bug, probabilities=True, importances=False)
71
72 if np.argmax(probas) == 1:
73 print(f"Positive! {probas}")
74 else:
75 print(f"Negative! {probas}")
76 input()
77
78
79 def main():
80 description = "Perform evaluation on bugs using the specified model"
81 parser = argparse.ArgumentParser(description=description)
82
83 parser.add_argument("model", help="Which model to use for evaluation")
84 parser.add_argument(
85 "--classifier",
86 help="Type of the classifier. Only used for component classification.",
87 choices=["default", "nn"],
88 default="default",
89 )
90 parser.add_argument("--bug-id", help="Classify the given bug id")
91
92 args = parser.parse_args()
93
94 classify_bugs(args.model, args.classifier, args.bug_id)
95
96
97 if __name__ == "__main__":
98 main()
99
[end of scripts/bug_classifier.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py
--- a/scripts/bug_classifier.py
+++ b/scripts/bug_classifier.py
@@ -69,10 +69,13 @@
else:
probas = model.classify(bug, probabilities=True, importances=False)
- if np.argmax(probas) == 1:
- print(f"Positive! {probas}")
+ probability = probas[0]
+ pred_index = np.argmax(probability)
+ if len(probability) > 2:
+ pred_class = model.le.inverse_transform([pred_index])[0]
else:
- print(f"Negative! {probas}")
+ pred_class = "Positive" if pred_index == 1 else "Negative"
+ print(f"{pred_class} {probability}")
input()
| {"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -69,10 +69,13 @@\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n \n- if np.argmax(probas) == 1:\n- print(f\"Positive! {probas}\")\n+ probability = probas[0]\n+ pred_index = np.argmax(probability)\n+ if len(probability) > 2:\n+ pred_class = model.le.inverse_transform([pred_index])[0]\n else:\n- print(f\"Negative! {probas}\")\n+ pred_class = \"Positive\" if pred_index == 1 else \"Negative\"\n+ print(f\"{pred_class} {probability}\")\n input()\n", "issue": "Support multiclass classifiers in bug_classifier script\nThe bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask).\r\n\r\nIn particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed.\r\n\r\nTo test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n if bug_id:\n bugs = bugzilla.get(bug_id).values()\n assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n else:\n bugs = bugzilla.get_bugs()\n\n for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n model.print_feature_importances(\n importance[\"importances\"], class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier, args.bug_id)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/bug_classifier.py"}]} | 1,552 | 181 |
gh_patches_debug_19749 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2162 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OH: `get_session_list` cannot "see" current session
Ohio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed so that the current session is "guessed" automatically instead.
https://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92
</issue>
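A minimal sketch of the approach the issue asks for: discover every session from a live page instead of appending the newest one by hand. The URL and XPath below mirror the ones used in the patch later in this record, but they depend on the Ohio legislature site's structure and should be verified rather than taken as given.

```
# Hypothetical sketch: scrape all session identifiers from a search page
# instead of hard-coding the current session.
def get_session_list(self):
    # Selector assumed from the accompanying patch; verify against the site.
    return url_xpath(
        'https://www.legislature.ohio.gov/legislation/search-legislation',
        '//div[@class="selectedValues"]/ul/span/li/text()',
    )
```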
<code>
[start of openstates/oh/__init__.py]
1 from pupa.scrape import Jurisdiction, Organization
2 from openstates.utils import url_xpath
3
4 from .people import OHLegislatorScraper
5 # from .events import OHEventScraper
6 from .bills import OHBillScraper
7
8
9 class Ohio(Jurisdiction):
10 division_id = "ocd-division/country:us/state:oh"
11 classification = "government"
12 name = "Ohio"
13 url = "http://www.legislature.state.oh.us/"
14 scrapers = {
15 'people': OHLegislatorScraper,
16 # 'events': OHEventScraper,
17 'bills': OHBillScraper,
18 }
19 legislative_sessions = [
20 {
21 "_scraped_name": "128",
22 "identifier": "128",
23 "name": "128th Legislature (2009-2010)"
24 },
25 {
26 "_scraped_name": "129",
27 "identifier": "129",
28 "name": "129th Legislature (2011-2012)",
29 "start_date": "2011-01-03"
30 },
31 {
32 "_scraped_name": "130",
33 "identifier": "130",
34 "name": "130th Legislature (2013-2014)"
35 },
36 {
37 "_scraped_name": "131",
38 "identifier": "131",
39 "name": "131st Legislature (2015-2016)"
40 },
41 {
42 "_scraped_name": "132",
43 "identifier": "132",
44 "name": "132st Legislature (2017-2018)",
45 "start_date": "2017-01-02",
46 "end_date": "2017-12-31"
47 }
48 ]
49 ignored_scraped_sessions = [
50 "127",
51 "126",
52 "125",
53 "124",
54 "123",
55 "122"
56 ]
57
58 def get_organizations(self):
59 legislature_name = "Ohio General Assembly"
60 lower_chamber_name = "House"
61 lower_seats = 99
62 lower_title = "Representative"
63 upper_chamber_name = "Senate"
64 upper_seats = 33
65 upper_title = "Senator"
66
67 legislature = Organization(name=legislature_name,
68 classification="legislature")
69 upper = Organization(upper_chamber_name, classification='upper',
70 parent_id=legislature._id)
71 lower = Organization(lower_chamber_name, classification='lower',
72 parent_id=legislature._id)
73
74 for n in range(1, upper_seats+1):
75 upper.add_post(
76 label=str(n), role=upper_title,
77 division_id='{}/sldu:{}'.format(self.division_id, n))
78 for n in range(1, lower_seats+1):
79 lower.add_post(
80 label=str(n), role=lower_title,
81 division_id='{}/sldl:{}'.format(self.division_id, n))
82
83 yield legislature
84 yield upper
85 yield lower
86
87 def get_session_list(self):
88 sessions = url_xpath('http://archives.legislature.state.oh.us',
89 '//form[@action="bill_search.cfm"]//input[@type="radio"'
90 ' and @name="SESSION"]/@value')
91 # Archive does not include current session
92 sessions.append('131')
93 return sessions
94
[end of openstates/oh/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py
--- a/openstates/oh/__init__.py
+++ b/openstates/oh/__init__.py
@@ -46,14 +46,7 @@
"end_date": "2017-12-31"
}
]
- ignored_scraped_sessions = [
- "127",
- "126",
- "125",
- "124",
- "123",
- "122"
- ]
+ ignored_scraped_sessions = []
def get_organizations(self):
legislature_name = "Ohio General Assembly"
@@ -85,9 +78,7 @@
yield lower
def get_session_list(self):
- sessions = url_xpath('http://archives.legislature.state.oh.us',
- '//form[@action="bill_search.cfm"]//input[@type="radio"'
- ' and @name="SESSION"]/@value')
+ sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation',
+ '//div[@class="selectedValues"]/ul/span/li/text()')
# Archive does not include current session
- sessions.append('131')
return sessions
| {"golden_diff": "diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py\n--- a/openstates/oh/__init__.py\n+++ b/openstates/oh/__init__.py\n@@ -46,14 +46,7 @@\n \"end_date\": \"2017-12-31\"\n }\n ]\n- ignored_scraped_sessions = [\n- \"127\",\n- \"126\",\n- \"125\",\n- \"124\",\n- \"123\",\n- \"122\"\n- ]\n+ ignored_scraped_sessions = []\n \n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n@@ -85,9 +78,7 @@\n yield lower\n \n def get_session_list(self):\n- sessions = url_xpath('http://archives.legislature.state.oh.us',\n- '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n- ' and @name=\"SESSION\"]/@value')\n+ sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation',\n+ '//div[@class=\"selectedValues\"]/ul/span/li/text()')\n # Archive does not include current session\n- sessions.append('131')\n return sessions\n", "issue": "OH: `get_session_list` cannot \"see\" current session\nOhio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed, to automatically \"guess\" the current session instead.\r\n\r\nhttps://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .people import OHLegislatorScraper\n# from .events import OHEventScraper\nfrom .bills import OHBillScraper\n\n\nclass Ohio(Jurisdiction):\n division_id = \"ocd-division/country:us/state:oh\"\n classification = \"government\"\n name = \"Ohio\"\n url = \"http://www.legislature.state.oh.us/\"\n scrapers = {\n 'people': OHLegislatorScraper,\n # 'events': OHEventScraper,\n 'bills': OHBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"128\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"129\",\n \"identifier\": \"129\",\n \"name\": \"129th Legislature (2011-2012)\",\n \"start_date\": \"2011-01-03\"\n },\n {\n \"_scraped_name\": \"130\",\n \"identifier\": \"130\",\n \"name\": \"130th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"131\",\n \"identifier\": \"131\",\n \"name\": \"131st Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"132\",\n \"identifier\": \"132\",\n \"name\": \"132st Legislature (2017-2018)\",\n \"start_date\": \"2017-01-02\",\n \"end_date\": \"2017-12-31\"\n }\n ]\n ignored_scraped_sessions = [\n \"127\",\n \"126\",\n \"125\",\n \"124\",\n \"123\",\n \"122\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 99\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 33\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = 
url_xpath('http://archives.legislature.state.oh.us',\n '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n ' and @name=\"SESSION\"]/@value')\n # Archive does not include current session\n sessions.append('131')\n return sessions\n", "path": "openstates/oh/__init__.py"}]} | 1,600 | 294 |
gh_patches_debug_13150 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1631 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show domain name of the email address that was used to verify the account
On profiles, Google Scholar shows only the domain name instead of the entire email address, something like "Verified email address at radboudumc.nl". This would be a good feature for grand challenge as well: it would make it possible to check whether users verified their account with an email address from their institution. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example.
</issue>
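A minimal sketch of rendering only the domain part of the verification email in the tooltip. The attribute names (`user.verification.email`, `format_html`) follow the code shown below in this record; the exact tooltip wording is an assumption.

```
# Hypothetical sketch: show "Verified email address at <domain>" on hover.
email = user.verification.email or user.email
domain = email.split("@")[1]
verified = format_html(
    '<i class="fas fa-user-check text-success" '
    'title="Verified email address at {}"></i>',
    domain,
)
```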
<code>
[start of app/grandchallenge/profiles/templatetags/profiles.py]
1 from typing import Union
2
3 from django import template
4 from django.contrib.auth import get_user_model
5 from django.contrib.auth.models import AbstractUser
6 from django.core.exceptions import ObjectDoesNotExist
7 from django.utils.html import format_html
8 from django.utils.safestring import mark_safe
9
10 from grandchallenge.subdomains.utils import reverse
11
12 register = template.Library()
13
14
15 @register.filter
16 def user_profile_link(user: Union[AbstractUser, None]) -> str:
17 verified = ""
18
19 if user:
20 username = user.username
21 profile_url = reverse(
22 "userena_profile_detail", kwargs={"username": user.username}
23 )
24 mugshot = format_html(
25 (
26 '<img class="mugshot" loading="lazy" src="{0}" '
27 'alt="User Mugshot" '
28 # Match the "fa-lg" class style
29 'style="height: 1.33em; vertical-align: -25%;"/>'
30 ),
31 user.user_profile.get_mugshot_url(),
32 )
33
34 try:
35 if user.verification.is_verified:
36 verified = mark_safe(
37 '<i class="fas fa-user-check text-success" '
38 'title="Verified User"></i>'
39 )
40 except ObjectDoesNotExist:
41 # No verification request
42 pass
43 else:
44 username = "Unknown"
45 profile_url = "#"
46 mugshot = mark_safe('<i class="fas fa-user fa-lg"></i>')
47
48 return format_html(
49 '<span class="text-nowrap"><a href="{0}">{1}</a> <a href="{0}">{2}</a> {3}</span>',
50 profile_url,
51 mugshot,
52 username,
53 verified,
54 )
55
56
57 @register.filter
58 def user_profile_link_username(username: str) -> str:
59 User = get_user_model() # noqa: N806
60 return user_profile_link(User.objects.get(username=username))
61
[end of app/grandchallenge/profiles/templatetags/profiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py
--- a/app/grandchallenge/profiles/templatetags/profiles.py
+++ b/app/grandchallenge/profiles/templatetags/profiles.py
@@ -33,9 +33,15 @@
try:
if user.verification.is_verified:
- verified = mark_safe(
+ email = (
+ user.verification.email
+ if user.verification.email
+ else user.email
+ )
+ verified = format_html(
'<i class="fas fa-user-check text-success" '
- 'title="Verified User"></i>'
+ 'title="Verified email address at {}"></i>',
+ email.split("@")[1],
)
except ObjectDoesNotExist:
# No verification request
| {"golden_diff": "diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py\n--- a/app/grandchallenge/profiles/templatetags/profiles.py\n+++ b/app/grandchallenge/profiles/templatetags/profiles.py\n@@ -33,9 +33,15 @@\n \n try:\n if user.verification.is_verified:\n- verified = mark_safe(\n+ email = (\n+ user.verification.email\n+ if user.verification.email\n+ else user.email\n+ )\n+ verified = format_html(\n '<i class=\"fas fa-user-check text-success\" '\n- 'title=\"Verified User\"></i>'\n+ 'title=\"Verified email address at {}\"></i>',\n+ email.split(\"@\")[1],\n )\n except ObjectDoesNotExist:\n # No verification request\n", "issue": "Show domain name of the email address that was used to verify the account\nGoogle Scholar shows on profiles instead of the entire email address only the domain name. Something like \"Verified email address at radboudumc.nl\". Would be a good feature for grand challenge as well, this would make it possible to check if users used an email address from their institution to verify their account. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example.\n", "before_files": [{"content": "from typing import Union\n\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom grandchallenge.subdomains.utils import reverse\n\nregister = template.Library()\n\n\[email protected]\ndef user_profile_link(user: Union[AbstractUser, None]) -> str:\n verified = \"\"\n\n if user:\n username = user.username\n profile_url = reverse(\n \"userena_profile_detail\", kwargs={\"username\": user.username}\n )\n mugshot = format_html(\n (\n '<img class=\"mugshot\" loading=\"lazy\" src=\"{0}\" '\n 'alt=\"User Mugshot\" '\n # Match the \"fa-lg\" class style\n 'style=\"height: 1.33em; vertical-align: -25%;\"/>'\n ),\n user.user_profile.get_mugshot_url(),\n )\n\n try:\n if user.verification.is_verified:\n verified = mark_safe(\n '<i class=\"fas fa-user-check text-success\" '\n 'title=\"Verified User\"></i>'\n )\n except ObjectDoesNotExist:\n # No verification request\n pass\n else:\n username = \"Unknown\"\n profile_url = \"#\"\n mugshot = mark_safe('<i class=\"fas fa-user fa-lg\"></i>')\n\n return format_html(\n '<span class=\"text-nowrap\"><a href=\"{0}\">{1}</a> <a href=\"{0}\">{2}</a> {3}</span>',\n profile_url,\n mugshot,\n username,\n verified,\n )\n\n\[email protected]\ndef user_profile_link_username(username: str) -> str:\n User = get_user_model() # noqa: N806\n return user_profile_link(User.objects.get(username=username))\n", "path": "app/grandchallenge/profiles/templatetags/profiles.py"}]} | 1,174 | 194 |
gh_patches_debug_23466 | rasdani/github-patches | git_diff | StackStorm__st2-2400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in alias format regex parsing once defaults are involved.
Based on some debugging to narrow down the problem reported in the community by @tonybaloney.
Note: an extra print statement, added locally for debugging purposes, helps point out the errant regex.
```
In [42]: format = "acl {{a}} {{b}} {{c}}"
In [43]: format = "acl {{a}} {{b}} {{c}} {{d=1}}"
In [44]: param_stream = 'acl "x" "y" "z"'
In [45]: p = action_alias_utils.ActionAliasFormatParser(format, param_stream)
In [46]: p.get_extracted_param_value()
^\s*acl\s*["\']?(?P<a}}\s*["\']?(?P<b>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?(?:\s*["\']?(?P<c>(?:(?<=\').+?(?=\')|(?<=").+?(?>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?\s+)?\s*\s*$
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-46-b3174c03dc30> in <module>()
----> 1 p.get_extracted_param_value()
/mnt/st2repos/st2/st2common/st2common/models/utils/action_alias_utils.py in get_extracted_param_value(self)
80 # 2. Matched parameters
81 # 3. Extra parameters
---> 82 matched_stream = re.match(reg, self._param_stream, re.DOTALL)
83 if matched_stream:
84 values = matched_stream.groupdict()
/mnt/st2repos/st2/virtualenv/lib/python2.7/re.pyc in match(pattern, string, flags)
135 """Try to apply the pattern at the start of the string, returning
136 a match object, or None if no match was found."""
--> 137 return _compile(pattern, flags).match(string)
138
139 def search(pattern, string, flags=0):
/mnt/st2repos/st2/virtualenv/lib/python2.7/re.pyc in _compile(*key)
242 p = sre_compile.compile(pattern, flags)
243 except error, v:
--> 244 raise error, v # invalid expression
245 if not bypass_cache:
246 if len(_cache) >= _MAXCACHE:
error: bad character in group name 'a}}\\s*["\\\']?(?P<b'
In [47]: format = "acl {{a}} {{b}} {{c}}"
In [48]: p = action_alias_utils.ActionAliasFormatParser(format, param_stream)
In [49]: p.get_extracted_param_value()
^\s*acl\s*["\']?(?P<a>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']? \s*["\']?(?P<b>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']? \s*["\']?(?P<c>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?\s*$
Out[49]: {'a': 'x', 'b': 'y', 'c': 'z'}
```
</issue>
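The traceback above boils down to an invalid named group being produced when a parameter with a default follows the plain parameters, so the generated pattern contains something like `(?P<a}}...>`. A tiny standalone reproduction of that constraint is shown below; it is illustrative only and does not touch the parser code.

```
import re

# Group names must be valid identifiers; anything like "a}}" leaking into
# (?P<...>) makes compilation fail with "bad character in group name".
re.compile(r'(?P<a>\w+)')        # fine
try:
    re.compile(r'(?P<a}}>\w+)')  # shape of what the broken substitution produces
except re.error as exc:
    print(exc)
```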
<code>
[start of st2common/st2common/models/utils/action_alias_utils.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import re
17 from st2common.exceptions import content
18
19 __all__ = [
20 'ActionAliasFormatParser'
21 ]
22
23
24 class ActionAliasFormatParser(object):
25
26 def __init__(self, alias_format=None, param_stream=None):
27 self._format = alias_format or ''
28 self._param_stream = param_stream or ''
29
30 def get_extracted_param_value(self):
31
32 result = {}
33
34 # As there's a lot of questions about using regular expressions,
35 # I'll try to be thorough when documenting this code.
36
37 # We're parsing the arbitrary key-value pairs at the end of the stream
38 # to support passing of parameters not specified in the format string,
39 # and cutting them from the stream as they're no longer needed.
40 # Possible values are quoted strings, a word, or anything inside "{}".
41 pairs_match = r'(?:^|\s+)(\S+)=("(.*?)"|\'(.*?)\'|({.*?})|(\S+))'
42 extra = re.match(r'.*?((' + pairs_match + r'\s*)*)$',
43 self._param_stream, re.DOTALL)
44 if extra:
45 kv_pairs = re.findall(pairs_match,
46 extra.group(1), re.DOTALL)
47 self._param_stream = self._param_stream.replace(extra.group(1), '')
48 self._param_stream = " %s " % self._param_stream
49
50 # Now we'll match parameters with default values in form of
51 # {{ value = parameter }} (and all possible permutations of spaces),
52 # compiling them into a list.
53 # "test {{ url = http://google.com }} {{ extra = Test }}" will become
54 # [ ["url", "http://google.com"], ["extra", "Test"] ]
55 params = re.findall(r'{{\s*(.+?)\s*(?:=\s*[\'"]?({.+?}|.+?)[\'"]?)?\s*}}',
56 self._format, re.DOTALL)
57
58 # Now we're transforming our format string into a regular expression,
59 # substituting {{ ... }} with regex named groups, so that param_stream
60 # matched against this expression yields a dict of params with values.
61 param_match = r'["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
62 reg = re.sub(r'(\s*){{\s*([^=]+?)\s*}}(?=\s+{{[^}]+?=)',
63 r'\s*' + param_match + r'\s+',
64 self._format)
65 reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}(\s*)',
66 r'(?:\s*' + param_match + r'\s+)?\s*',
67 reg)
68 reg = re.sub(r'(\s*){{\s*(.+?)\s*}}(\s*)',
69 r'\s*' + param_match + r'\3',
70 reg)
71 reg = '^\s*' + reg + r'\s*$'
72
73 # Now we're matching param_stream against our format string regex,
74 # getting a dict of values. We'll also get default values from
75 # "params" list if something is not present.
76 # Priority, from lowest to highest:
77 # 1. Default parameters
78 # 2. Matched parameters
79 # 3. Extra parameters
80 matched_stream = re.match(reg, self._param_stream, re.DOTALL)
81 if matched_stream:
82 values = matched_stream.groupdict()
83 for param in params:
84 matched_value = values[param[0]] if matched_stream else None
85 result[param[0]] = matched_value or param[1]
86 if extra:
87 for pair in kv_pairs:
88 result[pair[0]] = ''.join(pair[2:])
89
90 if self._format and not (self._param_stream.strip() or any(result.values())):
91 raise content.ParseException('No value supplied and no default value found.')
92
93 return result
94
[end of st2common/st2common/models/utils/action_alias_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py
--- a/st2common/st2common/models/utils/action_alias_utils.py
+++ b/st2common/st2common/models/utils/action_alias_utils.py
@@ -59,14 +59,14 @@
# substituting {{ ... }} with regex named groups, so that param_stream
# matched against this expression yields a dict of params with values.
param_match = r'["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
- reg = re.sub(r'(\s*){{\s*([^=]+?)\s*}}(?=\s+{{[^}]+?=)',
- r'\s*' + param_match + r'\s+',
+ reg = re.sub(r'(\s*){{\s*([^=}]+?)\s*}}(?![\'"]?\s+}})',
+ r'\1' + param_match,
self._format)
- reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}(\s*)',
- r'(?:\s*' + param_match + r'\s+)?\s*',
+ reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}',
+ r'(?:\1' + param_match + r')?',
reg)
- reg = re.sub(r'(\s*){{\s*(.+?)\s*}}(\s*)',
- r'\s*' + param_match + r'\3',
+ reg = re.sub(r'(\s*){{\s*(.+?)\s*}}',
+ r'\1' + param_match,
reg)
reg = '^\s*' + reg + r'\s*$'
| {"golden_diff": "diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py\n--- a/st2common/st2common/models/utils/action_alias_utils.py\n+++ b/st2common/st2common/models/utils/action_alias_utils.py\n@@ -59,14 +59,14 @@\n # substituting {{ ... }} with regex named groups, so that param_stream\n # matched against this expression yields a dict of params with values.\n param_match = r'[\"\\']?(?P<\\2>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?'\n- reg = re.sub(r'(\\s*){{\\s*([^=]+?)\\s*}}(?=\\s+{{[^}]+?=)',\n- r'\\s*' + param_match + r'\\s+',\n+ reg = re.sub(r'(\\s*){{\\s*([^=}]+?)\\s*}}(?![\\'\"]?\\s+}})',\n+ r'\\1' + param_match,\n self._format)\n- reg = re.sub(r'(\\s*){{\\s*(\\S+)\\s*=\\s*(?:{.+?}|.+?)\\s*}}(\\s*)',\n- r'(?:\\s*' + param_match + r'\\s+)?\\s*',\n+ reg = re.sub(r'(\\s*){{\\s*(\\S+)\\s*=\\s*(?:{.+?}|.+?)\\s*}}',\n+ r'(?:\\1' + param_match + r')?',\n reg)\n- reg = re.sub(r'(\\s*){{\\s*(.+?)\\s*}}(\\s*)',\n- r'\\s*' + param_match + r'\\3',\n+ reg = re.sub(r'(\\s*){{\\s*(.+?)\\s*}}',\n+ r'\\1' + param_match,\n reg)\n reg = '^\\s*' + reg + r'\\s*$'\n", "issue": "Error in alias format regex parsing once defaults are involved.\nBased on some debugging to reduce problem reported in community by @tonybaloney.\n\nNote : an extra print statement added for debugging purposes locally help point out the errant regex.\n\n```\nIn [42]: format = \"acl {{a}} {{b}} {{c}}\"\n\nIn [43]: format = \"acl {{a}} {{b}} {{c}} {{d=1}}\"\n\nIn [44]: param_stream = 'acl \"x\" \"y\" \"z\"'\n\nIn [45]: p = action_alias_utils.ActionAliasFormatParser(format, param_stream)\n\nIn [46]: p.get_extracted_param_value()\n^\\s*acl\\s*[\"\\']?(?P<a}}\\s*[\"\\']?(?P<b>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?(?:\\s*[\"\\']?(?P<c>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?\\s+)?\\s*\\s*$\n---------------------------------------------------------------------------\nerror Traceback (most recent call last)\n<ipython-input-46-b3174c03dc30> in <module>()\n----> 1 p.get_extracted_param_value()\n\n/mnt/st2repos/st2/st2common/st2common/models/utils/action_alias_utils.py in get_extracted_param_value(self)\n 80 # 2. Matched parameters\n 81 # 3. Extra parameters\n---> 82 matched_stream = re.match(reg, self._param_stream, re.DOTALL)\n 83 if matched_stream:\n 84 values = matched_stream.groupdict()\n\n/mnt/st2repos/st2/virtualenv/lib/python2.7/re.pyc in match(pattern, string, flags)\n 135 \"\"\"Try to apply the pattern at the start of the string, returning\n 136 a match object, or None if no match was found.\"\"\"\n--> 137 return _compile(pattern, flags).match(string)\n 138\n 139 def search(pattern, string, flags=0):\n\n/mnt/st2repos/st2/virtualenv/lib/python2.7/re.pyc in _compile(*key)\n 242 p = sre_compile.compile(pattern, flags)\n 243 except error, v:\n--> 244 raise error, v # invalid expression\n 245 if not bypass_cache:\n 246 if len(_cache) >= _MAXCACHE:\n\nerror: bad character in group name 'a}}\\\\s*[\"\\\\\\']?(?P<b'\n\n\nIn [47]: format = \"acl {{a}} {{b}} {{c}}\"\n\nIn [48]: p = action_alias_utils.ActionAliasFormatParser(format, param_stream)\n\nIn [49]: p.get_extracted_param_value()\n^\\s*acl\\s*[\"\\']?(?P<a>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']? \\s*[\"\\']?(?P<b>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']? 
\\s*[\"\\']?(?P<c>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?\\s*$\nOut[49]: {'a': 'x', 'b': 'y', 'c': 'z'}\n```\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom st2common.exceptions import content\n\n__all__ = [\n 'ActionAliasFormatParser'\n]\n\n\nclass ActionAliasFormatParser(object):\n\n def __init__(self, alias_format=None, param_stream=None):\n self._format = alias_format or ''\n self._param_stream = param_stream or ''\n\n def get_extracted_param_value(self):\n\n result = {}\n\n # As there's a lot of questions about using regular expressions,\n # I'll try to be thorough when documenting this code.\n\n # We're parsing the arbitrary key-value pairs at the end of the stream\n # to support passing of parameters not specified in the format string,\n # and cutting them from the stream as they're no longer needed.\n # Possible values are quoted strings, a word, or anything inside \"{}\".\n pairs_match = r'(?:^|\\s+)(\\S+)=(\"(.*?)\"|\\'(.*?)\\'|({.*?})|(\\S+))'\n extra = re.match(r'.*?((' + pairs_match + r'\\s*)*)$',\n self._param_stream, re.DOTALL)\n if extra:\n kv_pairs = re.findall(pairs_match,\n extra.group(1), re.DOTALL)\n self._param_stream = self._param_stream.replace(extra.group(1), '')\n self._param_stream = \" %s \" % self._param_stream\n\n # Now we'll match parameters with default values in form of\n # {{ value = parameter }} (and all possible permutations of spaces),\n # compiling them into a list.\n # \"test {{ url = http://google.com }} {{ extra = Test }}\" will become\n # [ [\"url\", \"http://google.com\"], [\"extra\", \"Test\"] ]\n params = re.findall(r'{{\\s*(.+?)\\s*(?:=\\s*[\\'\"]?({.+?}|.+?)[\\'\"]?)?\\s*}}',\n self._format, re.DOTALL)\n\n # Now we're transforming our format string into a regular expression,\n # substituting {{ ... }} with regex named groups, so that param_stream\n # matched against this expression yields a dict of params with values.\n param_match = r'[\"\\']?(?P<\\2>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?'\n reg = re.sub(r'(\\s*){{\\s*([^=]+?)\\s*}}(?=\\s+{{[^}]+?=)',\n r'\\s*' + param_match + r'\\s+',\n self._format)\n reg = re.sub(r'(\\s*){{\\s*(\\S+)\\s*=\\s*(?:{.+?}|.+?)\\s*}}(\\s*)',\n r'(?:\\s*' + param_match + r'\\s+)?\\s*',\n reg)\n reg = re.sub(r'(\\s*){{\\s*(.+?)\\s*}}(\\s*)',\n r'\\s*' + param_match + r'\\3',\n reg)\n reg = '^\\s*' + reg + r'\\s*$'\n\n # Now we're matching param_stream against our format string regex,\n # getting a dict of values. We'll also get default values from\n # \"params\" list if something is not present.\n # Priority, from lowest to highest:\n # 1. Default parameters\n # 2. Matched parameters\n # 3. 
Extra parameters\n matched_stream = re.match(reg, self._param_stream, re.DOTALL)\n if matched_stream:\n values = matched_stream.groupdict()\n for param in params:\n matched_value = values[param[0]] if matched_stream else None\n result[param[0]] = matched_value or param[1]\n if extra:\n for pair in kv_pairs:\n result[pair[0]] = ''.join(pair[2:])\n\n if self._format and not (self._param_stream.strip() or any(result.values())):\n raise content.ParseException('No value supplied and no default value found.')\n\n return result\n", "path": "st2common/st2common/models/utils/action_alias_utils.py"}]} | 2,625 | 449 |
gh_patches_debug_7871 | rasdani/github-patches | git_diff | explosion__spaCy-1905 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dummy command argument wasn't removed from auto-link after download
Auto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly.
## Your Environment
* Operating System: Docker python:3.6
* Python Version Used: 3.6.2
* spaCy Version Used: Changes made since 2.0.5
* Environment Information: Docker python:3.6
</issue>
<code>
[start of spacy/cli/download.py]
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 import plac
5 import requests
6 import os
7 import subprocess
8 import sys
9
10 from .link import link
11 from ..util import prints, get_package_path
12 from .. import about
13
14
15 @plac.annotations(
16 model=("model to download, shortcut or name)", "positional", None, str),
17 direct=("force direct download. Needs model name with version and won't "
18 "perform compatibility check", "flag", "d", bool))
19 def download(model, direct=False):
20 """
21 Download compatible model from default download path using pip. Model
22 can be shortcut, model name or, if --direct flag is set, full model name
23 with version.
24 """
25 if direct:
26 dl = download_model('{m}/{m}.tar.gz'.format(m=model))
27 else:
28 shortcuts = get_json(about.__shortcuts__, "available shortcuts")
29 model_name = shortcuts.get(model, model)
30 compatibility = get_compatibility()
31 version = get_version(model_name, compatibility)
32 dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,
33 v=version))
34 if dl != 0:
35 # if download subprocess doesn't return 0, exit with the respective
36 # exit code before doing anything else
37 sys.exit(dl)
38 try:
39 # Get package path here because link uses
40 # pip.get_installed_distributions() to check if model is a
41 # package, which fails if model was just installed via
42 # subprocess
43 package_path = get_package_path(model_name)
44 link(None, model_name, model, force=True,
45 model_path=package_path)
46 except:
47 # Dirty, but since spacy.download and the auto-linking is
48 # mostly a convenience wrapper, it's best to show a success
49 # message and loading instructions, even if linking fails.
50 prints(
51 "Creating a shortcut link for 'en' didn't work (maybe "
52 "you don't have admin permissions?), but you can still "
53 "load the model via its full package name:",
54 "nlp = spacy.load('%s')" % model_name,
55 title="Download successful but linking failed")
56
57
58 def get_json(url, desc):
59 r = requests.get(url)
60 if r.status_code != 200:
61 msg = ("Couldn't fetch %s. Please find a model for your spaCy "
62 "installation (v%s), and download it manually.")
63 prints(msg % (desc, about.__version__), about.__docs_models__,
64 title="Server error (%d)" % r.status_code, exits=1)
65 return r.json()
66
67
68 def get_compatibility():
69 version = about.__version__
70 version = version.rsplit('.dev', 1)[0]
71 comp_table = get_json(about.__compatibility__, "compatibility table")
72 comp = comp_table['spacy']
73 if version not in comp:
74 prints("No compatible models found for v%s of spaCy." % version,
75 title="Compatibility error", exits=1)
76 return comp[version]
77
78
79 def get_version(model, comp):
80 model = model.rsplit('.dev', 1)[0]
81 if model not in comp:
82 version = about.__version__
83 msg = "No compatible model found for '%s' (spaCy v%s)."
84 prints(msg % (model, version), title="Compatibility error", exits=1)
85 return comp[model][0]
86
87
88 def download_model(filename):
89 download_url = about.__download_url__ + '/' + filename
90 return subprocess.call(
91 [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps',
92 download_url], env=os.environ.copy())
93
[end of spacy/cli/download.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spacy/cli/download.py b/spacy/cli/download.py
--- a/spacy/cli/download.py
+++ b/spacy/cli/download.py
@@ -41,7 +41,7 @@
# package, which fails if model was just installed via
# subprocess
package_path = get_package_path(model_name)
- link(None, model_name, model, force=True,
+ link(model_name, model, force=True,
model_path=package_path)
except:
# Dirty, but since spacy.download and the auto-linking is
| {"golden_diff": "diff --git a/spacy/cli/download.py b/spacy/cli/download.py\n--- a/spacy/cli/download.py\n+++ b/spacy/cli/download.py\n@@ -41,7 +41,7 @@\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n- link(None, model_name, model, force=True,\n+ link(model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n", "issue": "Dummy command argument wasn't removed from auto-link after download\nAuto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly.\r\n\r\n## Your Environment\r\n* Operating System: Docker python:3.6\r\n* Python Version Used: 3.6.2\r\n* spaCy Version Used: Changes made since 2.0.5\r\n* Environment Information: Docker python:3.6\r\n\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport plac\nimport requests\nimport os\nimport subprocess\nimport sys\n\nfrom .link import link\nfrom ..util import prints, get_package_path\nfrom .. import about\n\n\[email protected](\n model=(\"model to download, shortcut or name)\", \"positional\", None, str),\n direct=(\"force direct download. Needs model name with version and won't \"\n \"perform compatibility check\", \"flag\", \"d\", bool))\ndef download(model, direct=False):\n \"\"\"\n Download compatible model from default download path using pip. Model\n can be shortcut, model name or, if --direct flag is set, full model name\n with version.\n \"\"\"\n if direct:\n dl = download_model('{m}/{m}.tar.gz'.format(m=model))\n else:\n shortcuts = get_json(about.__shortcuts__, \"available shortcuts\")\n model_name = shortcuts.get(model, model)\n compatibility = get_compatibility()\n version = get_version(model_name, compatibility)\n dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,\n v=version))\n if dl != 0:\n # if download subprocess doesn't return 0, exit with the respective\n # exit code before doing anything else\n sys.exit(dl)\n try:\n # Get package path here because link uses\n # pip.get_installed_distributions() to check if model is a\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n link(None, model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n # mostly a convenience wrapper, it's best to show a success\n # message and loading instructions, even if linking fails.\n prints(\n \"Creating a shortcut link for 'en' didn't work (maybe \"\n \"you don't have admin permissions?), but you can still \"\n \"load the model via its full package name:\",\n \"nlp = spacy.load('%s')\" % model_name,\n title=\"Download successful but linking failed\")\n\n\ndef get_json(url, desc):\n r = requests.get(url)\n if r.status_code != 200:\n msg = (\"Couldn't fetch %s. 
Please find a model for your spaCy \"\n \"installation (v%s), and download it manually.\")\n prints(msg % (desc, about.__version__), about.__docs_models__,\n title=\"Server error (%d)\" % r.status_code, exits=1)\n return r.json()\n\n\ndef get_compatibility():\n version = about.__version__\n version = version.rsplit('.dev', 1)[0]\n comp_table = get_json(about.__compatibility__, \"compatibility table\")\n comp = comp_table['spacy']\n if version not in comp:\n prints(\"No compatible models found for v%s of spaCy.\" % version,\n title=\"Compatibility error\", exits=1)\n return comp[version]\n\n\ndef get_version(model, comp):\n model = model.rsplit('.dev', 1)[0]\n if model not in comp:\n version = about.__version__\n msg = \"No compatible model found for '%s' (spaCy v%s).\"\n prints(msg % (model, version), title=\"Compatibility error\", exits=1)\n return comp[model][0]\n\n\ndef download_model(filename):\n download_url = about.__download_url__ + '/' + filename\n return subprocess.call(\n [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps',\n download_url], env=os.environ.copy())\n", "path": "spacy/cli/download.py"}]} | 1,700 | 121 |
gh_patches_debug_30536 | rasdani/github-patches | git_diff | mdn__kuma-7776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search results present the locale in the wrong case
**Summary**
E.g. https://developer.mozilla.org/en-US/search?q=mdn+contribute+
See screenshot:
<img width="932" alt="Screen Shot 2021-02-04 at 10 59 44 AM" src="https://user-images.githubusercontent.com/26739/106919753-6cd80e80-66d8-11eb-97a1-d409dfc2e36b.png">
**Additional context**
The Yari site-search is coming. But this might be easy to fix.
</issue>
<code>
[start of kuma/search/views.py]
1 from urllib.parse import parse_qs, urlencode
2
3 from django.conf import settings
4 from django.shortcuts import render
5 from django.urls import reverse_lazy
6 from django.views.decorators.cache import never_cache
7 from django.views.decorators.http import require_GET
8 from django.views.generic import RedirectView
9 from ratelimit.decorators import ratelimit
10
11 from kuma.api.v1.search import search as search_api
12 from kuma.core.decorators import shared_cache_control
13
14
15 # Since the search endpoint accepts user input (via query parameters) and its
16 # response is compressed, use rate limiting to mitigate the BREACH attack
17 # (see http://breachattack.com/). It still needs to allow a user to click
18 # the filter switches (bug 1426968).
19 # Alternate: forbid gzip by setting Content-Encoding: identity
20 @never_cache
21 @require_GET
22 @ratelimit(key="user_or_ip", rate="25/m", block=True)
23 def search(request, *args, **kwargs):
24 """
25 The search view.
26
27 --2021-- THIS VIEW IS A HACK! --2021--
28 This Django view exists to server-side render the search results page.
29 But we're moving the search result page to Yari and that one will use a XHR
30 request (to /api/v1/search) from a skeleton page (aka. SPA).
31 But as a way to get to that, we need to transition from the old to the new.
32 So, this page uses the Django view in kuma.api.v1.search.search, which
33 returns a special `JsonResponse` instance whose data we can pluck out
34 to our needs for this old view.
35 Once we've fully moved to the Yari (static + XHR to v1 API) site-search,
36 we can comfortably delete this view.
37 """
38 # The underlying v1 API supports searching without a 'q' but the web
39 # UI doesn't. For example, the search input field requires a value.
40 # So we match that here too.
41 if not request.GET.get("q", "").strip():
42 status = 400
43 context = {"results": {}}
44 else:
45 # TODO consider, if the current locale is *not* en-US, that we force
46 # it to do a search in both locales.
47 # This might come in handy for people searching in a locale where
48 # there's very little results but they'd be happy to get the en-US ones.
49 response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)
50 results = response.data
51
52 error = None
53 status = response.status_code
54
55 # Determine if there were validation errors
56 if status == 400:
57 error = ""
58 for key, messages in results["errors"].items():
59 for message in messages:
60 error += f"{key}: {message['message']}\n"
61 else:
62 # Have to rearrange the 'results' in a way the old search expects it.
63 # ...which is as follows:
64 # - `count`: integer number of matched documents
65 # - `previous`: a URL or empty string
66 # - `next`: a URL or empty string
67 # - `query`: string
68 # - `start`: pagination number
69 # - `end`: pagination number
70 # - `documents`:
71 # - `title`
72 # - `locale`
73 # - `slug`
74 # - `excerpt`: string of safe HTML
75 next_url = ""
76 previous_url = ""
77 page = results["metadata"]["page"]
78 size = results["metadata"]["size"]
79 count = results["metadata"]["total"]["value"]
80 query_string = request.META.get("QUERY_STRING")
81 query_string_parsed = parse_qs(query_string)
82 if (page + 1) * size < count:
83 query_string_parsed["page"] = f"{page + 1}"
84 next_url = f"?{urlencode(query_string_parsed, True)}"
85 if page > 1:
86 if page == 2:
87 del query_string_parsed["page"]
88 else:
89 query_string_parsed["page"] = f"{page - 1}"
90 previous_url = f"?{urlencode(query_string_parsed, True)}"
91
92 results = {
93 "count": count,
94 "next": next_url,
95 "previous": previous_url,
96 "query": request.GET.get("q"),
97 "start": (page - 1) * size + 1,
98 "end": page * size,
99 "documents": [
100 {
101 "title": x["title"],
102 "slug": x["slug"],
103 "locale": x["locale"],
104 "summary": x["summary"],
105 "excerpt": "<br>".join(x["highlight"].get("body", [])),
106 }
107 for x in results["documents"]
108 ],
109 }
110
111 context = {"results": {"results": None if error else results, "error": error}}
112 return render(request, "search/react.html", context, status=status)
113
114
115 class SearchRedirectView(RedirectView):
116 permanent = True
117
118 def get_redirect_url(self, *args, **kwargs):
119 query_string = self.request.META.get("QUERY_STRING")
120 url = reverse_lazy("api.v1.search")
121 qs = parse_qs(query_string)
122 # If you used `/en-Us/search.json` you can skip the `?locale=`
123 # because the default locale in `/api/v1/search` is `en-US`.
124 if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():
125 qs["locale"] = self.request.LANGUAGE_CODE
126 if qs:
127 url += "?" + urlencode(qs, True)
128 return url
129
130
131 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
132 def plugin(request):
133 """Render an OpenSearch Plugin."""
134 return render(
135 request,
136 "search/plugin.html",
137 {"locale": request.LANGUAGE_CODE},
138 content_type="application/opensearchdescription+xml",
139 )
140
[end of kuma/search/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -89,6 +89,21 @@
query_string_parsed["page"] = f"{page - 1}"
previous_url = f"?{urlencode(query_string_parsed, True)}"
+ def package_document(document):
+ # The `results['documents']` will have the `locale` in lowercase.
+ # That's good for searching but now what we want to display.
+ # Here in Kuma we can't use the `mdn_url` so to get that right
+ # we have to manually correct that.
+ locale, slug = document["mdn_url"][1:].split("/docs/")
+ data = {
+ "title": document["title"],
+ "slug": slug,
+ "locale": locale,
+ "summary": document["summary"],
+ "excerpt": "<br>".join(document["highlight"].get("body", [])),
+ }
+ return data
+
results = {
"count": count,
"next": next_url,
@@ -96,16 +111,7 @@
"query": request.GET.get("q"),
"start": (page - 1) * size + 1,
"end": page * size,
- "documents": [
- {
- "title": x["title"],
- "slug": x["slug"],
- "locale": x["locale"],
- "summary": x["summary"],
- "excerpt": "<br>".join(x["highlight"].get("body", [])),
- }
- for x in results["documents"]
- ],
+ "documents": [package_document(x) for x in results["documents"]],
}
context = {"results": {"results": None if error else results, "error": error}}
| {"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -89,6 +89,21 @@\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n \n+ def package_document(document):\n+ # The `results['documents']` will have the `locale` in lowercase.\n+ # That's good for searching but now what we want to display.\n+ # Here in Kuma we can't use the `mdn_url` so to get that right\n+ # we have to manually correct that.\n+ locale, slug = document[\"mdn_url\"][1:].split(\"/docs/\")\n+ data = {\n+ \"title\": document[\"title\"],\n+ \"slug\": slug,\n+ \"locale\": locale,\n+ \"summary\": document[\"summary\"],\n+ \"excerpt\": \"<br>\".join(document[\"highlight\"].get(\"body\", [])),\n+ }\n+ return data\n+\n results = {\n \"count\": count,\n \"next\": next_url,\n@@ -96,16 +111,7 @@\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n- \"documents\": [\n- {\n- \"title\": x[\"title\"],\n- \"slug\": x[\"slug\"],\n- \"locale\": x[\"locale\"],\n- \"summary\": x[\"summary\"],\n- \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n- }\n- for x in results[\"documents\"]\n- ],\n+ \"documents\": [package_document(x) for x in results[\"documents\"]],\n }\n \n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n", "issue": "Search results presents the locale in the wrong case\n**Summary**\r\nE.g. https://developer.mozilla.org/en-US/search?q=mdn+contribute+\r\nSee screenshot:\r\n<img width=\"932\" alt=\"Screen Shot 2021-02-04 at 10 59 44 AM\" src=\"https://user-images.githubusercontent.com/26739/106919753-6cd80e80-66d8-11eb-97a1-d409dfc2e36b.png\">\r\n\r\n\r\n**Additional context**\r\n\r\nThe Yari site-search is coming. But this might be easy to fix. \r\n\n", "before_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. 
SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"summary\": x[\"summary\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch 
Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]} | 2,304 | 417 |
gh_patches_debug_35390 | rasdani/github-patches | git_diff | kymatio__kymatio-344 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unused `fftshift` option
In the `gabor_2d` function, we have an [`fftshift` argument](https://github.com/kymatio/kymatio/blob/02f31a7824011ef7c7c1145342473d32a1133b51/kymatio/scattering2d/filter_bank.py#L196) that is never used. Is there a reason for keeping this?
</issue>
<code>
[start of kymatio/scattering2d/filter_bank.py]
1 """
2 Authors: Eugene Belilovsky, Edouard Oyallon and Sergey Zagoruyko
3 All rights reserved, 2017.
4 """
5
6 __all__ = ['filter_bank']
7
8 import torch
9 import numpy as np
10 from .utils import fft2
11
12
13 def filter_bank(M, N, J, L=8):
14 """
15 Builds in Fourier the Morlet filters used for the scattering transform.
16 Each single filter is provided as a dictionary with the following keys:
17 * 'j' : scale
18 * 'theta' : angle used
19 Parameters
20 ----------
21 M, N : int
22 spatial support of the input
23 J : int
24 logscale of the scattering
25 L : int, optional
26 number of angles used for the wavelet transform
27 Returns
28 -------
29 filters : list
30 A two list of dictionary containing respectively the low-pass and
31 wavelet filters.
32 Notes
33 -----
34 The design of the filters is optimized for the value L = 8.
35 """
36 filters = {}
37 filters['psi'] = []
38
39 offset_unpad = 0
40 for j in range(J):
41 for theta in range(L):
42 psi = {}
43 psi['j'] = j
44 psi['theta'] = theta
45 psi_signal = morlet_2d(M, N, 0.8 * 2**j,
46 (int(L-L/2-1)-theta) * np.pi / L,
47 3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)
48 psi_signal_fourier = fft2(psi_signal)
49 for res in range(min(j + 1, J - 1)):
50 psi_signal_fourier_res = periodize_filter_fft(
51 psi_signal_fourier, res)
52 psi[res] = torch.FloatTensor(
53 np.stack((np.real(psi_signal_fourier_res),
54 np.imag(psi_signal_fourier_res)), axis=2))
55 # Normalization to avoid doing it with the FFT.
56 psi[res].div_(M*N// 2**(2*j))
57 filters['psi'].append(psi)
58
59 filters['phi'] = {}
60 phi_signal = gabor_2d(M, N, 0.8 * 2**(J-1), 0, 0, offset=offset_unpad)
61 phi_signal_fourier = fft2(phi_signal)
62 filters['phi']['j'] = J
63 for res in range(J):
64 phi_signal_fourier_res = periodize_filter_fft(phi_signal_fourier, res)
65 filters['phi'][res] = torch.FloatTensor(np.stack(
66 (np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)),
67 axis=2))
68 filters['phi'][res].div_(M*N // 2 ** (2 * J))
69
70 return filters
71
72
73 def periodize_filter_fft(x, res):
74 """
75 Parameters
76 ----------
77 x : numpy array
78 signal to periodize in Fourier
79 res :
80 resolution to which the signal is cropped.
81
82 Returns
83 -------
84 crop : numpy array
85 It returns a crop version of the filter, assuming that
86 the convolutions will be done via compactly supported signals.
87 """
88 M = x.shape[0]
89 N = x.shape[1]
90
91 crop = np.zeros((M // 2 ** res, N // 2 ** res), np.complex64)
92
93 mask = np.ones(x.shape, np.float32)
94 len_x = int(M * (1 - 2 ** (-res)))
95 start_x = int(M * 2 ** (-res - 1))
96 len_y = int(N * (1 - 2 ** (-res)))
97 start_y = int(N * 2 ** (-res - 1))
98 mask[start_x:start_x + len_x,:] = 0
99 mask[:, start_y:start_y + len_y] = 0
100 x = np.multiply(x,mask)
101
102 for k in range(int(M / 2 ** res)):
103 for l in range(int(N / 2 ** res)):
104 for i in range(int(2 ** res)):
105 for j in range(int(2 ** res)):
106 crop[k, l] += x[k + i * int(M / 2 ** res), l + j * int(N / 2 ** res)]
107
108 return crop
109
110
111 def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):
112 """
113 Computes a 2D Morlet filter.
114 A Morlet filter is the sum of a Gabor filter and a low-pass filter
115 to ensure that the sum has exactly zero mean in the temporal domain.
116 It is defined by the following formula in space:
117 psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)
118 where g_{sigma} is a Gaussian envelope, xi is a frequency and beta is
119 the cancelling parameter.
120
121 Parameters
122 ----------
123 M, N : int
124 spatial sizes
125 sigma : float
126 bandwidth parameter
127 xi : float
128 central frequency (in [0, 1])
129 theta : float
130 angle in [0, pi]
131 slant : float, optional
132 parameter which guides the elipsoidal shape of the morlet
133 offset : int, optional
134 offset by which the signal starts
135 fft_shift : boolean
136 if true, shift the signal in a numpy style
137
138 Returns
139 -------
140 morlet_fft : ndarray
141 numpy array of size (M, N)
142 """
143 wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)
144 wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)
145 K = np.sum(wv) / np.sum(wv_modulus)
146
147 mor = wv - K * wv_modulus
148 return mor
149
150
151 def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):
152 """
153 Computes a 2D Gabor filter.
154 A Gabor filter is defined by the following formula in space:
155 psi(u) = g_{sigma}(u) e^(i xi^T u)
156 where g_{sigma} is a Gaussian envelope and xi is a frequency.
157
158 Parameters
159 ----------
160 M, N : int
161 spatial sizes
162 sigma : float
163 bandwidth parameter
164 xi : float
165 central frequency (in [0, 1])
166 theta : float
167 angle in [0, pi]
168 slant : float, optional
169 parameter which guides the elipsoidal shape of the morlet
170 offset : int, optional
171 offset by which the signal starts
172 fft_shift : boolean
173 if true, shift the signal in a numpy style
174
175 Returns
176 -------
177 morlet_fft : ndarray
178 numpy array of size (M, N)
179 """
180 gab = np.zeros((M, N), np.complex64)
181 R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)
182 R_inv = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], np.float32)
183 D = np.array([[1, 0], [0, slant * slant]])
184 curv = np.dot(R, np.dot(D, R_inv)) / ( 2 * sigma * sigma)
185
186 for ex in [-2, -1, 0, 1, 2]:
187 for ey in [-2, -1, 0, 1, 2]:
188 [xx, yy] = np.mgrid[offset + ex * M:offset + M + ex * M, offset + ey * N:offset + N + ey * N]
189 arg = -(curv[0, 0] * np.multiply(xx, xx) + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy) + curv[
190 1, 1] * np.multiply(yy, yy)) + 1.j * (xx * xi * np.cos(theta) + yy * xi * np.sin(theta))
191 gab = gab + np.exp(arg)
192
193 norm_factor = (2 * 3.1415 * sigma * sigma / slant)
194 gab = gab / norm_factor
195
196 if (fft_shift):
197 gab = np.fft.fftshift(gab, axes=(0, 1))
198 return gab
199
[end of kymatio/scattering2d/filter_bank.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kymatio/scattering2d/filter_bank.py b/kymatio/scattering2d/filter_bank.py
--- a/kymatio/scattering2d/filter_bank.py
+++ b/kymatio/scattering2d/filter_bank.py
@@ -108,7 +108,7 @@
return crop
-def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):
+def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0):
"""
Computes a 2D Morlet filter.
A Morlet filter is the sum of a Gabor filter and a low-pass filter
@@ -132,23 +132,21 @@
parameter which guides the elipsoidal shape of the morlet
offset : int, optional
offset by which the signal starts
- fft_shift : boolean
- if true, shift the signal in a numpy style
Returns
-------
morlet_fft : ndarray
numpy array of size (M, N)
"""
- wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)
- wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)
+ wv = gabor_2d(M, N, sigma, theta, xi, slant, offset)
+ wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset)
K = np.sum(wv) / np.sum(wv_modulus)
mor = wv - K * wv_modulus
return mor
-def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):
+def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0):
"""
Computes a 2D Gabor filter.
A Gabor filter is defined by the following formula in space:
@@ -169,8 +167,6 @@
parameter which guides the elipsoidal shape of the morlet
offset : int, optional
offset by which the signal starts
- fft_shift : boolean
- if true, shift the signal in a numpy style
Returns
-------
@@ -192,7 +188,4 @@
norm_factor = (2 * 3.1415 * sigma * sigma / slant)
gab = gab / norm_factor
-
- if (fft_shift):
- gab = np.fft.fftshift(gab, axes=(0, 1))
return gab
| {"golden_diff": "diff --git a/kymatio/scattering2d/filter_bank.py b/kymatio/scattering2d/filter_bank.py\n--- a/kymatio/scattering2d/filter_bank.py\n+++ b/kymatio/scattering2d/filter_bank.py\n@@ -108,7 +108,7 @@\n return crop\n \n \n-def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):\n+def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0):\n \"\"\"\n Computes a 2D Morlet filter.\n A Morlet filter is the sum of a Gabor filter and a low-pass filter\n@@ -132,23 +132,21 @@\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n- fft_shift : boolean\n- if true, shift the signal in a numpy style\n \n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n- wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)\n- wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)\n+ wv = gabor_2d(M, N, sigma, theta, xi, slant, offset)\n+ wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset)\n K = np.sum(wv) / np.sum(wv_modulus)\n \n mor = wv - K * wv_modulus\n return mor\n \n \n-def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):\n+def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0):\n \"\"\"\n Computes a 2D Gabor filter.\n A Gabor filter is defined by the following formula in space:\n@@ -169,8 +167,6 @@\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n- fft_shift : boolean\n- if true, shift the signal in a numpy style\n \n Returns\n -------\n@@ -192,7 +188,4 @@\n \n norm_factor = (2 * 3.1415 * sigma * sigma / slant)\n gab = gab / norm_factor\n-\n- if (fft_shift):\n- gab = np.fft.fftshift(gab, axes=(0, 1))\n return gab\n", "issue": "Unused `fftshift` option\nIn the `gabor_2d` function, we have an [`fftshift` argument](https://github.com/kymatio/kymatio/blob/02f31a7824011ef7c7c1145342473d32a1133b51/kymatio/scattering2d/filter_bank.py#L196) that is never used. 
Is there a reason for keeping this?\n", "before_files": [{"content": "\"\"\"\nAuthors: Eugene Belilovsky, Edouard Oyallon and Sergey Zagoruyko\nAll rights reserved, 2017.\n\"\"\"\n\n__all__ = ['filter_bank']\n\nimport torch\nimport numpy as np\nfrom .utils import fft2\n\n\ndef filter_bank(M, N, J, L=8):\n \"\"\"\n Builds in Fourier the Morlet filters used for the scattering transform.\n Each single filter is provided as a dictionary with the following keys:\n * 'j' : scale\n * 'theta' : angle used\n Parameters\n ----------\n M, N : int\n spatial support of the input\n J : int\n logscale of the scattering\n L : int, optional\n number of angles used for the wavelet transform\n Returns\n -------\n filters : list\n A two list of dictionary containing respectively the low-pass and\n wavelet filters.\n Notes\n -----\n The design of the filters is optimized for the value L = 8.\n \"\"\"\n filters = {}\n filters['psi'] = []\n\n offset_unpad = 0\n for j in range(J):\n for theta in range(L):\n psi = {}\n psi['j'] = j\n psi['theta'] = theta\n psi_signal = morlet_2d(M, N, 0.8 * 2**j,\n (int(L-L/2-1)-theta) * np.pi / L,\n 3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)\n psi_signal_fourier = fft2(psi_signal)\n for res in range(min(j + 1, J - 1)):\n psi_signal_fourier_res = periodize_filter_fft(\n psi_signal_fourier, res)\n psi[res] = torch.FloatTensor(\n np.stack((np.real(psi_signal_fourier_res),\n np.imag(psi_signal_fourier_res)), axis=2))\n # Normalization to avoid doing it with the FFT.\n psi[res].div_(M*N// 2**(2*j))\n filters['psi'].append(psi)\n\n filters['phi'] = {}\n phi_signal = gabor_2d(M, N, 0.8 * 2**(J-1), 0, 0, offset=offset_unpad)\n phi_signal_fourier = fft2(phi_signal)\n filters['phi']['j'] = J\n for res in range(J):\n phi_signal_fourier_res = periodize_filter_fft(phi_signal_fourier, res)\n filters['phi'][res] = torch.FloatTensor(np.stack(\n (np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)),\n axis=2))\n filters['phi'][res].div_(M*N // 2 ** (2 * J))\n\n return filters\n\n\ndef periodize_filter_fft(x, res):\n \"\"\"\n Parameters\n ----------\n x : numpy array\n signal to periodize in Fourier\n res :\n resolution to which the signal is cropped.\n\n Returns\n -------\n crop : numpy array\n It returns a crop version of the filter, assuming that\n the convolutions will be done via compactly supported signals.\n \"\"\"\n M = x.shape[0]\n N = x.shape[1]\n\n crop = np.zeros((M // 2 ** res, N // 2 ** res), np.complex64)\n\n mask = np.ones(x.shape, np.float32)\n len_x = int(M * (1 - 2 ** (-res)))\n start_x = int(M * 2 ** (-res - 1))\n len_y = int(N * (1 - 2 ** (-res)))\n start_y = int(N * 2 ** (-res - 1))\n mask[start_x:start_x + len_x,:] = 0\n mask[:, start_y:start_y + len_y] = 0\n x = np.multiply(x,mask)\n\n for k in range(int(M / 2 ** res)):\n for l in range(int(N / 2 ** res)):\n for i in range(int(2 ** res)):\n for j in range(int(2 ** res)):\n crop[k, l] += x[k + i * int(M / 2 ** res), l + j * int(N / 2 ** res)]\n\n return crop\n\n\ndef morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Morlet filter.\n A Morlet filter is the sum of a Gabor filter and a low-pass filter\n to ensure that the sum has exactly zero mean in the temporal domain.\n It is defined by the following formula in space:\n psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)\n where g_{sigma} is a Gaussian envelope, xi is a frequency and beta is\n the cancelling parameter.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth 
parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)\n wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)\n K = np.sum(wv) / np.sum(wv_modulus)\n\n mor = wv - K * wv_modulus\n return mor\n\n\ndef gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Gabor filter.\n A Gabor filter is defined by the following formula in space:\n psi(u) = g_{sigma}(u) e^(i xi^T u)\n where g_{sigma} is a Gaussian envelope and xi is a frequency.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n gab = np.zeros((M, N), np.complex64)\n R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)\n R_inv = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], np.float32)\n D = np.array([[1, 0], [0, slant * slant]])\n curv = np.dot(R, np.dot(D, R_inv)) / ( 2 * sigma * sigma)\n\n for ex in [-2, -1, 0, 1, 2]:\n for ey in [-2, -1, 0, 1, 2]:\n [xx, yy] = np.mgrid[offset + ex * M:offset + M + ex * M, offset + ey * N:offset + N + ey * N]\n arg = -(curv[0, 0] * np.multiply(xx, xx) + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy) + curv[\n 1, 1] * np.multiply(yy, yy)) + 1.j * (xx * xi * np.cos(theta) + yy * xi * np.sin(theta))\n gab = gab + np.exp(arg)\n\n norm_factor = (2 * 3.1415 * sigma * sigma / slant)\n gab = gab / norm_factor\n\n if (fft_shift):\n gab = np.fft.fftshift(gab, axes=(0, 1))\n return gab\n", "path": "kymatio/scattering2d/filter_bank.py"}]} | 3,028 | 625 |
gh_patches_debug_13436 | rasdani/github-patches | git_diff | pyca__cryptography-3584 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate Whirlpool and RIPEMD
</issue>
<code>
[start of src/cryptography/hazmat/primitives/hashes.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8
9 import six
10
11 from cryptography import utils
12 from cryptography.exceptions import (
13 AlreadyFinalized, UnsupportedAlgorithm, _Reasons
14 )
15 from cryptography.hazmat.backends.interfaces import HashBackend
16
17
18 @six.add_metaclass(abc.ABCMeta)
19 class HashAlgorithm(object):
20 @abc.abstractproperty
21 def name(self):
22 """
23 A string naming this algorithm (e.g. "sha256", "md5").
24 """
25
26 @abc.abstractproperty
27 def digest_size(self):
28 """
29 The size of the resulting digest in bytes.
30 """
31
32 @abc.abstractproperty
33 def block_size(self):
34 """
35 The internal block size of the hash algorithm in bytes.
36 """
37
38
39 @six.add_metaclass(abc.ABCMeta)
40 class HashContext(object):
41 @abc.abstractproperty
42 def algorithm(self):
43 """
44 A HashAlgorithm that will be used by this context.
45 """
46
47 @abc.abstractmethod
48 def update(self, data):
49 """
50 Processes the provided bytes through the hash.
51 """
52
53 @abc.abstractmethod
54 def finalize(self):
55 """
56 Finalizes the hash context and returns the hash digest as bytes.
57 """
58
59 @abc.abstractmethod
60 def copy(self):
61 """
62 Return a HashContext that is a copy of the current context.
63 """
64
65
66 @utils.register_interface(HashContext)
67 class Hash(object):
68 def __init__(self, algorithm, backend, ctx=None):
69 if not isinstance(backend, HashBackend):
70 raise UnsupportedAlgorithm(
71 "Backend object does not implement HashBackend.",
72 _Reasons.BACKEND_MISSING_INTERFACE
73 )
74
75 if not isinstance(algorithm, HashAlgorithm):
76 raise TypeError("Expected instance of hashes.HashAlgorithm.")
77 self._algorithm = algorithm
78
79 self._backend = backend
80
81 if ctx is None:
82 self._ctx = self._backend.create_hash_ctx(self.algorithm)
83 else:
84 self._ctx = ctx
85
86 algorithm = utils.read_only_property("_algorithm")
87
88 def update(self, data):
89 if self._ctx is None:
90 raise AlreadyFinalized("Context was already finalized.")
91 if not isinstance(data, bytes):
92 raise TypeError("data must be bytes.")
93 self._ctx.update(data)
94
95 def copy(self):
96 if self._ctx is None:
97 raise AlreadyFinalized("Context was already finalized.")
98 return Hash(
99 self.algorithm, backend=self._backend, ctx=self._ctx.copy()
100 )
101
102 def finalize(self):
103 if self._ctx is None:
104 raise AlreadyFinalized("Context was already finalized.")
105 digest = self._ctx.finalize()
106 self._ctx = None
107 return digest
108
109
110 @utils.register_interface(HashAlgorithm)
111 class SHA1(object):
112 name = "sha1"
113 digest_size = 20
114 block_size = 64
115
116
117 @utils.register_interface(HashAlgorithm)
118 class SHA224(object):
119 name = "sha224"
120 digest_size = 28
121 block_size = 64
122
123
124 @utils.register_interface(HashAlgorithm)
125 class SHA256(object):
126 name = "sha256"
127 digest_size = 32
128 block_size = 64
129
130
131 @utils.register_interface(HashAlgorithm)
132 class SHA384(object):
133 name = "sha384"
134 digest_size = 48
135 block_size = 128
136
137
138 @utils.register_interface(HashAlgorithm)
139 class SHA512(object):
140 name = "sha512"
141 digest_size = 64
142 block_size = 128
143
144
145 @utils.register_interface(HashAlgorithm)
146 class RIPEMD160(object):
147 name = "ripemd160"
148 digest_size = 20
149 block_size = 64
150
151
152 @utils.register_interface(HashAlgorithm)
153 class Whirlpool(object):
154 name = "whirlpool"
155 digest_size = 64
156 block_size = 64
157
158
159 @utils.register_interface(HashAlgorithm)
160 class MD5(object):
161 name = "md5"
162 digest_size = 16
163 block_size = 64
164
165
166 @utils.register_interface(HashAlgorithm)
167 class BLAKE2b(object):
168 name = "blake2b"
169 _max_digest_size = 64
170 _min_digest_size = 1
171 block_size = 128
172
173 def __init__(self, digest_size):
174 if (
175 digest_size > self._max_digest_size or
176 digest_size < self._min_digest_size
177 ):
178 raise ValueError("Digest size must be {0}-{1}".format(
179 self._min_digest_size, self._max_digest_size)
180 )
181
182 self._digest_size = digest_size
183
184 digest_size = utils.read_only_property("_digest_size")
185
186
187 @utils.register_interface(HashAlgorithm)
188 class BLAKE2s(object):
189 name = "blake2s"
190 block_size = 64
191 _max_digest_size = 32
192 _min_digest_size = 1
193
194 def __init__(self, digest_size):
195 if (
196 digest_size > self._max_digest_size or
197 digest_size < self._min_digest_size
198 ):
199 raise ValueError("Digest size must be {0}-{1}".format(
200 self._min_digest_size, self._max_digest_size)
201 )
202
203 self._digest_size = digest_size
204
205 digest_size = utils.read_only_property("_digest_size")
206
[end of src/cryptography/hazmat/primitives/hashes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/hashes.py b/src/cryptography/hazmat/primitives/hashes.py
--- a/src/cryptography/hazmat/primitives/hashes.py
+++ b/src/cryptography/hazmat/primitives/hashes.py
@@ -149,6 +149,14 @@
block_size = 64
+RIPEMD160 = utils.deprecated(
+ RIPEMD160,
+ __name__,
+ "The RIPEMD160 hash was deprecated in version 1.9.",
+ utils.DeprecatedIn19
+)
+
+
@utils.register_interface(HashAlgorithm)
class Whirlpool(object):
name = "whirlpool"
@@ -156,6 +164,14 @@
block_size = 64
+Whirlpool = utils.deprecated(
+ Whirlpool,
+ __name__,
+ "The Whirlpool hash was deprecated in version 1.9.",
+ utils.DeprecatedIn19
+)
+
+
@utils.register_interface(HashAlgorithm)
class MD5(object):
name = "md5"
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/hashes.py b/src/cryptography/hazmat/primitives/hashes.py\n--- a/src/cryptography/hazmat/primitives/hashes.py\n+++ b/src/cryptography/hazmat/primitives/hashes.py\n@@ -149,6 +149,14 @@\n block_size = 64\n \n \n+RIPEMD160 = utils.deprecated(\n+ RIPEMD160,\n+ __name__,\n+ \"The RIPEMD160 hash was deprecated in version 1.9.\",\n+ utils.DeprecatedIn19\n+)\n+\n+\n @utils.register_interface(HashAlgorithm)\n class Whirlpool(object):\n name = \"whirlpool\"\n@@ -156,6 +164,14 @@\n block_size = 64\n \n \n+Whirlpool = utils.deprecated(\n+ Whirlpool,\n+ __name__,\n+ \"The Whirlpool hash was deprecated in version 1.9.\",\n+ utils.DeprecatedIn19\n+)\n+\n+\n @utils.register_interface(HashAlgorithm)\n class MD5(object):\n name = \"md5\"\n", "issue": "Deprecate Whirlpool and RIPEMD\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HashBackend\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashAlgorithm(object):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this algorithm (e.g. \"sha256\", \"md5\").\n \"\"\"\n\n @abc.abstractproperty\n def digest_size(self):\n \"\"\"\n The size of the resulting digest in bytes.\n \"\"\"\n\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The internal block size of the hash algorithm in bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashContext(object):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n A HashAlgorithm that will be used by this context.\n \"\"\"\n\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the hash.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalizes the hash context and returns the hash digest as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def copy(self):\n \"\"\"\n Return a HashContext that is a copy of the current context.\n \"\"\"\n\n\[email protected]_interface(HashContext)\nclass Hash(object):\n def __init__(self, algorithm, backend, ctx=None):\n if not isinstance(backend, HashBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HashBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n if not isinstance(algorithm, HashAlgorithm):\n raise TypeError(\"Expected instance of hashes.HashAlgorithm.\")\n self._algorithm = algorithm\n\n self._backend = backend\n\n if ctx is None:\n self._ctx = self._backend.create_hash_ctx(self.algorithm)\n else:\n self._ctx = ctx\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n self._ctx.update(data)\n\n def copy(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n return Hash(\n self.algorithm, backend=self._backend, ctx=self._ctx.copy()\n )\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n digest = self._ctx.finalize()\n self._ctx = None\n return digest\n\n\[email 
protected]_interface(HashAlgorithm)\nclass SHA1(object):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA224(object):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA256(object):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA384(object):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA512(object):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass RIPEMD160(object):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass Whirlpool(object):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass MD5(object):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2b(object):\n name = \"blake2b\"\n _max_digest_size = 64\n _min_digest_size = 1\n block_size = 128\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2s(object):\n name = \"blake2s\"\n block_size = 64\n _max_digest_size = 32\n _min_digest_size = 1\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n", "path": "src/cryptography/hazmat/primitives/hashes.py"}]} | 2,312 | 255 |
gh_patches_debug_59506 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-2973 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixes #2942
</issue>
<code>
[start of pytorch_lightning/accelerators/ddp_backend.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15 import os
16 import subprocess
17 import sys
18 from os.path import abspath
19 from time import sleep
20 from typing import Optional
21
22 import numpy as np
23 import torch
24
25 from pytorch_lightning import _logger as log
26 from pytorch_lightning.utilities import AMPType
27 from pytorch_lightning.utilities.distributed import rank_zero_only
28
29 try:
30 from hydra.utils import to_absolute_path, get_original_cwd
31 from hydra.core.hydra_config import HydraConfig
32 except ImportError:
33 HYDRA_AVAILABLE = False
34 else:
35 HYDRA_AVAILABLE = True
36
37 try:
38 from apex import amp
39 except ImportError:
40 amp = None
41
42
43 class DDPBackend(object):
44
45 def __init__(self, trainer):
46 self.trainer = trainer
47 self.task_idx = None
48
49 def slurm_setup(self):
50 self.task_idx = int(os.environ['SLURM_LOCALID'])
51
52 def torchelastic_setup(self):
53 self.task_idx = int(os.environ['LOCAL_RANK'])
54
55 def train(self, model):
56 self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
57
58 def spawn_ddp_children(self, model):
59 port = os.environ['MASTER_PORT']
60
61 master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']
62 os.environ['MASTER_PORT'] = f'{port}'
63 os.environ['MASTER_ADDR'] = f'{master_address}'
64
65 # allow the user to pass the node rank
66 node_rank = '0'
67 if 'NODE_RANK' in os.environ:
68 node_rank = os.environ['NODE_RANK']
69 if 'GROUP_RANK' in os.environ:
70 node_rank = os.environ['GROUP_RANK']
71
72 os.environ['NODE_RANK'] = node_rank
73 os.environ['LOCAL_RANK'] = '0'
74
75 # when user is using hydra find the absolute path
76 path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path
77
78 # pull out the commands used to run the script and resolve the abs file path
79 command = sys.argv
80 try:
81 full_path = path_lib(command[0])
82 except Exception as e:
83 full_path = abspath(command[0])
84
85 command[0] = full_path
86 # use the same python interpreter and actually running
87 command = [sys.executable] + command
88
89 # since this script sets the visible devices we replace the gpus flag with a number
90 num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
91
92 if '--gpus' in command:
93 gpu_flag_idx = command.index('--gpus')
94 command[gpu_flag_idx + 1] = f'{num_gpus}'
95
96 os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'
97
98 self.trainer.interactive_ddp_procs = []
99 for local_rank in range(1, self.trainer.num_processes):
100 env_copy = os.environ.copy()
101 env_copy['LOCAL_RANK'] = f'{local_rank}'
102
103 # start process
104 # if hydra is available and initialized, make sure to set the cwd correctly
105 cwd: Optional[str] = None
106 if HYDRA_AVAILABLE:
107 if HydraConfig.initialized():
108 cwd = get_original_cwd()
109 proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
110 self.trainer.interactive_ddp_procs.append(proc)
111
112 # starting all processes at once can cause issues
113 # with dataloaders delay between 1-10 seconds
114 delay = np.random.uniform(1, 5, 1)[0]
115 sleep(delay)
116
117 local_rank = 0
118 results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)
119 del os.environ['WORLD_SIZE']
120
121 return results
122
123 def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
124 """
125 Entry point for ddp
126
127 Args:
128 process_idx:
129 mp_queue: multiprocessing queue
130 model:
131 is_master:
132 proc_offset:
133
134 Returns:
135
136 """
137 # offset the process id if requested
138 process_idx = process_idx + proc_offset
139
140 # show progressbar only on progress_rank 0
141 if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
142 self.trainer.progress_bar_callback.disable()
143
144 # determine which process we are and world size
145 self.trainer.local_rank = process_idx
146 self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
147 self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
148
149 # set warning rank
150 rank_zero_only.rank = self.trainer.global_rank
151
152 # set up server using proc 0's ip address
153 # try to init for 20 times at max in case ports are taken
154 # where to store ip_table
155 model.trainer = self.trainer
156 model.init_ddp_connection(
157 self.trainer.global_rank,
158 self.trainer.world_size,
159 self.trainer.is_slurm_managing_tasks
160 )
161
162 # call setup after the ddp process has connected
163 self.trainer.call_setup_hook(model)
164
165 # on world_size=0 let everyone know training is starting
166 if self.trainer.is_global_zero:
167 log.info('-' * 100)
168 log.info(f'distributed_backend={self.trainer.distributed_backend}')
169 log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
170 log.info('-' * 100)
171
172 # call sync_bn before .cuda(), configure_apex and configure_ddp
173 if self.trainer.sync_batchnorm:
174 model = model.configure_sync_batchnorm(model)
175
176 # MODEL
177 # copy model to each gpu
178 if self.trainer.on_gpu:
179 gpu_idx = process_idx
180
181 # when using ddp, the master process (proc 0) continues running as the main one
182 # this means that the local rank will always be 0
183 # (even if cuda visible devices has other visible gpus)
184 # this means that the master process needs to pull the 0th visible index as the device number
185 if is_master:
186 available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
187 gpu_idx = int(available_gpus[self.trainer.local_rank])
188
189 self.trainer.root_gpu = gpu_idx
190 torch.cuda.set_device(self.trainer.root_gpu)
191 model.cuda(self.trainer.root_gpu)
192
193 # CHOOSE OPTIMIZER
194 # allow for lr schedulers as well
195 optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
196 self.trainer.optimizers = optimizers
197 self.trainer.lr_schedulers = lr_schedulers
198 self.trainer.optimizer_frequencies = optimizer_frequencies
199
200 # set model properties before going into wrapper
201 self.trainer.copy_trainer_model_properties(model)
202
203 # AMP - run through amp wrapper before going to distributed DP
204 if self.trainer.amp_backend == AMPType.APEX:
205 model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
206 self.trainer.optimizers = optimizers
207 self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
208
209 # DDP2 uses all GPUs on the machine
210 if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':
211 device_ids = [self.trainer.root_gpu]
212 else: # includes ddp_cpu
213 device_ids = None
214
215 # allow user to configure ddp
216 model = model.configure_ddp(model, device_ids)
217
218 # continue training routine
219 results = self.trainer.run_pretrain_routine(model)
220
221 # get original model
222 model = self.trainer.get_model()
223
224 # persist info in ddp_spawn
225 self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)
226
227 # clean up memory
228 torch.cuda.empty_cache()
229
230 if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:
231 return results
232
[end of pytorch_lightning/accelerators/ddp_backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -87,7 +87,7 @@
command = [sys.executable] + command
# since this script sets the visible devices we replace the gpus flag with a number
- num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
+ num_gpus = torch.cuda.device_count()
if '--gpus' in command:
gpu_flag_idx = command.index('--gpus')
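
A note on the reference diff above: when `CUDA_VISIBLE_DEVICES` is not set, `os.environ.get('CUDA_VISIBLE_DEVICES', [])` falls back to an empty *list*, and a list has no `.split()` method, so the old line could crash with an `AttributeError` (and an empty string would still miscount as one GPU). The snippet below only illustrates that failure mode and the patched alternative; it is not part of the repository:

```python
import os

import torch

# Hypothetical shell where the variable is unset.
os.environ.pop("CUDA_VISIBLE_DEVICES", None)

value = os.environ.get("CUDA_VISIBLE_DEVICES", [])  # -> [] (a list, not a str)
try:
    num_gpus = len(value.split(","))
except AttributeError as exc:
    print(f"old code path fails: {exc}")  # 'list' object has no attribute 'split'

# The patched line asks PyTorch directly instead of parsing the variable.
num_gpus = torch.cuda.device_count()  # 0 on a CPU-only machine
print(num_gpus)
```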
| {"golden_diff": "diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py\n--- a/pytorch_lightning/accelerators/ddp_backend.py\n+++ b/pytorch_lightning/accelerators/ddp_backend.py\n@@ -87,7 +87,7 @@\n command = [sys.executable] + command\n \n # since this script sets the visible devices we replace the gpus flag with a number\n- num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\n+ num_gpus = torch.cuda.device_count()\n \n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n", "issue": "Fixes #2942\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nimport subprocess\nimport sys\nfrom os.path import abspath\nfrom time import sleep\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass DDPBackend(object):\n\n def __init__(self, trainer):\n self.trainer = trainer\n self.task_idx = None\n\n def slurm_setup(self):\n self.task_idx = int(os.environ['SLURM_LOCALID'])\n\n def torchelastic_setup(self):\n self.task_idx = int(os.environ['LOCAL_RANK'])\n\n def train(self, model):\n self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)\n\n def spawn_ddp_children(self, model):\n port = os.environ['MASTER_PORT']\n\n master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']\n os.environ['MASTER_PORT'] = f'{port}'\n os.environ['MASTER_ADDR'] = f'{master_address}'\n\n # allow the user to pass the node rank\n node_rank = '0'\n if 'NODE_RANK' in os.environ:\n node_rank = os.environ['NODE_RANK']\n if 'GROUP_RANK' in os.environ:\n node_rank = os.environ['GROUP_RANK']\n\n os.environ['NODE_RANK'] = node_rank\n os.environ['LOCAL_RANK'] = '0'\n\n # when user is using hydra find the absolute path\n path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception as e:\n full_path = abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n\n # since this script sets the visible devices we replace the gpus flag with a number\n num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\n\n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n command[gpu_flag_idx + 1] = f'{num_gpus}'\n\n os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'\n\n 
self.trainer.interactive_ddp_procs = []\n for local_rank in range(1, self.trainer.num_processes):\n env_copy = os.environ.copy()\n env_copy['LOCAL_RANK'] = f'{local_rank}'\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.trainer.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n local_rank = 0\n results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)\n del os.environ['WORLD_SIZE']\n\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n is_master:\n proc_offset:\n\n Returns:\n\n \"\"\"\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n model.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero:\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. 
Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # MODEL\n # copy model to each gpu\n if self.trainer.on_gpu:\n gpu_idx = process_idx\n\n # when using ddp, the master process (proc 0) continues running as the main one\n # this means that the local rank will always be 0\n # (even if cuda visible devices has other visible gpus)\n # this means that the master process needs to pull the 0th visible index as the device number\n if is_master:\n available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n gpu_idx = int(available_gpus[self.trainer.local_rank])\n\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n # set model properties before going into wrapper\n self.trainer.copy_trainer_model_properties(model)\n\n # AMP - run through amp wrapper before going to distributed DP\n if self.trainer.amp_backend == AMPType.APEX:\n model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)\n self.trainer.optimizers = optimizers\n self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)\n\n # DDP2 uses all GPUs on the machine\n if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':\n device_ids = [self.trainer.root_gpu]\n else: # includes ddp_cpu\n device_ids = None\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # continue training routine\n results = self.trainer.run_pretrain_routine(model)\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:\n return results\n", "path": "pytorch_lightning/accelerators/ddp_backend.py"}]} | 3,109 | 155 |
gh_patches_debug_7920 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verbose mode not working in v3.0.0 RC2
##### Steps to reproduce the problem:
1. Start mitmdump with -v or --verbose flag
2. No DEBUG-level logs are printed on standard output
##### Any other comments? What have you tried so far?
In the old stable version (2.0.2), the same steps produce the desired output.
##### System information
Mitmproxy: 3.0.0.dev1136 (commit 15f525e)
Python: 3.6.3
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-3.16.0-5-amd64-x86_64-with-debian-8.9
</issue>
<code>
[start of mitmproxy/tools/main.py]
1 from __future__ import print_function # this is here for the version check to work on Python 2.
2
3 import sys
4
5 if sys.version_info < (3, 5):
6 # This must be before any mitmproxy imports, as they already break!
7 # Keep all other imports below with the 'noqa' magic comment.
8 print("#" * 49, file=sys.stderr)
9 print("# mitmproxy only supports Python 3.5 and above! #", file=sys.stderr)
10 print("#" * 49, file=sys.stderr)
11
12 import argparse # noqa
13 import os # noqa
14 import signal # noqa
15 import typing # noqa
16
17 from mitmproxy.tools import cmdline # noqa
18 from mitmproxy import exceptions, master # noqa
19 from mitmproxy import options # noqa
20 from mitmproxy import optmanager # noqa
21 from mitmproxy import proxy # noqa
22 from mitmproxy import log # noqa
23 from mitmproxy.utils import debug, arg_check # noqa
24
25
26 def assert_utf8_env():
27 spec = ""
28 for i in ["LANG", "LC_CTYPE", "LC_ALL"]:
29 spec += os.environ.get(i, "").lower()
30 if "utf" not in spec:
31 print(
32 "Error: mitmproxy requires a UTF console environment.",
33 file=sys.stderr
34 )
35 print(
36 "Set your LANG environment variable to something like en_US.UTF-8",
37 file=sys.stderr
38 )
39 sys.exit(1)
40
41
42 def process_options(parser, opts, args):
43 if args.version:
44 print(debug.dump_system_info())
45 sys.exit(0)
46 if args.quiet or args.options or args.commands:
47 args.verbosity = 'error'
48 args.flow_detail = 0
49
50 adict = {}
51 for n in dir(args):
52 if n in opts:
53 adict[n] = getattr(args, n)
54 opts.merge(adict)
55
56 return proxy.config.ProxyConfig(opts)
57
58
59 def run(
60 master_cls: typing.Type[master.Master],
61 make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],
62 arguments: typing.Sequence[str],
63 extra: typing.Callable[[typing.Any], dict] = None
64 ): # pragma: no cover
65 """
66 extra: Extra argument processing callable which returns a dict of
67 options.
68 """
69 debug.register_info_dumpers()
70
71 opts = options.Options()
72 master = master_cls(opts)
73
74 parser = make_parser(opts)
75
76 # To make migration from 2.x to 3.0 bearable.
77 if "-R" in sys.argv and sys.argv[sys.argv.index("-R") + 1].startswith("http"):
78 print("-R is used for specifying replacements.\n"
79 "To use mitmproxy in reverse mode please use --mode reverse:SPEC instead")
80
81 try:
82 args = parser.parse_args(arguments)
83 except SystemExit:
84 arg_check.check()
85 sys.exit(1)
86 try:
87 unknown = optmanager.load_paths(opts, args.conf)
88 pconf = process_options(parser, opts, args)
89 server = None # type: typing.Any
90 if pconf.options.server:
91 try:
92 server = proxy.server.ProxyServer(pconf)
93 except exceptions.ServerException as v:
94 print(str(v), file=sys.stderr)
95 sys.exit(1)
96 else:
97 server = proxy.server.DummyServer(pconf)
98
99 master.server = server
100 master.addons.trigger("configure", opts.keys())
101 master.addons.trigger("tick")
102 remaining = opts.update_known(**unknown)
103 if remaining and log.log_tier(opts.verbosity) > 1:
104 print("Ignored options: %s" % remaining)
105 if args.options:
106 print(optmanager.dump_defaults(opts))
107 sys.exit(0)
108 if args.commands:
109 master.commands.dump()
110 sys.exit(0)
111 opts.set(*args.setoptions)
112 if extra:
113 opts.update(**extra(args))
114
115 def cleankill(*args, **kwargs):
116 master.shutdown()
117
118 signal.signal(signal.SIGTERM, cleankill)
119 master.run()
120 except exceptions.OptionsError as e:
121 print("%s: %s" % (sys.argv[0], e), file=sys.stderr)
122 sys.exit(1)
123 except (KeyboardInterrupt, RuntimeError) as e:
124 pass
125 return master
126
127
128 def mitmproxy(args=None): # pragma: no cover
129 if os.name == "nt":
130 print("Error: mitmproxy's console interface is not supported on Windows. "
131 "You can run mitmdump or mitmweb instead.", file=sys.stderr)
132 sys.exit(1)
133
134 assert_utf8_env()
135
136 from mitmproxy.tools import console
137 run(console.master.ConsoleMaster, cmdline.mitmproxy, args)
138
139
140 def mitmdump(args=None): # pragma: no cover
141 from mitmproxy.tools import dump
142
143 def extra(args):
144 if args.filter_args:
145 v = " ".join(args.filter_args)
146 return dict(
147 view_filter=v,
148 save_stream_filter=v,
149 )
150 return {}
151
152 m = run(dump.DumpMaster, cmdline.mitmdump, args, extra)
153 if m and m.errorcheck.has_errored:
154 sys.exit(1)
155
156
157 def mitmweb(args=None): # pragma: no cover
158 from mitmproxy.tools import web
159 run(web.master.WebMaster, cmdline.mitmweb, args)
160
[end of mitmproxy/tools/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/tools/main.py b/mitmproxy/tools/main.py
--- a/mitmproxy/tools/main.py
+++ b/mitmproxy/tools/main.py
@@ -44,8 +44,13 @@
print(debug.dump_system_info())
sys.exit(0)
if args.quiet or args.options or args.commands:
+ # also reduce log verbosity if --options or --commands is passed,
+ # we don't want log messages from regular startup then.
args.verbosity = 'error'
args.flow_detail = 0
+ if args.verbose:
+ args.verbosity = 'debug'
+ args.flow_detail = 2
adict = {}
for n in dir(args):
| {"golden_diff": "diff --git a/mitmproxy/tools/main.py b/mitmproxy/tools/main.py\n--- a/mitmproxy/tools/main.py\n+++ b/mitmproxy/tools/main.py\n@@ -44,8 +44,13 @@\n print(debug.dump_system_info())\n sys.exit(0)\n if args.quiet or args.options or args.commands:\n+ # also reduce log verbosity if --options or --commands is passed,\n+ # we don't want log messages from regular startup then.\n args.verbosity = 'error'\n args.flow_detail = 0\n+ if args.verbose:\n+ args.verbosity = 'debug'\n+ args.flow_detail = 2\n \n adict = {}\n for n in dir(args):\n", "issue": "Verbose mode not working in v3.0.0 RC2\n##### Steps to reproduce the problem:\r\n\r\n1. Start mitmdump with -v or --verbose flag\r\n2. No DEBUG level logs prints on standard output\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nIn old stable version (2.0.2) the same steps produce desired output.\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev1136 (commit 15f525e)\r\nPython: 3.6.3\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-3.16.0-5-amd64-x86_64-with-debian-8.9\n", "before_files": [{"content": "from __future__ import print_function # this is here for the version check to work on Python 2.\n\nimport sys\n\nif sys.version_info < (3, 5):\n # This must be before any mitmproxy imports, as they already break!\n # Keep all other imports below with the 'noqa' magic comment.\n print(\"#\" * 49, file=sys.stderr)\n print(\"# mitmproxy only supports Python 3.5 and above! #\", file=sys.stderr)\n print(\"#\" * 49, file=sys.stderr)\n\nimport argparse # noqa\nimport os # noqa\nimport signal # noqa\nimport typing # noqa\n\nfrom mitmproxy.tools import cmdline # noqa\nfrom mitmproxy import exceptions, master # noqa\nfrom mitmproxy import options # noqa\nfrom mitmproxy import optmanager # noqa\nfrom mitmproxy import proxy # noqa\nfrom mitmproxy import log # noqa\nfrom mitmproxy.utils import debug, arg_check # noqa\n\n\ndef assert_utf8_env():\n spec = \"\"\n for i in [\"LANG\", \"LC_CTYPE\", \"LC_ALL\"]:\n spec += os.environ.get(i, \"\").lower()\n if \"utf\" not in spec:\n print(\n \"Error: mitmproxy requires a UTF console environment.\",\n file=sys.stderr\n )\n print(\n \"Set your LANG environment variable to something like en_US.UTF-8\",\n file=sys.stderr\n )\n sys.exit(1)\n\n\ndef process_options(parser, opts, args):\n if args.version:\n print(debug.dump_system_info())\n sys.exit(0)\n if args.quiet or args.options or args.commands:\n args.verbosity = 'error'\n args.flow_detail = 0\n\n adict = {}\n for n in dir(args):\n if n in opts:\n adict[n] = getattr(args, n)\n opts.merge(adict)\n\n return proxy.config.ProxyConfig(opts)\n\n\ndef run(\n master_cls: typing.Type[master.Master],\n make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],\n arguments: typing.Sequence[str],\n extra: typing.Callable[[typing.Any], dict] = None\n): # pragma: no cover\n \"\"\"\n extra: Extra argument processing callable which returns a dict of\n options.\n \"\"\"\n debug.register_info_dumpers()\n\n opts = options.Options()\n master = master_cls(opts)\n\n parser = make_parser(opts)\n\n # To make migration from 2.x to 3.0 bearable.\n if \"-R\" in sys.argv and sys.argv[sys.argv.index(\"-R\") + 1].startswith(\"http\"):\n print(\"-R is used for specifying replacements.\\n\"\n \"To use mitmproxy in reverse mode please use --mode reverse:SPEC instead\")\n\n try:\n args = parser.parse_args(arguments)\n except SystemExit:\n arg_check.check()\n sys.exit(1)\n try:\n unknown = optmanager.load_paths(opts, args.conf)\n pconf = 
process_options(parser, opts, args)\n server = None # type: typing.Any\n if pconf.options.server:\n try:\n server = proxy.server.ProxyServer(pconf)\n except exceptions.ServerException as v:\n print(str(v), file=sys.stderr)\n sys.exit(1)\n else:\n server = proxy.server.DummyServer(pconf)\n\n master.server = server\n master.addons.trigger(\"configure\", opts.keys())\n master.addons.trigger(\"tick\")\n remaining = opts.update_known(**unknown)\n if remaining and log.log_tier(opts.verbosity) > 1:\n print(\"Ignored options: %s\" % remaining)\n if args.options:\n print(optmanager.dump_defaults(opts))\n sys.exit(0)\n if args.commands:\n master.commands.dump()\n sys.exit(0)\n opts.set(*args.setoptions)\n if extra:\n opts.update(**extra(args))\n\n def cleankill(*args, **kwargs):\n master.shutdown()\n\n signal.signal(signal.SIGTERM, cleankill)\n master.run()\n except exceptions.OptionsError as e:\n print(\"%s: %s\" % (sys.argv[0], e), file=sys.stderr)\n sys.exit(1)\n except (KeyboardInterrupt, RuntimeError) as e:\n pass\n return master\n\n\ndef mitmproxy(args=None): # pragma: no cover\n if os.name == \"nt\":\n print(\"Error: mitmproxy's console interface is not supported on Windows. \"\n \"You can run mitmdump or mitmweb instead.\", file=sys.stderr)\n sys.exit(1)\n\n assert_utf8_env()\n\n from mitmproxy.tools import console\n run(console.master.ConsoleMaster, cmdline.mitmproxy, args)\n\n\ndef mitmdump(args=None): # pragma: no cover\n from mitmproxy.tools import dump\n\n def extra(args):\n if args.filter_args:\n v = \" \".join(args.filter_args)\n return dict(\n view_filter=v,\n save_stream_filter=v,\n )\n return {}\n\n m = run(dump.DumpMaster, cmdline.mitmdump, args, extra)\n if m and m.errorcheck.has_errored:\n sys.exit(1)\n\n\ndef mitmweb(args=None): # pragma: no cover\n from mitmproxy.tools import web\n run(web.master.WebMaster, cmdline.mitmweb, args)\n", "path": "mitmproxy/tools/main.py"}]} | 2,258 | 156 |
gh_patches_debug_21574 | rasdani/github-patches | git_diff | Mailu__Mailu-2632 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Small feature - add mailu version in the admin UI
Add mailu version in the admin UI
I think it would be nice to be able to see which version of Mailu we are running, unless there is another simple way?
Thanks
</issue>
<code>
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 import ipaddress
5
6 DEFAULT_CONFIG = {
7 # Specific to the admin UI
8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
9 'BABEL_DEFAULT_LOCALE': 'en',
10 'BABEL_DEFAULT_TIMEZONE': 'UTC',
11 'BOOTSTRAP_SERVE_LOCAL': True,
12 'RATELIMIT_STORAGE_URL': '',
13 'DEBUG': False,
14 'DEBUG_PROFILER': False,
15 'DEBUG_TB_INTERCEPT_REDIRECTS': False,
16 'DEBUG_ASSETS': '',
17 'DOMAIN_REGISTRATION': False,
18 'TEMPLATES_AUTO_RELOAD': True,
19 'MEMORY_SESSIONS': False,
20 'FETCHMAIL_ENABLED': True,
21 # Database settings
22 'DB_FLAVOR': None,
23 'DB_USER': 'mailu',
24 'DB_PW': None,
25 'DB_HOST': 'database',
26 'DB_NAME': 'mailu',
27 'SQLITE_DATABASE_FILE': 'data/main.db',
28 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
29 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
30 # Statistics management
31 'INSTANCE_ID_PATH': '/data/instance',
32 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
33 # Common configuration variables
34 'SECRET_KEY': 'changeMe',
35 'DOMAIN': 'mailu.io',
36 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
37 'POSTMASTER': 'postmaster',
38 'WILDCARD_SENDERS': '',
39 'TLS_FLAVOR': 'cert',
40 'INBOUND_TLS_ENFORCE': False,
41 'DEFER_ON_TLS_ERROR': True,
42 'AUTH_RATELIMIT_IP': '60/hour',
43 'AUTH_RATELIMIT_IP_V4_MASK': 24,
44 'AUTH_RATELIMIT_IP_V6_MASK': 56,
45 'AUTH_RATELIMIT_USER': '100/day',
46 'AUTH_RATELIMIT_EXEMPTION': '',
47 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
48 'DISABLE_STATISTICS': False,
49 # Mail settings
50 'DMARC_RUA': None,
51 'DMARC_RUF': None,
52 'WELCOME': False,
53 'WELCOME_SUBJECT': 'Dummy welcome topic',
54 'WELCOME_BODY': 'Dummy welcome body',
55 'DKIM_SELECTOR': 'dkim',
56 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
57 'DEFAULT_QUOTA': 1000000000,
58 'MESSAGE_RATELIMIT': '200/day',
59 'MESSAGE_RATELIMIT_EXEMPTION': '',
60 'RECIPIENT_DELIMITER': '',
61 # Web settings
62 'SITENAME': 'Mailu',
63 'WEBSITE': 'https://mailu.io',
64 'ADMIN': 'none',
65 'WEB_ADMIN': '/admin',
66 'WEB_WEBMAIL': '/webmail',
67 'WEBMAIL': 'none',
68 'RECAPTCHA_PUBLIC_KEY': '',
69 'RECAPTCHA_PRIVATE_KEY': '',
70 'LOGO_URL': None,
71 'LOGO_BACKGROUND': None,
72 # Advanced settings
73 'API': False,
74 'WEB_API': '/api',
75 'API_TOKEN': None,
76 'LOG_LEVEL': 'WARNING',
77 'SESSION_KEY_BITS': 128,
78 'SESSION_TIMEOUT': 3600,
79 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
80 'SESSION_COOKIE_SECURE': None,
81 'CREDENTIAL_ROUNDS': 12,
82 'TLS_PERMISSIVE': True,
83 'TZ': 'Etc/UTC',
84 'DEFAULT_SPAM_THRESHOLD': 80,
85 'PROXY_AUTH_WHITELIST': '',
86 'PROXY_AUTH_HEADER': 'X-Auth-Email',
87 'PROXY_AUTH_CREATE': False,
88 'SUBNET': '192.168.203.0/24',
89 'SUBNET6': None,
90 }
91
92 class ConfigManager:
93 """ Naive configuration manager that uses environment only
94 """
95
96 DB_TEMPLATES = {
97 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
98 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
99 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
100 }
101
102 def __init__(self):
103 self.config = dict()
104
105 def __get_env(self, key, value):
106 key_file = key + "_FILE"
107 if key_file in os.environ:
108 with open(os.environ.get(key_file)) as file:
109 value_from_file = file.read()
110 return value_from_file.strip()
111 else:
112 return os.environ.get(key, value)
113
114 def __coerce_value(self, value):
115 if isinstance(value, str) and value.lower() in ('true','yes'):
116 return True
117 elif isinstance(value, str) and value.lower() in ('false', 'no'):
118 return False
119 return value
120
121 def init_app(self, app):
122 # get current app config
123 self.config.update(app.config)
124 # get environment variables
125 for key in os.environ:
126 if key.endswith('_ADDRESS'):
127 self.config[key] = os.environ[key]
128
129 self.config.update({
130 key: self.__coerce_value(self.__get_env(key, value))
131 for key, value in DEFAULT_CONFIG.items()
132 })
133
134 # automatically set the sqlalchemy string
135 if self.config['DB_FLAVOR']:
136 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
137 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
138
139 if not self.config.get('RATELIMIT_STORAGE_URL'):
140 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
141
142 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
143 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
144 self.config['SESSION_COOKIE_HTTPONLY'] = True
145 if self.config['SESSION_COOKIE_SECURE'] is None:
146 self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'
147 self.config['SESSION_PERMANENT'] = True
148 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
149 self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])
150 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
151 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
152 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
153 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
154 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
155 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
156 self.config['HOSTNAMES'] = ','.join(hostnames)
157 self.config['HOSTNAME'] = hostnames[0]
158 self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
159 self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
160
161 # update the app config
162 app.config.update(self.config)
163
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -18,6 +18,7 @@
'TEMPLATES_AUTO_RELOAD': True,
'MEMORY_SESSIONS': False,
'FETCHMAIL_ENABLED': True,
+ 'MAILU_VERSION': 'unknown',
# Database settings
'DB_FLAVOR': None,
'DB_USER': 'mailu',
@@ -157,6 +158,10 @@
self.config['HOSTNAME'] = hostnames[0]
self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
+ try:
+ self.config['MAILU_VERSION'] = open('/version', 'r').read()
+ except FileNotFoundError:
+ pass
# update the app config
app.config.update(self.config)
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -18,6 +18,7 @@\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n+ 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n@@ -157,6 +158,10 @@\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n+ try:\n+ self.config['MAILU_VERSION'] = open('/version', 'r').read()\n+ except FileNotFoundError:\n+ pass\n \n # update the app config\n app.config.update(self.config)\n", "issue": "Small feature - add mailu version in the admin UI \nAdd mailu version in the admin UI \r\nI think it would nice to be able to see which version of mailu we are running, unless there is other simple way?\r\n\r\nThanks\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 
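
The patch above answers the request by introducing a `MAILU_VERSION` default of `'unknown'` and overwriting it from a `/version` file shipped in the image when that file exists; templates can then read the value from the app config. A minimal sketch of the same pattern in isolation — the path and fallback come from the diff, while the helper name and the `.strip()` are illustrative additions:

```python
def read_version(path: str = "/version", fallback: str = "unknown") -> str:
    """Return the version string baked into the image, or a fallback."""
    try:
        with open(path, "r") as handle:
            # The patch returns the raw file content; stripping the trailing
            # newline here is just a cosmetic choice for the sketch.
            return handle.read().strip()
    except FileNotFoundError:
        return fallback


# A (hypothetical) Jinja template could then show e.g.:
#   Mailu {{ config['MAILU_VERSION'] }}
print(read_version())
```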
'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}]} | 2,654 | 253 |
gh_patches_debug_6380 | rasdani/github-patches | git_diff | obspy__obspy-2311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libgcf assumes 6 character stream IDs
The GCF reader (libgcf.py) assumes the stream ID is 6 characters, and hard-codes access to element 4:
```
header['station'] = stid[:4]
header['channel'] = (channel_prefix[:2] + stid[4]).upper()
```
The Stream ID is made up of three parts: the first is up to 4 characters for the serial number, then one char for the channel letter, then one char for the tap number. Rather than assume the channel letter is at index 4, it should be treated as the 2nd last...
```
header['station'] = stid[:-2]
header['channel'] = (channel_prefix[:2] + stid[-2]).upper()
```
This was causing some valid GCF files to be considered invalid when they had short IDs.
</issue>
<code>
[start of obspy/io/gcf/libgcf.py]
1 # -*- coding: utf-8 -*-
2 # reads Guralp Compressed Format (GCF) Files
3 # By Ran Novitsky Nof @ BSL, 2016
4 # [email protected]
5 # Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)
6 # more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro
7 # last access: June, 2016
8 from __future__ import (absolute_import, division, print_function,
9 unicode_literals)
10 from future.builtins import * # NOQA
11
12 import numpy as np
13
14 from obspy import UTCDateTime
15
16 SPS_D = { # Table 3.1: special sample rates
17 157: 0.1,
18 161: 0.125,
19 162: 0.2,
20 164: 0.25,
21 167: 0.5,
22 171: 400,
23 174: 500,
24 176: 1000,
25 179: 2000,
26 181: 4000}
27 TIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator
28 171: 8.,
29 174: 2.,
30 176: 4.,
31 179: 8.,
32 181: 16.}
33 COMPRESSION_D = { # Table 3.2: format field to data type
34 1: '>i4',
35 2: '>i2',
36 4: '>i1'}
37
38
39 def is_gcf(f):
40 """
41 Test if file is GCF by reading at least 1 data block
42 """
43 header, data = read_data_block(f)
44
45
46 def decode36(data):
47 """
48 Converts an integer into a base36 string.
49 """
50 # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm
51 s = ''
52 while data:
53 imed = data % 36
54 if imed > 9:
55 c = chr(imed - 10 + ord('A'))
56 else:
57 c = chr(imed + ord('0'))
58 s = c + s
59 data = data // 36
60 return s
61
62
63 def decode_date_time(data):
64 """
65 Decode date and time field.
66
67 The date code is a 32 bit value specifying the start time of the block.
68 Bits 0-16 contain the number of seconds since midnight,
69 and bits 17-31 the number of days since 17th November 1989.
70 """
71 # prevent numpy array
72 days = int(data >> 17)
73 secs = int(data & 0x1FFFF)
74 starttime = UTCDateTime('1989-11-17') + days * 86400 + secs
75 return starttime
76
77
78 def read_data_block(f, headonly=False, channel_prefix="HH", **kwargs):
79 """
80 Read one data block from GCF file.
81
82 more details can be found here:
83 http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm
84 f - file object to read from
85 if skipData is True, Only header is returned.
86 if not a data block (SPS=0) - returns None.
87 """
88 # get ID
89 sysid = f.read(4)
90 if not sysid:
91 raise EOFError # got to EOF
92 sysid = np.frombuffer(sysid, count=1, dtype='>u4')
93 if sysid >> 31 & 0b1 > 0:
94 sysid = (sysid << 6) >> 6
95 sysid = decode36(sysid)
96 # get Stream ID
97 stid = np.frombuffer(f.read(4), count=1, dtype='>u4')
98 stid = decode36(stid)
99 # get Date & Time
100 data = np.frombuffer(f.read(4), count=1, dtype='>u4')
101 starttime = decode_date_time(data)
102 # get data format
103 # get reserved, SPS, data type compression,
104 # number of 32bit records (num_records)
105 reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,
106 dtype='>u1')
107 compression = compress & 0b00000111 # get compression code
108 t_offset = compress >> 4 # get time offset
109 if t_offset > 0:
110 starttime = starttime + t_offset / TIME_OFFSETS_D[sps]
111 if sps in SPS_D:
112 sps = SPS_D[sps] # get special SPS value if needed
113 if not sps:
114 f.seek(num_records * 4, 1) # skip if not a data block
115 if 1008 - num_records * 4 > 0:
116 # keep skipping to get 1008 record
117 f.seek(1008 - num_records * 4, 1)
118 return None
119 npts = num_records * compression # number of samples
120 header = {}
121 header['starttime'] = starttime
122 header['station'] = stid[:4]
123 header['channel'] = (channel_prefix[:2] + stid[4]).upper()
124 header['sampling_rate'] = float(sps)
125 header['npts'] = npts
126 if headonly:
127 f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)
128 # skip to end of block if only partly filled with data
129 if 1000 - num_records * 4 > 0:
130 f.seek(1000 - num_records * 4, 1)
131 return header
132 else:
133 # get FIC
134 fic = np.frombuffer(f.read(4), count=1, dtype='>i4')
135 # get incremental data
136 data = np.frombuffer(f.read(4 * num_records), count=npts,
137 dtype=COMPRESSION_D[compression])
138 # construct time series
139 data = (fic + np.cumsum(data)).astype('i4')
140 # get RIC
141 ric = np.frombuffer(f.read(4), count=1, dtype='>i4')
142 # skip to end of block if only partly filled with data
143 if 1000 - num_records * 4 > 0:
144 f.seek(1000 - num_records * 4, 1)
145 # verify last data sample matches RIC
146 if not data[-1] == ric:
147 raise ValueError("Last sample mismatch with RIC")
148 return header, data
149
150
151 def read_header(f, **kwargs):
152 """
153 Reads header only from GCF file.
154 """
155 return read_data_block(f, headonly=True, **kwargs)
156
157
158 def read(f, **kwargs):
159 """
160 Reads header and data from GCF file.
161 """
162 return read_data_block(f, headonly=False, **kwargs)
163
[end of obspy/io/gcf/libgcf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py
--- a/obspy/io/gcf/libgcf.py
+++ b/obspy/io/gcf/libgcf.py
@@ -119,8 +119,8 @@
npts = num_records * compression # number of samples
header = {}
header['starttime'] = starttime
- header['station'] = stid[:4]
- header['channel'] = (channel_prefix[:2] + stid[4]).upper()
+ header['station'] = stid[:-2]
+ header['channel'] = (channel_prefix[:2] + stid[-2]).upper()
header['sampling_rate'] = float(sps)
header['npts'] = npts
if headonly:
| {"golden_diff": "diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py\n--- a/obspy/io/gcf/libgcf.py\n+++ b/obspy/io/gcf/libgcf.py\n@@ -119,8 +119,8 @@\n npts = num_records * compression # number of samples\n header = {}\n header['starttime'] = starttime\n- header['station'] = stid[:4]\n- header['channel'] = (channel_prefix[:2] + stid[4]).upper()\n+ header['station'] = stid[:-2]\n+ header['channel'] = (channel_prefix[:2] + stid[-2]).upper()\n header['sampling_rate'] = float(sps)\n header['npts'] = npts\n if headonly:\n", "issue": "libgcf assumes 6 character stream IDs\nThe GCF reader (libgcf.py) assumes the stream ID is 6 characters, and hard-codes access to element 4:\r\n```\r\n header['station'] = stid[:4]\r\n header['channel'] = (channel_prefix[:2] + stid[4]).upper()\r\n```\r\n\r\nThe Stream ID is made up of three parts: first is up to 4 characters for serial number, then one char for channel letter, then one channel for tap number. Rather than assume the channel letter is at index 4, it should be considered the 2nd last...\r\n```\r\n header['station'] = stid[:-2]\r\n header['channel'] = (channel_prefix[:2] + stid[-2]).upper()\r\n```\r\n\r\nThis was causing some valid GCF files to be considered invalid when they had short IDs.\nlibgcf assumes 6 character stream IDs\nThe GCF reader (libgcf.py) assumes the stream ID is 6 characters, and hard-codes access to element 4:\r\n```\r\n header['station'] = stid[:4]\r\n header['channel'] = (channel_prefix[:2] + stid[4]).upper()\r\n```\r\n\r\nThe Stream ID is made up of three parts: first is up to 4 characters for serial number, then one char for channel letter, then one channel for tap number. Rather than assume the channel letter is at index 4, it should be considered the 2nd last...\r\n```\r\n header['station'] = stid[:-2]\r\n header['channel'] = (channel_prefix[:2] + stid[-2]).upper()\r\n```\r\n\r\nThis was causing some valid GCF files to be considered invalid when they had short IDs.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# reads Guralp Compressed Format (GCF) Files\n# By Ran Novitsky Nof @ BSL, 2016\n# [email protected]\n# Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)\n# more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro\n# last access: June, 2016\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport numpy as np\n\nfrom obspy import UTCDateTime\n\nSPS_D = { # Table 3.1: special sample rates\n 157: 0.1,\n 161: 0.125,\n 162: 0.2,\n 164: 0.25,\n 167: 0.5,\n 171: 400,\n 174: 500,\n 176: 1000,\n 179: 2000,\n 181: 4000}\nTIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator\n 171: 8.,\n 174: 2.,\n 176: 4.,\n 179: 8.,\n 181: 16.}\nCOMPRESSION_D = { # Table 3.2: format field to data type\n 1: '>i4',\n 2: '>i2',\n 4: '>i1'}\n\n\ndef is_gcf(f):\n \"\"\"\n Test if file is GCF by reading at least 1 data block\n \"\"\"\n header, data = read_data_block(f)\n\n\ndef decode36(data):\n \"\"\"\n Converts an integer into a base36 string.\n \"\"\"\n # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm\n s = ''\n while data:\n imed = data % 36\n if imed > 9:\n c = chr(imed - 10 + ord('A'))\n else:\n c = chr(imed + ord('0'))\n s = c + s\n data = data // 36\n return s\n\n\ndef decode_date_time(data):\n \"\"\"\n Decode date and time field.\n\n The date code is a 32 bit value specifying the start time of the block.\n Bits 0-16 contain the number of seconds since midnight,\n 
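
A worked illustration of why the negative indices in the diff above are the safer choice: with a full 6-character stream ID the old and new slicing agree, but as soon as the serial number is shorter, `stid[4]` picks up the tap digit (or raises `IndexError` on a 4-character ID), while `stid[-2]` still lands on the channel letter. The IDs below are made up for the example:

```python
def split_stream_id(stid: str):
    """Station = everything before the last two chars, channel letter = 2nd last."""
    return stid[:-2], stid[-2]


print(split_stream_id("6018Z2"))  # ('6018', 'Z') -- same result as stid[:4], stid[4]
print(split_stream_id("618Z2"))   # ('618', 'Z')  -- stid[4] would have returned '2'
```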
and bits 17-31 the number of days since 17th November 1989.\n \"\"\"\n # prevent numpy array\n days = int(data >> 17)\n secs = int(data & 0x1FFFF)\n starttime = UTCDateTime('1989-11-17') + days * 86400 + secs\n return starttime\n\n\ndef read_data_block(f, headonly=False, channel_prefix=\"HH\", **kwargs):\n \"\"\"\n Read one data block from GCF file.\n\n more details can be found here:\n http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm\n f - file object to read from\n if skipData is True, Only header is returned.\n if not a data block (SPS=0) - returns None.\n \"\"\"\n # get ID\n sysid = f.read(4)\n if not sysid:\n raise EOFError # got to EOF\n sysid = np.frombuffer(sysid, count=1, dtype='>u4')\n if sysid >> 31 & 0b1 > 0:\n sysid = (sysid << 6) >> 6\n sysid = decode36(sysid)\n # get Stream ID\n stid = np.frombuffer(f.read(4), count=1, dtype='>u4')\n stid = decode36(stid)\n # get Date & Time\n data = np.frombuffer(f.read(4), count=1, dtype='>u4')\n starttime = decode_date_time(data)\n # get data format\n # get reserved, SPS, data type compression,\n # number of 32bit records (num_records)\n reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,\n dtype='>u1')\n compression = compress & 0b00000111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n starttime = starttime + t_offset / TIME_OFFSETS_D[sps]\n if sps in SPS_D:\n sps = SPS_D[sps] # get special SPS value if needed\n if not sps:\n f.seek(num_records * 4, 1) # skip if not a data block\n if 1008 - num_records * 4 > 0:\n # keep skipping to get 1008 record\n f.seek(1008 - num_records * 4, 1)\n return None\n npts = num_records * compression # number of samples\n header = {}\n header['starttime'] = starttime\n header['station'] = stid[:4]\n header['channel'] = (channel_prefix[:2] + stid[4]).upper()\n header['sampling_rate'] = float(sps)\n header['npts'] = npts\n if headonly:\n f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n return header\n else:\n # get FIC\n fic = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # get incremental data\n data = np.frombuffer(f.read(4 * num_records), count=npts,\n dtype=COMPRESSION_D[compression])\n # construct time series\n data = (fic + np.cumsum(data)).astype('i4')\n # get RIC\n ric = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n # verify last data sample matches RIC\n if not data[-1] == ric:\n raise ValueError(\"Last sample mismatch with RIC\")\n return header, data\n\n\ndef read_header(f, **kwargs):\n \"\"\"\n Reads header only from GCF file.\n \"\"\"\n return read_data_block(f, headonly=True, **kwargs)\n\n\ndef read(f, **kwargs):\n \"\"\"\n Reads header and data from GCF file.\n \"\"\"\n return read_data_block(f, headonly=False, **kwargs)\n", "path": "obspy/io/gcf/libgcf.py"}]} | 2,942 | 184 |
gh_patches_debug_4110 | rasdani/github-patches | git_diff | piskvorky__gensim-2629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
D2VTransformer raises if passed a Pandas series without index key 0
`D2VTransformer` raises if passed a Pandas series with an index that does not contain the key 0:
```python
import pandas as pd
from gensim.sklearn_api import D2VTransformer
from gensim.test.utils import common_texts
series = pd.Series(common_texts)
series.index += 1 # Increment the index so that it does not contain the key 0
transformer = D2VTransformer(min_count=1, size=5)
transformer.fit(series)
```
Output:
```python
Traceback (most recent call last):
File "main.py", line 9, in <module>
transformer.fit(series)
File "venv/lib/python3.7/site-packages/gensim/sklearn_api/d2vmodel.py", line 162, in fit
if isinstance(X[0], doc2vec.TaggedDocument):
File "venv/lib/python3.7/site-packages/pandas/core/series.py", line 868, in __getitem__
result = self.index.get_value(self, key)
File "venv/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 4375, in get_value
tz=getattr(series.dtype, 'tz', None))
File "pandas/_libs/index.pyx", line 81, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 89, in pandas._libs.index.IndexEngine.get_value
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 987, in pandas._libs.hashtable.Int64HashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 993, in pandas._libs.hashtable.Int64HashTable.get_item
KeyError: 0
```
This occurs because the [`fit`](https://github.com/RaRe-Technologies/gensim/blob/4543646d3fe3496e11bc935e72cbf9b18504442e/gensim/sklearn_api/d2vmodel.py#L162) and [`transform`](https://github.com/RaRe-Technologies/gensim/blob/4543646d3fe3496e11bc935e72cbf9b18504442e/gensim/sklearn_api/d2vmodel.py#L198) methods of `D2VTransformer` require `__getitem__` on the passed iterable not to raise an exception for key 0.
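One possible workaround on the caller's side, continuing the snippet above, is to pass `fit` an object that does expose an item at position/key 0:
```python
# Either of these avoids the KeyError, since the object handed to fit()
# supports __getitem__ with key 0 again.
transformer.fit(series.tolist())                # plain list of token lists
transformer.fit(series.reset_index(drop=True))  # Series re-indexed from 0
```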
Versions:
Darwin-18.6.0-x86_64-i386-64bit
Python 3.7.3 (default, Mar 27 2019, 09:23:15) [Clang 10.0.1 (clang-1001.0.46.3)]
NumPy 1.16.4
SciPy 1.3.0
gensim 3.8.0
FAST_VERSION 1
</issue>
<code>
[start of gensim/sklearn_api/d2vmodel.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2011 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """Scikit learn interface for :class:`~gensim.models.doc2vec.Doc2Vec`.
8
9 Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
10
11 Examples
12 --------
13 .. sourcecode:: pycon
14
15 >>> from gensim.test.utils import common_texts
16 >>> from gensim.sklearn_api import D2VTransformer
17 >>>
18 >>> model = D2VTransformer(min_count=1, size=5)
19 >>> docvecs = model.fit_transform(common_texts) # represent `common_texts` as vectors
20
21 """
22 import numpy as np
23 from six import string_types
24 from sklearn.base import TransformerMixin, BaseEstimator
25 from sklearn.exceptions import NotFittedError
26
27 from gensim import models
28 from gensim.models import doc2vec
29
30
31 class D2VTransformer(TransformerMixin, BaseEstimator):
32 """Base Doc2Vec module, wraps :class:`~gensim.models.doc2vec.Doc2Vec`.
33
34 This model based on `Quoc Le, Tomas Mikolov: "Distributed Representations of Sentences and Documents"
35 <https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`_.
36
37 """
38 def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,
39 docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,
40 max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,
41 hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):
42 """
43
44 Parameters
45 ----------
46
47 dm_mean : int {1,0}, optional
48 If 0, use the sum of the context word vectors. If 1, use the mean. Only applies when `dm_concat=0`.
49 dm : int {1,0}, optional
50 Defines the training algorithm. If `dm=1` - distributed memory (PV-DM) is used.
51 Otherwise, distributed bag of words (PV-DBOW) is employed.
52 dbow_words : int {1,0}, optional
53 If set to 1 - trains word-vectors (in skip-gram fashion) simultaneous with DBOW
54 doc-vector training, If 0, only trains doc-vectors (faster).
55 dm_concat : int {1,0}, optional
56 If 1, use concatenation of context vectors rather than sum/average.
57 Note concatenation results in a much-larger model, as the input is no longer the size of one
58 (sampled or arithmetically combined) word vector, but the size of the tag(s) and all words
59 in the context strung together.
60 dm_tag_count : int, optional
61 Expected constant number of document tags per document, when using dm_concat mode.
62 docvecs : :class:`~gensim.models.keyedvectors.Doc2VecKeyedVectors`
63 A mapping from a string or int tag to its vector representation.
64 Either this or `docvecs_mapfile` **MUST** be supplied.
65 docvecs_mapfile : str, optional
66 Path to a file containing the docvecs mapping. If `docvecs` is None, this file will be used to create it.
67 comment : str, optional
68 A model descriptive comment, used for logging and debugging purposes.
69 trim_rule : function ((str, int, int) -> int), optional
70 Vocabulary trimming rule that accepts (word, count, min_count).
71 Specifies whether certain words should remain in the vocabulary (:attr:`gensim.utils.RULE_KEEP`),
72 be trimmed away (:attr:`gensim.utils.RULE_DISCARD`), or handled using the default
73 (:attr:`gensim.utils.RULE_DEFAULT`).
74 If None, then :func:`gensim.utils.keep_vocab_item` will be used.
75 size : int, optional
76 Dimensionality of the feature vectors.
77 alpha : float, optional
78 The initial learning rate.
79 window : int, optional
80 The maximum distance between the current and predicted word within a sentence.
81 min_count : int, optional
82 Ignores all words with total frequency lower than this.
83 max_vocab_size : int, optional
84 Limits the RAM during vocabulary building; if there are more unique
85 words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
86 Set to `None` for no limit.
87 sample : float, optional
88 The threshold for configuring which higher-frequency words are randomly downsampled,
89 useful range is (0, 1e-5).
90 seed : int, optional
91 Seed for the random number generator. Initial vectors for each word are seeded with a hash of
92 the concatenation of word + `str(seed)`.
93 Note that for a **fully deterministically-reproducible run**, you **must also limit the model to
94 a single worker thread (`workers=1`)**, to eliminate ordering jitter from OS thread scheduling.
95 In Python 3, reproducibility between interpreter launches also requires use of the `PYTHONHASHSEED`
96 environment variable to control hash randomization.
97 workers : int, optional
98 Use this many worker threads to train the model. Will yield a speedup when training with multicore machines.
99 min_alpha : float, optional
100 Learning rate will linearly drop to `min_alpha` as training progresses.
101 hs : int {1,0}, optional
102 If 1, hierarchical softmax will be used for model training. If set to 0, and `negative` is non-zero,
103 negative sampling will be used.
104 negative : int, optional
105 If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
106 should be drawn (usually between 5-20). If set to 0, no negative sampling is used.
107 cbow_mean : int, optional
108 Same as `dm_mean`, **unused**.
109 hashfxn : function (object -> int), optional
110 A hashing function. Used to create an initial random reproducible vector by hashing the random seed.
111 iter : int, optional
112 Number of epochs to iterate through the corpus.
113 sorted_vocab : bool, optional
114 Whether the vocabulary should be sorted internally.
115 batch_words : int, optional
116 Number of words to be handled by each job.
117
118 """
119 self.gensim_model = None
120 self.dm_mean = dm_mean
121 self.dm = dm
122 self.dbow_words = dbow_words
123 self.dm_concat = dm_concat
124 self.dm_tag_count = dm_tag_count
125 self.docvecs = docvecs
126 self.docvecs_mapfile = docvecs_mapfile
127 self.comment = comment
128 self.trim_rule = trim_rule
129
130 # attributes associated with gensim.models.Word2Vec
131 self.size = size
132 self.alpha = alpha
133 self.window = window
134 self.min_count = min_count
135 self.max_vocab_size = max_vocab_size
136 self.sample = sample
137 self.seed = seed
138 self.workers = workers
139 self.min_alpha = min_alpha
140 self.hs = hs
141 self.negative = negative
142 self.cbow_mean = int(cbow_mean)
143 self.hashfxn = hashfxn
144 self.iter = iter
145 self.sorted_vocab = sorted_vocab
146 self.batch_words = batch_words
147
148 def fit(self, X, y=None):
149 """Fit the model according to the given training data.
150
151 Parameters
152 ----------
153 X : {iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, iterable of list of str}
154 A collection of tagged documents used for training the model.
155
156 Returns
157 -------
158 :class:`~gensim.sklearn_api.d2vmodel.D2VTransformer`
159 The trained model.
160
161 """
162 if isinstance(X[0], doc2vec.TaggedDocument):
163 d2v_sentences = X
164 else:
165 d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
166 self.gensim_model = models.Doc2Vec(
167 documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,
168 dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
169 docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
170 trim_rule=self.trim_rule, vector_size=self.size, alpha=self.alpha, window=self.window,
171 min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
172 seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,
173 negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,
174 epochs=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
175 )
176 return self
177
178 def transform(self, docs):
179 """Infer the vector representations for the input documents.
180
181 Parameters
182 ----------
183 docs : {iterable of list of str, list of str}
184 Input document or sequence of documents.
185
186 Returns
187 -------
188 numpy.ndarray of shape [`len(docs)`, `size`]
189 The vector representation of the `docs`.
190
191 """
192 if self.gensim_model is None:
193 raise NotFittedError(
194 "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
195 )
196
197 # The input as array of array
198 if isinstance(docs[0], string_types):
199 docs = [docs]
200 vectors = [self.gensim_model.infer_vector(doc) for doc in docs]
201 return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))
202
[end of gensim/sklearn_api/d2vmodel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py
--- a/gensim/sklearn_api/d2vmodel.py
+++ b/gensim/sklearn_api/d2vmodel.py
@@ -159,7 +159,7 @@
The trained model.
"""
- if isinstance(X[0], doc2vec.TaggedDocument):
+ if isinstance([i for i in X[:1]][0], doc2vec.TaggedDocument):
d2v_sentences = X
else:
d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
| {"golden_diff": "diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py\n--- a/gensim/sklearn_api/d2vmodel.py\n+++ b/gensim/sklearn_api/d2vmodel.py\n@@ -159,7 +159,7 @@\n The trained model.\n \n \"\"\"\n- if isinstance(X[0], doc2vec.TaggedDocument):\n+ if isinstance([i for i in X[:1]][0], doc2vec.TaggedDocument):\n d2v_sentences = X\n else:\n d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]\n", "issue": "D2VTransformer raises if passed a Pandas series without index key 0\n`D2VTransformer` raises if passed a Pandas series with an index that does not contain the key 0:\r\n\r\n```python\r\nimport pandas as pd\r\nfrom gensim.sklearn_api import D2VTransformer\r\nfrom gensim.test.utils import common_texts\r\n\r\nseries = pd.Series(common_texts)\r\nseries.index += 1 # Increment the index so that it does not contain the key 0\r\n\r\ntransformer = D2VTransformer(min_count=1, size=5)\r\ntransformer.fit(series)\r\n```\r\n\r\nOutput:\r\n\r\n```python\r\n\ufeffTraceback (most recent call last):\r\n File \"main.py\", line 9, in <module>\r\n transformer.fit(series)\r\n File \"venv/lib/python3.7/site-packages/gensim/sklearn_api/d2vmodel.py\", line 162, in fit\r\n if isinstance(X[0], doc2vec.TaggedDocument):\r\n File \"venv/lib/python3.7/site-packages/pandas/core/series.py\", line 868, in __getitem__\r\n result = self.index.get_value(self, key)\r\n File \"venv/lib/python3.7/site-packages/pandas/core/indexes/base.py\", line 4375, in get_value\r\n tz=getattr(series.dtype, 'tz', None))\r\n File \"pandas/_libs/index.pyx\", line 81, in pandas._libs.index.IndexEngine.get_value\r\n File \"pandas/_libs/index.pyx\", line 89, in pandas._libs.index.IndexEngine.get_value\r\n File \"pandas/_libs/index.pyx\", line 132, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 987, in pandas._libs.hashtable.Int64HashTable.get_item\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 993, in pandas._libs.hashtable.Int64HashTable.get_item\r\nKeyError: 0\r\n```\r\n\r\nThis occurs because the [`fit`](https://github.com/RaRe-Technologies/gensim/blob/4543646d3fe3496e11bc935e72cbf9b18504442e/gensim/sklearn_api/d2vmodel.py#L162) and [`transform`](https://github.com/RaRe-Technologies/gensim/blob/4543646d3fe3496e11bc935e72cbf9b18504442e/gensim/sklearn_api/d2vmodel.py#L198) methods of `D2VTransformer` require `__getitem__` on the passed iterable not to raise an exception for key 0.\r\n\r\nVersions:\r\n\r\nDarwin-18.6.0-x86_64-i386-64bit\r\nPython 3.7.3 (default, Mar 27 2019, 09:23:15) [Clang 10.0.1 (clang-1001.0.46.3)]\r\nNumPy 1.16.4\r\nSciPy 1.3.0\r\ngensim 3.8.0\r\nFAST_VERSION 1\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2011 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"Scikit learn interface for :class:`~gensim.models.doc2vec.Doc2Vec`.\n\nFollows scikit-learn API conventions to facilitate using gensim along with scikit-learn.\n\nExamples\n--------\n.. 
sourcecode:: pycon\n\n >>> from gensim.test.utils import common_texts\n >>> from gensim.sklearn_api import D2VTransformer\n >>>\n >>> model = D2VTransformer(min_count=1, size=5)\n >>> docvecs = model.fit_transform(common_texts) # represent `common_texts` as vectors\n\n\"\"\"\nimport numpy as np\nfrom six import string_types\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.exceptions import NotFittedError\n\nfrom gensim import models\nfrom gensim.models import doc2vec\n\n\nclass D2VTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Base Doc2Vec module, wraps :class:`~gensim.models.doc2vec.Doc2Vec`.\n\n This model based on `Quoc Le, Tomas Mikolov: \"Distributed Representations of Sentences and Documents\"\n <https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`_.\n\n \"\"\"\n def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,\n docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,\n max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,\n hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):\n \"\"\"\n\n Parameters\n ----------\n\n dm_mean : int {1,0}, optional\n If 0, use the sum of the context word vectors. If 1, use the mean. Only applies when `dm_concat=0`.\n dm : int {1,0}, optional\n Defines the training algorithm. If `dm=1` - distributed memory (PV-DM) is used.\n Otherwise, distributed bag of words (PV-DBOW) is employed.\n dbow_words : int {1,0}, optional\n If set to 1 - trains word-vectors (in skip-gram fashion) simultaneous with DBOW\n doc-vector training, If 0, only trains doc-vectors (faster).\n dm_concat : int {1,0}, optional\n If 1, use concatenation of context vectors rather than sum/average.\n Note concatenation results in a much-larger model, as the input is no longer the size of one\n (sampled or arithmetically combined) word vector, but the size of the tag(s) and all words\n in the context strung together.\n dm_tag_count : int, optional\n Expected constant number of document tags per document, when using dm_concat mode.\n docvecs : :class:`~gensim.models.keyedvectors.Doc2VecKeyedVectors`\n A mapping from a string or int tag to its vector representation.\n Either this or `docvecs_mapfile` **MUST** be supplied.\n docvecs_mapfile : str, optional\n Path to a file containing the docvecs mapping. If `docvecs` is None, this file will be used to create it.\n comment : str, optional\n A model descriptive comment, used for logging and debugging purposes.\n trim_rule : function ((str, int, int) -> int), optional\n Vocabulary trimming rule that accepts (word, count, min_count).\n Specifies whether certain words should remain in the vocabulary (:attr:`gensim.utils.RULE_KEEP`),\n be trimmed away (:attr:`gensim.utils.RULE_DISCARD`), or handled using the default\n (:attr:`gensim.utils.RULE_DEFAULT`).\n If None, then :func:`gensim.utils.keep_vocab_item` will be used.\n size : int, optional\n Dimensionality of the feature vectors.\n alpha : float, optional\n The initial learning rate.\n window : int, optional\n The maximum distance between the current and predicted word within a sentence.\n min_count : int, optional\n Ignores all words with total frequency lower than this.\n max_vocab_size : int, optional\n Limits the RAM during vocabulary building; if there are more unique\n words than this, then prune the infrequent ones. 
Every 10 million word types need about 1GB of RAM.\n Set to `None` for no limit.\n sample : float, optional\n The threshold for configuring which higher-frequency words are randomly downsampled,\n useful range is (0, 1e-5).\n seed : int, optional\n Seed for the random number generator. Initial vectors for each word are seeded with a hash of\n the concatenation of word + `str(seed)`.\n Note that for a **fully deterministically-reproducible run**, you **must also limit the model to\n a single worker thread (`workers=1`)**, to eliminate ordering jitter from OS thread scheduling.\n In Python 3, reproducibility between interpreter launches also requires use of the `PYTHONHASHSEED`\n environment variable to control hash randomization.\n workers : int, optional\n Use this many worker threads to train the model. Will yield a speedup when training with multicore machines.\n min_alpha : float, optional\n Learning rate will linearly drop to `min_alpha` as training progresses.\n hs : int {1,0}, optional\n If 1, hierarchical softmax will be used for model training. If set to 0, and `negative` is non-zero,\n negative sampling will be used.\n negative : int, optional\n If > 0, negative sampling will be used, the int for negative specifies how many \"noise words\"\n should be drawn (usually between 5-20). If set to 0, no negative sampling is used.\n cbow_mean : int, optional\n Same as `dm_mean`, **unused**.\n hashfxn : function (object -> int), optional\n A hashing function. Used to create an initial random reproducible vector by hashing the random seed.\n iter : int, optional\n Number of epochs to iterate through the corpus.\n sorted_vocab : bool, optional\n Whether the vocabulary should be sorted internally.\n batch_words : int, optional\n Number of words to be handled by each job.\n\n \"\"\"\n self.gensim_model = None\n self.dm_mean = dm_mean\n self.dm = dm\n self.dbow_words = dbow_words\n self.dm_concat = dm_concat\n self.dm_tag_count = dm_tag_count\n self.docvecs = docvecs\n self.docvecs_mapfile = docvecs_mapfile\n self.comment = comment\n self.trim_rule = trim_rule\n\n # attributes associated with gensim.models.Word2Vec\n self.size = size\n self.alpha = alpha\n self.window = window\n self.min_count = min_count\n self.max_vocab_size = max_vocab_size\n self.sample = sample\n self.seed = seed\n self.workers = workers\n self.min_alpha = min_alpha\n self.hs = hs\n self.negative = negative\n self.cbow_mean = int(cbow_mean)\n self.hashfxn = hashfxn\n self.iter = iter\n self.sorted_vocab = sorted_vocab\n self.batch_words = batch_words\n\n def fit(self, X, y=None):\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, iterable of list of str}\n A collection of tagged documents used for training the model.\n\n Returns\n -------\n :class:`~gensim.sklearn_api.d2vmodel.D2VTransformer`\n The trained model.\n\n \"\"\"\n if isinstance(X[0], doc2vec.TaggedDocument):\n d2v_sentences = X\n else:\n d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]\n self.gensim_model = models.Doc2Vec(\n documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, vector_size=self.size, alpha=self.alpha, window=self.window,\n min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,\n 
seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,\n negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,\n epochs=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words\n )\n return self\n\n def transform(self, docs):\n \"\"\"Infer the vector representations for the input documents.\n\n Parameters\n ----------\n docs : {iterable of list of str, list of str}\n Input document or sequence of documents.\n\n Returns\n -------\n numpy.ndarray of shape [`len(docs)`, `size`]\n The vector representation of the `docs`.\n\n \"\"\"\n if self.gensim_model is None:\n raise NotFittedError(\n \"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.\"\n )\n\n # The input as array of array\n if isinstance(docs[0], string_types):\n docs = [docs]\n vectors = [self.gensim_model.infer_vector(doc) for doc in docs]\n return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))\n", "path": "gensim/sklearn_api/d2vmodel.py"}]} | 3,996 | 156 |
gh_patches_debug_33436 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dd-trace-py messes with the root logger
```
has anyone else run into the issue of ddtrace-py turning on all default loggers for everything when running `patch_all()`? Weirdly, I can only replicate it within Docker, but it's definitely the `patch_all()` command that's causing it
[8:50 PM]
same thing happens if i run a single `patch()` on any library, it seems
[8:52 PM]
thinking it might be caused by this line: https://github.com/DataDog/dd-trace-py/blob/a50b5f5422716fae1c54b589cd448dc295b32757/ddtrace/monkey.py#L77
[8:53 PM]
any reason that's `logging.info(...)` on the `logging` module instead of getting a logger and calling `.info()` on that?
```
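For background: the module-level helpers such as `logging.info(...)` log through the root logger and implicitly call `logging.basicConfig()` when the root logger has no handlers, which attaches a `StreamHandler` to it as a side effect. That is most likely why output suddenly appears for every logger. A minimal sketch of the difference:
```python
import logging

# Module-level helper: logs on the root logger and calls basicConfig()
# implicitly if the root logger has no handlers yet, so a StreamHandler
# gets attached to the root logger as a side effect.
logging.info("patched %s/%s modules (%s)", 1, 2, "redis")

# Per-module logger: no implicit basicConfig(); nothing is printed unless
# the application configures logging itself.
log = logging.getLogger(__name__)
log.info("patched %s/%s modules (%s)", 1, 2, "redis")
```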
</issue>
<code>
[start of ddtrace/contrib/mysql/__init__.py]
1 """Instrumeent mysql to report MySQL queries.
2
3 ``patch_all`` will automatically patch your mysql connection to make it work.
4 ::
5
6 from ddtrace import Pin, patch
7 from mysql.connector import connect
8
9 # If not patched yet, you can patch mysql specifically
10 patch(mysql=True)
11
12 # This will report a span with the default settings
13 conn = connect(user="alice", password="b0b", host="localhost", port=3306, database="test")
14 cursor = conn.cursor()
15 cursor.execute("SELECT 6*7 AS the_answer;")
16
17 # Use a pin to specify metadata related to this connection
18 Pin.override(conn, service='mysql-users')
19
20 This package works for mysql.connector version 2.1.x.
21 Only the default full-Python integration works. The binary C connector,
22 provided by _mysql_connector, is not supported yet.
23
24 Help on mysql.connector can be found on:
25 https://dev.mysql.com/doc/connector-python/en/
26 """
27 import logging
28
29 from ..util import require_modules
30
31 # check `MySQL-python` availability
32 required_modules = ['_mysql']
33
34 with require_modules(required_modules) as missing_modules:
35 if not missing_modules:
36 # MySQL-python package is not supported at the moment
37 logging.debug('failed to patch mysql-python: integration not available')
38
39 # check `mysql-connector` availability
40 required_modules = ['mysql.connector']
41
42 with require_modules(required_modules) as missing_modules:
43 if not missing_modules:
44 from .patch import patch
45 from .tracers import get_traced_mysql_connection
46
47 __all__ = ['get_traced_mysql_connection', 'patch']
48
[end of ddtrace/contrib/mysql/__init__.py]
[start of ddtrace/monkey.py]
1 """Patch librairies to be automatically instrumented.
2
3 It can monkey patch supported standard libraries and third party modules.
4 A patched module will automatically report spans with its default configuration.
5
6 A library instrumentation can be configured (for instance, to report as another service)
7 using Pin. For that, check its documentation.
8 """
9 import logging
10 import importlib
11 import threading
12
13
14 # Default set of modules to automatically patch or not
15 PATCH_MODULES = {
16 'boto': False,
17 'botocore': False,
18 'bottle': False,
19 'cassandra': True,
20 'celery': True,
21 'elasticsearch': True,
22 'mongoengine': True,
23 'mysql': True,
24 'psycopg': True,
25 'pylibmc': True,
26 'pymongo': True,
27 'redis': True,
28 'requests': False, # Not ready yet
29 'sqlalchemy': False, # Prefer DB client instrumentation
30 'sqlite3': True,
31 'aiohttp': True, # requires asyncio (Python 3.4+)
32
33 # Ignore some web framework integrations that might be configured explicitly in code
34 "django": False,
35 "flask": False,
36 "falcon": False,
37 "pylons": False,
38 "pyramid": False,
39 }
40
41 _LOCK = threading.Lock()
42 _PATCHED_MODULES = set()
43
44
45 class PatchException(Exception):
46 """Wraps regular `Exception` class when patching modules"""
47 pass
48
49
50 def patch_all(**patch_modules):
51 """Automatically patches all available modules.
52
53 :param dict \**patch_modules: Override whether particular modules are patched or not.
54
55 >>> patch_all({'redis': False, 'cassandra': False})
56 """
57 modules = PATCH_MODULES.copy()
58 modules.update(patch_modules)
59
60 patch(raise_errors=False, **modules)
61
62 def patch(raise_errors=True, **patch_modules):
63 """Patch only a set of given modules.
64
65 :param bool raise_errors: Raise error if one patch fail.
66 :param dict \**patch_modules: List of modules to patch.
67
68 >>> patch({'psycopg': True, 'elasticsearch': True})
69 """
70 modules = [m for (m, should_patch) in patch_modules.items() if should_patch]
71 count = 0
72 for module in modules:
73 patched = patch_module(module, raise_errors=raise_errors)
74 if patched:
75 count += 1
76
77 logging.info("patched %s/%s modules (%s)",
78 count,
79 len(modules),
80 ",".join(get_patched_modules()))
81
82
83 def patch_module(module, raise_errors=True):
84 """Patch a single module
85
86 Returns if the module got properly patched.
87 """
88 try:
89 return _patch_module(module)
90 except Exception as exc:
91 if raise_errors:
92 raise
93 logging.debug("failed to patch %s: %s", module, exc)
94 return False
95
96 def get_patched_modules():
97 """Get the list of patched modules"""
98 with _LOCK:
99 return sorted(_PATCHED_MODULES)
100
101 def _patch_module(module):
102 """_patch_module will attempt to monkey patch the module.
103
104 Returns if the module got patched.
105 Can also raise errors if it fails.
106 """
107 path = 'ddtrace.contrib.%s' % module
108 with _LOCK:
109 if module in _PATCHED_MODULES:
110 logging.debug("already patched: %s", path)
111 return False
112
113 try:
114 imported_module = importlib.import_module(path)
115 imported_module.patch()
116 except ImportError:
117 # if the import fails, the integration is not available
118 raise PatchException('integration not available')
119 except AttributeError:
120 # if patch() is not available in the module, it means
121 # that the library is not installed in the environment
122 raise PatchException('module not installed')
123
124 _PATCHED_MODULES.add(module)
125 return True
126
[end of ddtrace/monkey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py
--- a/ddtrace/contrib/mysql/__init__.py
+++ b/ddtrace/contrib/mysql/__init__.py
@@ -28,13 +28,16 @@
from ..util import require_modules
+
+log = logging.getLogger(__name__)
+
# check `MySQL-python` availability
required_modules = ['_mysql']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
# MySQL-python package is not supported at the moment
- logging.debug('failed to patch mysql-python: integration not available')
+ log.debug('failed to patch mysql-python: integration not available')
# check `mysql-connector` availability
required_modules = ['mysql.connector']
diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -11,6 +11,8 @@
import threading
+log = logging.getLogger(__name__)
+
# Default set of modules to automatically patch or not
PATCH_MODULES = {
'boto': False,
@@ -74,7 +76,7 @@
if patched:
count += 1
- logging.info("patched %s/%s modules (%s)",
+ log.info("patched %s/%s modules (%s)",
count,
len(modules),
",".join(get_patched_modules()))
@@ -90,7 +92,7 @@
except Exception as exc:
if raise_errors:
raise
- logging.debug("failed to patch %s: %s", module, exc)
+ log.debug("failed to patch %s: %s", module, exc)
return False
def get_patched_modules():
@@ -107,7 +109,7 @@
path = 'ddtrace.contrib.%s' % module
with _LOCK:
if module in _PATCHED_MODULES:
- logging.debug("already patched: %s", path)
+ log.debug("already patched: %s", path)
return False
try:
| {"golden_diff": "diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py\n--- a/ddtrace/contrib/mysql/__init__.py\n+++ b/ddtrace/contrib/mysql/__init__.py\n@@ -28,13 +28,16 @@\n \n from ..util import require_modules\n \n+\n+log = logging.getLogger(__name__)\n+\n # check `MySQL-python` availability\n required_modules = ['_mysql']\n \n with require_modules(required_modules) as missing_modules:\n if not missing_modules:\n # MySQL-python package is not supported at the moment\n- logging.debug('failed to patch mysql-python: integration not available')\n+ log.debug('failed to patch mysql-python: integration not available')\n \n # check `mysql-connector` availability\n required_modules = ['mysql.connector']\ndiff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -11,6 +11,8 @@\n import threading\n \n \n+log = logging.getLogger(__name__)\n+\n # Default set of modules to automatically patch or not\n PATCH_MODULES = {\n 'boto': False,\n@@ -74,7 +76,7 @@\n if patched:\n count += 1\n \n- logging.info(\"patched %s/%s modules (%s)\",\n+ log.info(\"patched %s/%s modules (%s)\",\n count,\n len(modules),\n \",\".join(get_patched_modules()))\n@@ -90,7 +92,7 @@\n except Exception as exc:\n if raise_errors:\n raise\n- logging.debug(\"failed to patch %s: %s\", module, exc)\n+ log.debug(\"failed to patch %s: %s\", module, exc)\n return False\n \n def get_patched_modules():\n@@ -107,7 +109,7 @@\n path = 'ddtrace.contrib.%s' % module\n with _LOCK:\n if module in _PATCHED_MODULES:\n- logging.debug(\"already patched: %s\", path)\n+ log.debug(\"already patched: %s\", path)\n return False\n \n try:\n", "issue": "dd-trace-py messes with the root logger\n```\r\nhas anyone else run into the issue of ddtrace-py turning on all default loggers for everything when running `patch_all()`? Weirdly, I can only replicate it within Docker, but it's definitely the `patch_all()` command that's causing it\r\n\r\n[8:50 PM] \r\nsame thing happens if i run a single `patch()` on any library, it seems\r\n\r\n[8:52 PM] \r\nthinking it might be caused by this line: https://github.com/DataDog/dd-trace-py/blob/a50b5f5422716fae1c54b589cd448dc295b32757/ddtrace/monkey.py#L77\r\n\r\n[8:53 PM] \r\nany reason that's `logging.info(...)` on the `logging` module instead of getting a logger and calling `.info()` on that?\r\n```\n", "before_files": [{"content": "\"\"\"Instrumeent mysql to report MySQL queries.\n\n``patch_all`` will automatically patch your mysql connection to make it work.\n::\n\n from ddtrace import Pin, patch\n from mysql.connector import connect\n\n # If not patched yet, you can patch mysql specifically\n patch(mysql=True)\n\n # This will report a span with the default settings\n conn = connect(user=\"alice\", password=\"b0b\", host=\"localhost\", port=3306, database=\"test\")\n cursor = conn.cursor()\n cursor.execute(\"SELECT 6*7 AS the_answer;\")\n\n # Use a pin to specify metadata related to this connection\n Pin.override(conn, service='mysql-users')\n\nThis package works for mysql.connector version 2.1.x.\nOnly the default full-Python integration works. 
The binary C connector,\nprovided by _mysql_connector, is not supported yet.\n\nHelp on mysql.connector can be found on:\nhttps://dev.mysql.com/doc/connector-python/en/\n\"\"\"\nimport logging\n\nfrom ..util import require_modules\n\n# check `MySQL-python` availability\nrequired_modules = ['_mysql']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n # MySQL-python package is not supported at the moment\n logging.debug('failed to patch mysql-python: integration not available')\n\n# check `mysql-connector` availability\nrequired_modules = ['mysql.connector']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .tracers import get_traced_mysql_connection\n\n __all__ = ['get_traced_mysql_connection', 'patch']\n", "path": "ddtrace/contrib/mysql/__init__.py"}, {"content": "\"\"\"Patch librairies to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport logging\nimport importlib\nimport threading\n\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n 'boto': False,\n 'botocore': False,\n 'bottle': False,\n 'cassandra': True,\n 'celery': True,\n 'elasticsearch': True,\n 'mongoengine': True,\n 'mysql': True,\n 'psycopg': True,\n 'pylibmc': True,\n 'pymongo': True,\n 'redis': True,\n 'requests': False, # Not ready yet\n 'sqlalchemy': False, # Prefer DB client instrumentation\n 'sqlite3': True,\n 'aiohttp': True, # requires asyncio (Python 3.4+)\n\n # Ignore some web framework integrations that might be configured explicitly in code\n \"django\": False,\n \"flask\": False,\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n pass\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n :param dict \\**patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all({'redis': False, 'cassandra': False})\n \"\"\"\n modules = PATCH_MODULES.copy()\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict \\**patch_modules: List of modules to patch.\n\n >>> patch({'psycopg': True, 'elasticsearch': True})\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n count = 0\n for module in modules:\n patched = patch_module(module, raise_errors=raise_errors)\n if patched:\n count += 1\n\n logging.info(\"patched %s/%s modules (%s)\",\n count,\n len(modules),\n \",\".join(get_patched_modules()))\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except Exception as exc:\n if raise_errors:\n raise\n logging.debug(\"failed to patch %s: %s\", module, exc)\n return False\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\ndef _patch_module(module):\n 
\"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = 'ddtrace.contrib.%s' % module\n with _LOCK:\n if module in _PATCHED_MODULES:\n logging.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n imported_module.patch()\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException('integration not available')\n except AttributeError:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n raise PatchException('module not installed')\n\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]} | 2,315 | 465 |
gh_patches_debug_433 | rasdani/github-patches | git_diff | kornia__kornia-2476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Next release?
## 🚀 Feature
Hi, when will the next kornia release on conda or pypi be?
## Motivation
The last conda release was in April, and new features have landed since then, but they are unavailable in wheels.
</issue>
<code>
[start of kornia/__init__.py]
1 # NOTE: kornia filters and geometry must go first since are the core of the library
2 # and by changing the import order you might get into a circular dependencies issue.
3 from . import filters
4 from . import geometry
5 from . import grad_estimator
6
7 # import the other modules for convenience
8 from . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x
9
10 # NOTE: we are going to expose to top level very few things
11 from kornia.constants import pi
12 from kornia.testing import xla_is_available
13 from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image
14
15 # Version variable
16 __version__ = "0.6.13-dev"
17
[end of kornia/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/__init__.py b/kornia/__init__.py
--- a/kornia/__init__.py
+++ b/kornia/__init__.py
@@ -13,4 +13,4 @@
from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image
# Version variable
-__version__ = "0.6.13-dev"
+__version__ = "0.7.0"
| {"golden_diff": "diff --git a/kornia/__init__.py b/kornia/__init__.py\n--- a/kornia/__init__.py\n+++ b/kornia/__init__.py\n@@ -13,4 +13,4 @@\n from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n \n # Version variable\n-__version__ = \"0.6.13-dev\"\n+__version__ = \"0.7.0\"\n", "issue": "Next release?\n## \ud83d\ude80 Feature\r\nHi, when will the next kornia release on conda or pypi be?\r\n\r\n## Motivation\r\n\r\nLast conda release was in April, and new features have landed since then, but are unavailable in wheels.\n", "before_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.6.13-dev\"\n", "path": "kornia/__init__.py"}]} | 785 | 106 |
gh_patches_debug_9617 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-2195 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add alias argument in query_counter
The context_managers.query_counter constructor currently does not accept any arguments.
In my case, I define custom aliases and do not use the default one. When a query_counter is instantiated, it calls get_db with no arguments, which means it tries to get the default alias and throws the following exception:
`mongoengine.connection.MongoEngineConnectionError: You have not defined a default connection`
This problem is easily solved by adding an "alias" parameter in the constructor for query_counter. I can push a PR if you are interested.
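For illustration, a minimal sketch of what that could look like (keeping today's behaviour as the default, with the rest of the class unchanged):
```python
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db


class query_counter(object):
    def __init__(self, alias=DEFAULT_CONNECTION_NAME):
        # Resolve the requested alias instead of always falling back to the default.
        self.db = get_db(alias=alias)
```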
Thanks !
</issue>
<code>
[start of mongoengine/context_managers.py]
1 from contextlib import contextmanager
2
3 from pymongo.write_concern import WriteConcern
4 from six import iteritems
5
6 from mongoengine.common import _import_class
7 from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
8 from mongoengine.pymongo_support import count_documents
9
10 __all__ = (
11 "switch_db",
12 "switch_collection",
13 "no_dereference",
14 "no_sub_classes",
15 "query_counter",
16 "set_write_concern",
17 )
18
19
20 class switch_db(object):
21 """switch_db alias context manager.
22
23 Example ::
24
25 # Register connections
26 register_connection('default', 'mongoenginetest')
27 register_connection('testdb-1', 'mongoenginetest2')
28
29 class Group(Document):
30 name = StringField()
31
32 Group(name='test').save() # Saves in the default db
33
34 with switch_db(Group, 'testdb-1') as Group:
35 Group(name='hello testdb!').save() # Saves in testdb-1
36 """
37
38 def __init__(self, cls, db_alias):
39 """Construct the switch_db context manager
40
41 :param cls: the class to change the registered db
42 :param db_alias: the name of the specific database to use
43 """
44 self.cls = cls
45 self.collection = cls._get_collection()
46 self.db_alias = db_alias
47 self.ori_db_alias = cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME)
48
49 def __enter__(self):
50 """Change the db_alias and clear the cached collection."""
51 self.cls._meta["db_alias"] = self.db_alias
52 self.cls._collection = None
53 return self.cls
54
55 def __exit__(self, t, value, traceback):
56 """Reset the db_alias and collection."""
57 self.cls._meta["db_alias"] = self.ori_db_alias
58 self.cls._collection = self.collection
59
60
61 class switch_collection(object):
62 """switch_collection alias context manager.
63
64 Example ::
65
66 class Group(Document):
67 name = StringField()
68
69 Group(name='test').save() # Saves in the default db
70
71 with switch_collection(Group, 'group1') as Group:
72 Group(name='hello testdb!').save() # Saves in group1 collection
73 """
74
75 def __init__(self, cls, collection_name):
76 """Construct the switch_collection context manager.
77
78 :param cls: the class to change the registered db
79 :param collection_name: the name of the collection to use
80 """
81 self.cls = cls
82 self.ori_collection = cls._get_collection()
83 self.ori_get_collection_name = cls._get_collection_name
84 self.collection_name = collection_name
85
86 def __enter__(self):
87 """Change the _get_collection_name and clear the cached collection."""
88
89 @classmethod
90 def _get_collection_name(cls):
91 return self.collection_name
92
93 self.cls._get_collection_name = _get_collection_name
94 self.cls._collection = None
95 return self.cls
96
97 def __exit__(self, t, value, traceback):
98 """Reset the collection."""
99 self.cls._collection = self.ori_collection
100 self.cls._get_collection_name = self.ori_get_collection_name
101
102
103 class no_dereference(object):
104 """no_dereference context manager.
105
106 Turns off all dereferencing in Documents for the duration of the context
107 manager::
108
109 with no_dereference(Group) as Group:
110 Group.objects.find()
111 """
112
113 def __init__(self, cls):
114 """Construct the no_dereference context manager.
115
116 :param cls: the class to turn dereferencing off on
117 """
118 self.cls = cls
119
120 ReferenceField = _import_class("ReferenceField")
121 GenericReferenceField = _import_class("GenericReferenceField")
122 ComplexBaseField = _import_class("ComplexBaseField")
123
124 self.deref_fields = [
125 k
126 for k, v in iteritems(self.cls._fields)
127 if isinstance(v, (ReferenceField, GenericReferenceField, ComplexBaseField))
128 ]
129
130 def __enter__(self):
131 """Change the objects default and _auto_dereference values."""
132 for field in self.deref_fields:
133 self.cls._fields[field]._auto_dereference = False
134 return self.cls
135
136 def __exit__(self, t, value, traceback):
137 """Reset the default and _auto_dereference values."""
138 for field in self.deref_fields:
139 self.cls._fields[field]._auto_dereference = True
140 return self.cls
141
142
143 class no_sub_classes(object):
144 """no_sub_classes context manager.
145
146 Only returns instances of this class and no sub (inherited) classes::
147
148 with no_sub_classes(Group) as Group:
149 Group.objects.find()
150 """
151
152 def __init__(self, cls):
153 """Construct the no_sub_classes context manager.
154
155 :param cls: the class to turn querying sub classes on
156 """
157 self.cls = cls
158 self.cls_initial_subclasses = None
159
160 def __enter__(self):
161 """Change the objects default and _auto_dereference values."""
162 self.cls_initial_subclasses = self.cls._subclasses
163 self.cls._subclasses = (self.cls._class_name,)
164 return self.cls
165
166 def __exit__(self, t, value, traceback):
167 """Reset the default and _auto_dereference values."""
168 self.cls._subclasses = self.cls_initial_subclasses
169
170
171 class query_counter(object):
172 """Query_counter context manager to get the number of queries.
173 This works by updating the `profiling_level` of the database so that all queries get logged,
174 resetting the db.system.profile collection at the beginnig of the context and counting the new entries.
175
176 This was designed for debugging purpose. In fact it is a global counter so queries issued by other threads/processes
177 can interfere with it
178
179 Be aware that:
180 - Iterating over large amount of documents (>101) makes pymongo issue `getmore` queries to fetch the next batch of
181 documents (https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches)
182 - Some queries are ignored by default by the counter (killcursors, db.system.indexes)
183 """
184
185 def __init__(self):
186 """Construct the query_counter
187 """
188 self.db = get_db()
189 self.initial_profiling_level = None
190 self._ctx_query_counter = 0 # number of queries issued by the context
191
192 self._ignored_query = {
193 "ns": {"$ne": "%s.system.indexes" % self.db.name},
194 "op": {"$ne": "killcursors"}, # MONGODB < 3.2
195 "command.killCursors": {"$exists": False}, # MONGODB >= 3.2
196 }
197
198 def _turn_on_profiling(self):
199 self.initial_profiling_level = self.db.profiling_level()
200 self.db.set_profiling_level(0)
201 self.db.system.profile.drop()
202 self.db.set_profiling_level(2)
203
204 def _resets_profiling(self):
205 self.db.set_profiling_level(self.initial_profiling_level)
206
207 def __enter__(self):
208 self._turn_on_profiling()
209 return self
210
211 def __exit__(self, t, value, traceback):
212 self._resets_profiling()
213
214 def __eq__(self, value):
215 counter = self._get_count()
216 return value == counter
217
218 def __ne__(self, value):
219 return not self.__eq__(value)
220
221 def __lt__(self, value):
222 return self._get_count() < value
223
224 def __le__(self, value):
225 return self._get_count() <= value
226
227 def __gt__(self, value):
228 return self._get_count() > value
229
230 def __ge__(self, value):
231 return self._get_count() >= value
232
233 def __int__(self):
234 return self._get_count()
235
236 def __repr__(self):
237 """repr query_counter as the number of queries."""
238 return u"%s" % self._get_count()
239
240 def _get_count(self):
241 """Get the number of queries by counting the current number of entries in db.system.profile
242 and substracting the queries issued by this context. In fact everytime this is called, 1 query is
243 issued so we need to balance that
244 """
245 count = (
246 count_documents(self.db.system.profile, self._ignored_query)
247 - self._ctx_query_counter
248 )
249 self._ctx_query_counter += (
250 1 # Account for the query we just issued to gather the information
251 )
252 return count
253
254
255 @contextmanager
256 def set_write_concern(collection, write_concerns):
257 combined_concerns = dict(collection.write_concern.document.items())
258 combined_concerns.update(write_concerns)
259 yield collection.with_options(write_concern=WriteConcern(**combined_concerns))
260
[end of mongoengine/context_managers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/context_managers.py b/mongoengine/context_managers.py
--- a/mongoengine/context_managers.py
+++ b/mongoengine/context_managers.py
@@ -182,10 +182,10 @@
- Some queries are ignored by default by the counter (killcursors, db.system.indexes)
"""
- def __init__(self):
+ def __init__(self, alias=DEFAULT_CONNECTION_NAME):
"""Construct the query_counter
"""
- self.db = get_db()
+ self.db = get_db(alias=alias)
self.initial_profiling_level = None
self._ctx_query_counter = 0 # number of queries issued by the context
| {"golden_diff": "diff --git a/mongoengine/context_managers.py b/mongoengine/context_managers.py\n--- a/mongoengine/context_managers.py\n+++ b/mongoengine/context_managers.py\n@@ -182,10 +182,10 @@\n - Some queries are ignored by default by the counter (killcursors, db.system.indexes)\n \"\"\"\n \n- def __init__(self):\n+ def __init__(self, alias=DEFAULT_CONNECTION_NAME):\n \"\"\"Construct the query_counter\n \"\"\"\n- self.db = get_db()\n+ self.db = get_db(alias=alias)\n self.initial_profiling_level = None\n self._ctx_query_counter = 0 # number of queries issued by the context\n", "issue": "Add alias argument in query_counter\nThe context_managers.query_counter constructor currently does not accept any arguments.\r\n\r\nIn my case, I define custom aliases and do not use the default one. When instantiating a query_counter, it calls get_db with no arguments which means that it tries to get the default alias, and throws the following exception:\r\n`mongoengine.connection.MongoEngineConnectionError: You have not defined a default connection`\r\n\r\nThis problem is easily solved by adding an \"alias\" parameter in the constructor for query_counter. I can push a PR if you are interested.\r\n\r\nThanks !\n", "before_files": [{"content": "from contextlib import contextmanager\n\nfrom pymongo.write_concern import WriteConcern\nfrom six import iteritems\n\nfrom mongoengine.common import _import_class\nfrom mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db\nfrom mongoengine.pymongo_support import count_documents\n\n__all__ = (\n \"switch_db\",\n \"switch_collection\",\n \"no_dereference\",\n \"no_sub_classes\",\n \"query_counter\",\n \"set_write_concern\",\n)\n\n\nclass switch_db(object):\n \"\"\"switch_db alias context manager.\n\n Example ::\n\n # Register connections\n register_connection('default', 'mongoenginetest')\n register_connection('testdb-1', 'mongoenginetest2')\n\n class Group(Document):\n name = StringField()\n\n Group(name='test').save() # Saves in the default db\n\n with switch_db(Group, 'testdb-1') as Group:\n Group(name='hello testdb!').save() # Saves in testdb-1\n \"\"\"\n\n def __init__(self, cls, db_alias):\n \"\"\"Construct the switch_db context manager\n\n :param cls: the class to change the registered db\n :param db_alias: the name of the specific database to use\n \"\"\"\n self.cls = cls\n self.collection = cls._get_collection()\n self.db_alias = db_alias\n self.ori_db_alias = cls._meta.get(\"db_alias\", DEFAULT_CONNECTION_NAME)\n\n def __enter__(self):\n \"\"\"Change the db_alias and clear the cached collection.\"\"\"\n self.cls._meta[\"db_alias\"] = self.db_alias\n self.cls._collection = None\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the db_alias and collection.\"\"\"\n self.cls._meta[\"db_alias\"] = self.ori_db_alias\n self.cls._collection = self.collection\n\n\nclass switch_collection(object):\n \"\"\"switch_collection alias context manager.\n\n Example ::\n\n class Group(Document):\n name = StringField()\n\n Group(name='test').save() # Saves in the default db\n\n with switch_collection(Group, 'group1') as Group:\n Group(name='hello testdb!').save() # Saves in group1 collection\n \"\"\"\n\n def __init__(self, cls, collection_name):\n \"\"\"Construct the switch_collection context manager.\n\n :param cls: the class to change the registered db\n :param collection_name: the name of the collection to use\n \"\"\"\n self.cls = cls\n self.ori_collection = cls._get_collection()\n self.ori_get_collection_name = 
cls._get_collection_name\n self.collection_name = collection_name\n\n def __enter__(self):\n \"\"\"Change the _get_collection_name and clear the cached collection.\"\"\"\n\n @classmethod\n def _get_collection_name(cls):\n return self.collection_name\n\n self.cls._get_collection_name = _get_collection_name\n self.cls._collection = None\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the collection.\"\"\"\n self.cls._collection = self.ori_collection\n self.cls._get_collection_name = self.ori_get_collection_name\n\n\nclass no_dereference(object):\n \"\"\"no_dereference context manager.\n\n Turns off all dereferencing in Documents for the duration of the context\n manager::\n\n with no_dereference(Group) as Group:\n Group.objects.find()\n \"\"\"\n\n def __init__(self, cls):\n \"\"\"Construct the no_dereference context manager.\n\n :param cls: the class to turn dereferencing off on\n \"\"\"\n self.cls = cls\n\n ReferenceField = _import_class(\"ReferenceField\")\n GenericReferenceField = _import_class(\"GenericReferenceField\")\n ComplexBaseField = _import_class(\"ComplexBaseField\")\n\n self.deref_fields = [\n k\n for k, v in iteritems(self.cls._fields)\n if isinstance(v, (ReferenceField, GenericReferenceField, ComplexBaseField))\n ]\n\n def __enter__(self):\n \"\"\"Change the objects default and _auto_dereference values.\"\"\"\n for field in self.deref_fields:\n self.cls._fields[field]._auto_dereference = False\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the default and _auto_dereference values.\"\"\"\n for field in self.deref_fields:\n self.cls._fields[field]._auto_dereference = True\n return self.cls\n\n\nclass no_sub_classes(object):\n \"\"\"no_sub_classes context manager.\n\n Only returns instances of this class and no sub (inherited) classes::\n\n with no_sub_classes(Group) as Group:\n Group.objects.find()\n \"\"\"\n\n def __init__(self, cls):\n \"\"\"Construct the no_sub_classes context manager.\n\n :param cls: the class to turn querying sub classes on\n \"\"\"\n self.cls = cls\n self.cls_initial_subclasses = None\n\n def __enter__(self):\n \"\"\"Change the objects default and _auto_dereference values.\"\"\"\n self.cls_initial_subclasses = self.cls._subclasses\n self.cls._subclasses = (self.cls._class_name,)\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the default and _auto_dereference values.\"\"\"\n self.cls._subclasses = self.cls_initial_subclasses\n\n\nclass query_counter(object):\n \"\"\"Query_counter context manager to get the number of queries.\n This works by updating the `profiling_level` of the database so that all queries get logged,\n resetting the db.system.profile collection at the beginnig of the context and counting the new entries.\n\n This was designed for debugging purpose. 
In fact it is a global counter so queries issued by other threads/processes\n can interfere with it\n\n Be aware that:\n - Iterating over large amount of documents (>101) makes pymongo issue `getmore` queries to fetch the next batch of\n documents (https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches)\n - Some queries are ignored by default by the counter (killcursors, db.system.indexes)\n \"\"\"\n\n def __init__(self):\n \"\"\"Construct the query_counter\n \"\"\"\n self.db = get_db()\n self.initial_profiling_level = None\n self._ctx_query_counter = 0 # number of queries issued by the context\n\n self._ignored_query = {\n \"ns\": {\"$ne\": \"%s.system.indexes\" % self.db.name},\n \"op\": {\"$ne\": \"killcursors\"}, # MONGODB < 3.2\n \"command.killCursors\": {\"$exists\": False}, # MONGODB >= 3.2\n }\n\n def _turn_on_profiling(self):\n self.initial_profiling_level = self.db.profiling_level()\n self.db.set_profiling_level(0)\n self.db.system.profile.drop()\n self.db.set_profiling_level(2)\n\n def _resets_profiling(self):\n self.db.set_profiling_level(self.initial_profiling_level)\n\n def __enter__(self):\n self._turn_on_profiling()\n return self\n\n def __exit__(self, t, value, traceback):\n self._resets_profiling()\n\n def __eq__(self, value):\n counter = self._get_count()\n return value == counter\n\n def __ne__(self, value):\n return not self.__eq__(value)\n\n def __lt__(self, value):\n return self._get_count() < value\n\n def __le__(self, value):\n return self._get_count() <= value\n\n def __gt__(self, value):\n return self._get_count() > value\n\n def __ge__(self, value):\n return self._get_count() >= value\n\n def __int__(self):\n return self._get_count()\n\n def __repr__(self):\n \"\"\"repr query_counter as the number of queries.\"\"\"\n return u\"%s\" % self._get_count()\n\n def _get_count(self):\n \"\"\"Get the number of queries by counting the current number of entries in db.system.profile\n and substracting the queries issued by this context. In fact everytime this is called, 1 query is\n issued so we need to balance that\n \"\"\"\n count = (\n count_documents(self.db.system.profile, self._ignored_query)\n - self._ctx_query_counter\n )\n self._ctx_query_counter += (\n 1 # Account for the query we just issued to gather the information\n )\n return count\n\n\n@contextmanager\ndef set_write_concern(collection, write_concerns):\n combined_concerns = dict(collection.write_concern.document.items())\n combined_concerns.update(write_concerns)\n yield collection.with_options(write_concern=WriteConcern(**combined_concerns))\n", "path": "mongoengine/context_managers.py"}]} | 3,267 | 158 |
gh_patches_debug_22898 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
revenue model is '' for everyone
cf. #3479
</issue>
<code>
[start of gratipay/models/team.py]
1 """Teams on Gratipay are plural participants with members.
2 """
3 from postgres.orm import Model
4
5
6 class Team(Model):
7 """Represent a Gratipay team.
8 """
9
10 typname = 'teams'
11
12 def __eq__(self, other):
13 if not isinstance(other, Team):
14 return False
15 return self.id == other.id
16
17 def __ne__(self, other):
18 if not isinstance(other, Team):
19 return True
20 return self.id != other.id
21
22
23 # Constructors
24 # ============
25
26 @classmethod
27 def from_id(cls, id):
28 """Return an existing team based on id.
29 """
30 return cls._from_thing("id", id)
31
32 @classmethod
33 def from_slug(cls, slug):
34 """Return an existing team based on slug.
35 """
36 return cls._from_thing("slug_lower", slug.lower())
37
38 @classmethod
39 def _from_thing(cls, thing, value):
40 assert thing in ("id", "slug_lower")
41 return cls.db.one("""
42
43 SELECT teams.*::teams
44 FROM teams
45 WHERE {}=%s
46
47 """.format(thing), (value,))
48
49 @classmethod
50 def create_new(cls, owner, fields):
51 return cls.db.one("""
52
53 INSERT INTO teams
54 (slug, slug_lower, name, homepage, product_or_service,
55 getting_involved, getting_paid, owner)
56 VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
57 RETURNING teams.*::teams
58
59 """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],
60 fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],
61 owner.username))
62
63 def get_og_title(self):
64 out = self.name
65 receiving = self.receiving
66 if receiving > 0:
67 out += " receives $%.2f/wk" % receiving
68 else:
69 out += " is"
70 return out + " on Gratipay"
71
72
73 def update_receiving(self, cursor=None):
74 # Stubbed out for now. Migrate this over from Participant.
75 pass
76
77
78 @property
79 def status(self):
80 return { None: 'unreviewed'
81 , False: 'rejected'
82 , True: 'approved'
83 }[self.is_approved]
84
[end of gratipay/models/team.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/models/team.py b/gratipay/models/team.py
--- a/gratipay/models/team.py
+++ b/gratipay/models/team.py
@@ -47,18 +47,22 @@
""".format(thing), (value,))
@classmethod
- def create_new(cls, owner, fields):
+ def insert(cls, owner, **fields):
+ fields['slug_lower'] = fields['slug'].lower()
+ fields['owner'] = owner.username
return cls.db.one("""
INSERT INTO teams
- (slug, slug_lower, name, homepage, product_or_service,
- getting_involved, getting_paid, owner)
- VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
+ (slug, slug_lower, name, homepage,
+ product_or_service, revenue_model, getting_involved, getting_paid,
+ owner)
+ VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,
+ %(product_or_service)s, %(revenue_model)s, %(getting_involved)s,
+ %(getting_paid)s,
+ %(owner)s)
RETURNING teams.*::teams
- """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],
- fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],
- owner.username))
+ """, fields)
def get_og_title(self):
out = self.name
| {"golden_diff": "diff --git a/gratipay/models/team.py b/gratipay/models/team.py\n--- a/gratipay/models/team.py\n+++ b/gratipay/models/team.py\n@@ -47,18 +47,22 @@\n \"\"\".format(thing), (value,))\n \n @classmethod\n- def create_new(cls, owner, fields):\n+ def insert(cls, owner, **fields):\n+ fields['slug_lower'] = fields['slug'].lower()\n+ fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n \n INSERT INTO teams\n- (slug, slug_lower, name, homepage, product_or_service,\n- getting_involved, getting_paid, owner)\n- VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n+ (slug, slug_lower, name, homepage,\n+ product_or_service, revenue_model, getting_involved, getting_paid,\n+ owner)\n+ VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n+ %(product_or_service)s, %(revenue_model)s, %(getting_involved)s,\n+ %(getting_paid)s,\n+ %(owner)s)\n RETURNING teams.*::teams\n \n- \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n- fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n- owner.username))\n+ \"\"\", fields)\n \n def get_og_title(self):\n out = self.name\n", "issue": "revenue model is '' for everyone\ncf. #3479\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def create_new(cls, owner, fields):\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage, product_or_service,\n getting_involved, getting_paid, owner)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING teams.*::teams\n\n \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n owner.username))\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n # Stubbed out for now. Migrate this over from Participant.\n pass\n\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n", "path": "gratipay/models/team.py"}]} | 1,224 | 337 |
gh_patches_debug_22373 | rasdani/github-patches | git_diff | zulip__zulip-10098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
slash commands: Add /dark and /light commands.
We have /night and /day, and people are starting to use them. We should add the aliases /dark and /light.
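A minimal, self-contained sketch of the alias idea (the helper name and structure below are only illustrative, not the actual implementation):

```python
# Sketch: map the new aliases onto the existing night/day behaviour.
NIGHT_COMMANDS = {'night', 'dark'}
DAY_COMMANDS = {'day', 'light'}

def resolve_theme_command(command: str) -> str:
    """Return the canonical theme for a slash command (without the leading '/')."""
    if command in NIGHT_COMMANDS:
        return 'night'
    if command in DAY_COMMANDS:
        return 'day'
    raise ValueError('No such command: %s' % (command,))

assert resolve_theme_command('dark') == 'night'
assert resolve_theme_command('light') == 'day'
```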
</issue>
<code>
[start of zerver/lib/zcommand.py]
1 from typing import Any, Dict
2 from django.utils.translation import ugettext as _
3
4 from zerver.models import UserProfile
5 from zerver.lib.actions import do_set_user_display_setting
6 from zerver.lib.exceptions import JsonableError
7
8 def process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]:
9 if not content.startswith('/'):
10 raise JsonableError(_('There should be a leading slash in the zcommand.'))
11 command = content[1:]
12
13 if command == 'ping':
14 ret = dict() # type: Dict[str, Any]
15 return ret
16
17 if command == 'night':
18 if user_profile.night_mode:
19 msg = 'You are still in night mode.'
20 else:
21 msg = 'Changed to night mode! To revert night mode, type `/day`.'
22 do_set_user_display_setting(user_profile, 'night_mode', True)
23 ret = dict(msg=msg)
24 return ret
25
26 if command == 'day':
27 if user_profile.night_mode:
28 msg = 'Changed to day mode! To revert day mode, type `/night`.'
29 do_set_user_display_setting(user_profile, 'night_mode', False)
30 else:
31 msg = 'You are still in day mode.'
32 ret = dict(msg=msg)
33 return ret
34
35 raise JsonableError(_('No such command: %s') % (command,))
36
[end of zerver/lib/zcommand.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py
--- a/zerver/lib/zcommand.py
+++ b/zerver/lib/zcommand.py
@@ -14,18 +14,21 @@
ret = dict() # type: Dict[str, Any]
return ret
- if command == 'night':
+ night_commands = ['night', 'dark']
+ day_commands = ['day', 'light']
+
+ if command in night_commands:
if user_profile.night_mode:
msg = 'You are still in night mode.'
else:
- msg = 'Changed to night mode! To revert night mode, type `/day`.'
+ msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,)
do_set_user_display_setting(user_profile, 'night_mode', True)
ret = dict(msg=msg)
return ret
- if command == 'day':
+ if command in day_commands:
if user_profile.night_mode:
- msg = 'Changed to day mode! To revert day mode, type `/night`.'
+ msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,)
do_set_user_display_setting(user_profile, 'night_mode', False)
else:
msg = 'You are still in day mode.'
| {"golden_diff": "diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py\n--- a/zerver/lib/zcommand.py\n+++ b/zerver/lib/zcommand.py\n@@ -14,18 +14,21 @@\n ret = dict() # type: Dict[str, Any]\n return ret\n \n- if command == 'night':\n+ night_commands = ['night', 'dark']\n+ day_commands = ['day', 'light']\n+\n+ if command in night_commands:\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n- msg = 'Changed to night mode! To revert night mode, type `/day`.'\n+ msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n \n- if command == 'day':\n+ if command in day_commands:\n if user_profile.night_mode:\n- msg = 'Changed to day mode! To revert day mode, type `/night`.'\n+ msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n", "issue": "slash commands: Add /dark and /light commands.\nWe have /night and /day, and people are starting to use them. We should add the aliases /dark and /light.\n", "before_files": [{"content": "from typing import Any, Dict\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserProfile\nfrom zerver.lib.actions import do_set_user_display_setting\nfrom zerver.lib.exceptions import JsonableError\n\ndef process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]:\n if not content.startswith('/'):\n raise JsonableError(_('There should be a leading slash in the zcommand.'))\n command = content[1:]\n\n if command == 'ping':\n ret = dict() # type: Dict[str, Any]\n return ret\n\n if command == 'night':\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n msg = 'Changed to night mode! To revert night mode, type `/day`.'\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n\n if command == 'day':\n if user_profile.night_mode:\n msg = 'Changed to day mode! To revert day mode, type `/night`.'\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n ret = dict(msg=msg)\n return ret\n\n raise JsonableError(_('No such command: %s') % (command,))\n", "path": "zerver/lib/zcommand.py"}]} | 931 | 296 |
gh_patches_debug_39460 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get filtered set of records from table
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
At the moment, when we request records from a table, it's _all_ of the records or none. We should be able to filter in a way that supports retrieving "groups" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL:
```sql
WHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn
```
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
We should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context).
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
The interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time.
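As a rough sketch of what the two helpers could look like with SQLAlchemy Core (the function names and signatures here are assumptions for illustration, not a final API):

```python
# Illustrative sketch only: build the AND-of-equalities filter and fetch the
# distinct tuples ("groups") for a set of columns.
from sqlalchemy import and_, select


def build_group_filter(table, filters):
    """filters is a list of (column_name, value) pairs, combined with AND."""
    return and_(True, *[table.columns[name] == value for name, value in filters])


def get_distinct_tuples(table, engine, column_names, limit=None, offset=None):
    """Return the distinct value tuples for the given columns, paginated."""
    columns = [table.columns[name] for name in column_names]
    query = select(*columns).distinct().limit(limit).offset(offset)
    with engine.begin() as conn:
        return conn.execute(query).fetchall()
```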
</issue>
<code>
[start of db/records.py]
1 from sqlalchemy import delete, select
2 from sqlalchemy.inspection import inspect
3
4
5 def _get_primary_key_column(table):
6 primary_key_list = list(inspect(table).primary_key)
7 # We do not support getting by composite primary keys
8 assert len(primary_key_list) == 1
9 return primary_key_list[0]
10
11
12 def get_record(table, engine, id_value):
13 primary_key_column = _get_primary_key_column(table)
14 query = select(table).where(primary_key_column == id_value)
15 with engine.begin() as conn:
16 result = conn.execute(query).fetchall()
17 assert len(result) <= 1
18 return result[0] if result else None
19
20
21 def get_records(table, engine, limit=None, offset=None, order_by=[]):
22 """
23 Returns records from a table.
24
25 Args:
26 table: SQLAlchemy table object
27 engine: SQLAlchemy engine object
28 limit: int, gives number of rows to return
29 offset: int, gives number of rows to skip
30 order_by: list of SQLAlchemy ColumnElements to order by. Should
31 usually be either a list of string column names, or a
32 list of columns from the given table.
33 """
34 query = select(table).order_by(*order_by).limit(limit).offset(offset)
35 with engine.begin() as conn:
36 return conn.execute(query).fetchall()
37
38
39 def create_record_or_records(table, engine, record_data):
40 """
41 record_data can be a dictionary, tuple, or list of dictionaries or tuples.
42 if record_data is a list, it creates multiple records.
43 """
44 id_value = None
45 with engine.begin() as connection:
46 result = connection.execute(table.insert(), record_data)
47 # If there was only a single record created, return the record.
48 if result.rowcount == 1:
49 # We need to manually commit insertion so that we can retrieve the record.
50 connection.commit()
51 id_value = result.inserted_primary_key[0]
52 if id_value is not None:
53 return get_record(table, engine, id_value)
54 # Do not return any records if multiple rows were added.
55 return None
56
57
58 def create_records_from_csv(table, engine, csv_filename, column_names):
59 with open(csv_filename, 'rb') as csv_file:
60 with engine.begin() as conn:
61 cursor = conn.connection.cursor()
62 relation = '.'.join('"{}"'.format(part) for part in (table.schema, table.name))
63 formatted_columns = '({})'.format(','.join([f'"{column_name}"' for column_name in column_names]))
64 copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'
65 cursor.copy_expert(copy_sql, csv_file)
66
67
68 def update_record(table, engine, id_value, record_data):
69 primary_key_column = _get_primary_key_column(table)
70 with engine.begin() as connection:
71 connection.execute(
72 table.update().where(primary_key_column == id_value).values(record_data)
73 )
74 return get_record(table, engine, id_value)
75
76
77 def delete_record(table, engine, id_value):
78 primary_key_column = _get_primary_key_column(table)
79 query = delete(table).where(primary_key_column == id_value)
80 with engine.begin() as conn:
81 return conn.execute(query)
82
[end of db/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/records.py b/db/records.py
--- a/db/records.py
+++ b/db/records.py
@@ -1,6 +1,9 @@
-from sqlalchemy import delete, select
+import logging
+from sqlalchemy import delete, select, and_, Column
from sqlalchemy.inspection import inspect
+logger = logging.getLogger(__name__)
+
def _get_primary_key_column(table):
primary_key_list = list(inspect(table).primary_key)
@@ -18,7 +21,9 @@
return result[0] if result else None
-def get_records(table, engine, limit=None, offset=None, order_by=[]):
+def get_records(
+ table, engine, limit=None, offset=None, order_by=[], filters=[]
+):
"""
Returns records from a table.
@@ -30,12 +35,72 @@
order_by: list of SQLAlchemy ColumnElements to order by. Should
usually be either a list of string column names, or a
list of columns from the given table.
+ filters: list of tuples of type (ColumnElement, value), where
+ ColumnElement is an SQLAlchemy ColumnElement, and value
+ is a valid value for the associated column (i.e., the
+ type must be correct)
"""
- query = select(table).order_by(*order_by).limit(limit).offset(offset)
+ query = (
+ select(table)
+ .order_by(*order_by)
+ .limit(limit)
+ .offset(offset)
+ .where(_build_filter_conjunction(table, filters))
+ )
with engine.begin() as conn:
return conn.execute(query).fetchall()
+def _build_filter_conjunction(table, filters):
+ refined_filters = [
+ (table.columns[col] if type(col) == str else col, value)
+ for col, value in filters
+ ]
+ # We need a default of True (rather than empty), since invoking and_
+ # without arguments is deprecated.
+ return and_(True, *[col == value for col, value in refined_filters])
+
+
+def get_distinct_tuple_values(
+ column_list, engine, table=None, limit=None, offset=None,
+):
+ """
+ Returns distinct tuples from a given list of columns.
+
+ Args:
+ column_list: list of column names or SQLAlchemy column objects
+ engine: SQLAlchemy engine object
+ table: SQLAlchemy table object
+ limit: int, gives number of rows to return
+ offset: int, gives number of rows to skip
+
+ If no table is given, the column_list must consist entirely of
+ SQLAlchemy column objects associated with a table.
+ """
+ if table is not None:
+ column_objects = [
+ table.columns[col] if type(col) == str else col
+ for col in column_list
+ ]
+ else:
+ column_objects = column_list
+ try:
+ assert all([type(col) == Column for col in column_objects])
+ except AssertionError as e:
+ logger.error("All columns must be str or sqlalchemy.Column type")
+ raise e
+
+ query = (
+ select(*column_objects)
+ .distinct()
+ .limit(limit)
+ .offset(offset)
+ )
+ with engine.begin() as conn:
+ res = conn.execute(query).fetchall()
+ return [tuple(zip(column_objects, row)) for row in res]
+
+
def create_record_or_records(table, engine, record_data):
"""
record_data can be a dictionary, tuple, or list of dictionaries or tuples.
| {"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -1,6 +1,9 @@\n-from sqlalchemy import delete, select\n+import logging\n+from sqlalchemy import delete, select, and_, Column\n from sqlalchemy.inspection import inspect\n \n+logger = logging.getLogger(__name__)\n+\n \n def _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n@@ -18,7 +21,9 @@\n return result[0] if result else None\n \n \n-def get_records(table, engine, limit=None, offset=None, order_by=[]):\n+def get_records(\n+ table, engine, limit=None, offset=None, order_by=[], filters=[]\n+):\n \"\"\"\n Returns records from a table.\n \n@@ -30,12 +35,72 @@\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n+ filters: list of tuples of type (ColumnElement, value), where\n+ ColumnElement is an SQLAlchemy ColumnElement, and value\n+ is a valid value for the associated column (i.e., the\n+ type must be correct)\n \"\"\"\n- query = select(table).order_by(*order_by).limit(limit).offset(offset)\n+ query = (\n+ select(table)\n+ .order_by(*order_by)\n+ .limit(limit)\n+ .offset(offset)\n+ .where(_build_filter_conjunction(table, filters))\n+ )\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n \n \n+def _build_filter_conjunction(table, filters):\n+ refined_filters = [\n+ (table.columns[col] if type(col) == str else col, value)\n+ for col, value in filters\n+ ]\n+ # We need a default of True (rather than empty), since invoking and_\n+ # without arguments is deprecated.\n+ return and_(True, *[col == value for col, value in refined_filters])\n+\n+\n+def get_distinct_tuple_values(\n+ column_list, engine, table=None, limit=None, offset=None,\n+):\n+ \"\"\"\n+ Returns distinct tuples from a given list of columns.\n+\n+ Args:\n+ column_list: list of column names or SQLAlchemy column objects\n+ engine: SQLAlchemy engine object\n+ table: SQLAlchemy table object\n+ limit: int, gives number of rows to return\n+ offset: int, gives number of rows to skip\n+\n+ If no table is given, the column_list must consist entirely of\n+ SQLAlchemy column objects associated with a table.\n+ \"\"\"\n+ if table is not None:\n+ column_objects = [\n+ table.columns[col] if type(col) == str else col\n+ for col in column_list\n+ ]\n+ else:\n+ column_objects = column_list\n+ try:\n+ assert all([type(col) == Column for col in column_objects])\n+ except AssertionError as e:\n+ logger.error(\"All columns must be str or sqlalchemy.Column type\")\n+ raise e\n+\n+ query = (\n+ select(*column_objects)\n+ .distinct()\n+ .limit(limit)\n+ .offset(offset)\n+ )\n+ with engine.begin() as conn:\n+ res = conn.execute(query).fetchall()\n+ return [tuple(zip(column_objects, row)) for row in res]\n+\n+\n def create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n", "issue": "Get filtered set of records from table\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nAt the moment, when we request records from a table, it's _all_ of the records or none. 
We should be able to filter a way that supports retrieving \"groups\" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL:\r\n\r\n```sql\r\nWHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn\r\n```\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nWe should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context).\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nThe interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time.\n", "before_files": [{"content": "from sqlalchemy import delete, select\nfrom sqlalchemy.inspection import inspect\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(table, engine, limit=None, offset=None, order_by=[]):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of SQLAlchemy ColumnElements to order by. 
Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n \"\"\"\n query = select(table).order_by(*order_by).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]} | 1,625 | 798 |
gh_patches_debug_2891 | rasdani/github-patches | git_diff | getsentry__sentry-5094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webhook data does not have event id
Webhook data contains issue id only. It would be nice to have event id as well.
Discussed with @mattrobenolt on IRC. Documenting it here with this issue.
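For illustration, the kind of addition being asked for (the helper and key names here are only suggestions) would look roughly like:

```python
# Sketch: extend the webhook payload built in get_group_data() (see the
# plugin code below) with both event identifiers.
def add_event_ids(payload, event):
    payload['event'] = dict(payload.get('event') or {})
    payload['event']['event_id'] = event.event_id  # client-generated event id
    payload['event']['id'] = event.id              # database primary key
    return payload
```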
</issue>
<code>
[start of src/sentry/plugins/sentry_webhooks/plugin.py]
1 from __future__ import absolute_import
2
3 import logging
4 import six
5 import sentry
6
7 from django import forms
8 from django.conf import settings
9 from django.utils.translation import ugettext_lazy as _
10
11 from sentry.exceptions import PluginError
12 from sentry.plugins.bases import notify
13 from sentry.http import is_valid_url, safe_urlopen
14 from sentry.utils.safe import safe_execute
15
16
17 def validate_urls(value, **kwargs):
18 output = []
19 for url in value.split('\n'):
20 url = url.strip()
21 if not url:
22 continue
23 if not url.startswith(('http://', 'https://')):
24 raise PluginError('Not a valid URL.')
25 if not is_valid_url(url):
26 raise PluginError('Not a valid URL.')
27 output.append(url)
28 return '\n'.join(output)
29
30
31 class WebHooksOptionsForm(notify.NotificationConfigurationForm):
32 urls = forms.CharField(
33 label=_('Callback URLs'),
34 widget=forms.Textarea(attrs={
35 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),
36 help_text=_('Enter callback URLs to POST new events to (one per line).'))
37
38 def clean_url(self):
39 value = self.cleaned_data.get('url')
40 return validate_urls(value)
41
42
43 class WebHooksPlugin(notify.NotificationPlugin):
44 author = 'Sentry Team'
45 author_url = 'https://github.com/getsentry/sentry'
46 version = sentry.VERSION
47 description = "Integrates web hooks."
48 resource_links = [
49 ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
50 ('Source', 'https://github.com/getsentry/sentry'),
51 ]
52
53 slug = 'webhooks'
54 title = 'WebHooks'
55 conf_title = title
56 conf_key = 'webhooks'
57 # TODO(dcramer): remove when this is migrated to React
58 project_conf_form = WebHooksOptionsForm
59 timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)
60 logger = logging.getLogger('sentry.plugins.webhooks')
61 user_agent = 'sentry-webhooks/%s' % version
62
63 def is_configured(self, project, **kwargs):
64 return bool(self.get_option('urls', project))
65
66 def get_config(self, project, **kwargs):
67 return [{
68 'name': 'urls',
69 'label': 'Callback URLs',
70 'type': 'textarea',
71 'help': 'Enter callback URLs to POST new events to (one per line).',
72 'placeholder': 'https://sentry.io/callback/url',
73 'validators': [validate_urls],
74 'required': False
75 }]
76
77 def get_group_data(self, group, event):
78 data = {
79 'id': six.text_type(group.id),
80 'project': group.project.slug,
81 'project_name': group.project.name,
82 'logger': event.get_tag('logger'),
83 'level': event.get_tag('level'),
84 'culprit': group.culprit,
85 'message': event.get_legacy_message(),
86 'url': group.get_absolute_url(),
87 }
88 data['event'] = dict(event.data or {})
89 data['event']['tags'] = event.get_tags()
90 return data
91
92 def get_webhook_urls(self, project):
93 urls = self.get_option('urls', project)
94 if not urls:
95 return ()
96 return filter(bool, urls.strip().splitlines())
97
98 def send_webhook(self, url, payload):
99 return safe_urlopen(
100 url=url,
101 json=payload,
102 timeout=self.timeout,
103 verify_ssl=False,
104 )
105
106 def notify_users(self, group, event, fail_silently=False):
107 payload = self.get_group_data(group, event)
108 for url in self.get_webhook_urls(group.project):
109 safe_execute(self.send_webhook, url, payload, _with_transaction=False)
110
[end of src/sentry/plugins/sentry_webhooks/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py
--- a/src/sentry/plugins/sentry_webhooks/plugin.py
+++ b/src/sentry/plugins/sentry_webhooks/plugin.py
@@ -87,6 +87,8 @@
}
data['event'] = dict(event.data or {})
data['event']['tags'] = event.get_tags()
+ data['event']['event_id'] = event.event_id
+ data['event']['id'] = event.id
return data
def get_webhook_urls(self, project):
| {"golden_diff": "diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py\n--- a/src/sentry/plugins/sentry_webhooks/plugin.py\n+++ b/src/sentry/plugins/sentry_webhooks/plugin.py\n@@ -87,6 +87,8 @@\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n+ data['event']['event_id'] = event.event_id\n+ data['event']['id'] = event.id\n return data\n \n def get_webhook_urls(self, project):\n", "issue": "Webhook data does not have event id\nWebhook data contains issue id only. It would be nice to have event id as well.\r\n\r\nDiscussed with @mattrobenolt on IRC. Documenting it here with this issue.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, 
url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n", "path": "src/sentry/plugins/sentry_webhooks/plugin.py"}]} | 1,631 | 129 |
gh_patches_debug_42463 | rasdani/github-patches | git_diff | liqd__adhocracy4-964 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
async serializer: also set is_deleted to True for blocked comments
**URL:**
**user:**
**expected behaviour:** readable code, comments should have distinctive properties with distinct names, meanings and behavior
**behaviour:** **Async comments** have the properties `is_removed` and `is_deleted`. Comments that are either removed (by a moderator) or deleted (by a creator) are both set to `is_deleted` and their content is permanently deleted.
**important screensize:**
**device & browser:**
**Comment/Question:**
Because the serializer does not differentiate between moderator and creator for deleting the comment, does it actually make sense to show this differentiation in the serializer?
This is related to #902 and #903
Screenshot?
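For reference, a minimal, self-contained sketch of the flag handling being discussed (the flag names mirror the comment model; the class itself is only illustrative):

```python
# Sketch: treat censored, removed and blocked comments alike when
# reporting a single is_deleted flag.
class CommentVisibility:
    def __init__(self, is_censored=False, is_removed=False, is_blocked=False):
        self.is_censored = is_censored
        self.is_removed = is_removed
        self.is_blocked = is_blocked

    @property
    def is_deleted(self):
        return self.is_censored or self.is_removed or self.is_blocked


assert CommentVisibility(is_blocked=True).is_deleted
```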
</issue>
<code>
[start of adhocracy4/comments_async/serializers.py]
1 from django.conf import settings
2 from django.contrib.contenttypes.models import ContentType
3 from django.utils.translation import gettext as _
4 from easy_thumbnails.files import get_thumbnailer
5 from rest_framework import serializers
6
7 from adhocracy4.comments.models import Comment
8
9
10 class CommentSerializer(serializers.ModelSerializer):
11 """Default Serializer for the comments."""
12
13 user_name = serializers.SerializerMethodField()
14 user_pk = serializers.SerializerMethodField()
15 user_profile_url = serializers.SerializerMethodField()
16 user_image = serializers.SerializerMethodField()
17 is_deleted = serializers.SerializerMethodField()
18 ratings = serializers.SerializerMethodField()
19 is_moderator = serializers.SerializerMethodField()
20 comment_content_type = serializers.SerializerMethodField()
21 has_rating_permission = serializers.SerializerMethodField()
22 has_changing_permission = serializers.SerializerMethodField()
23 has_deleting_permission = serializers.SerializerMethodField()
24
25 class Meta:
26 model = Comment
27 read_only_fields = ('modified', 'created', 'id',
28 'user_name', 'user_pk', 'user_image',
29 'user_image_fallback', 'ratings',
30 'content_type', 'object_pk',
31 'comment_content_type', 'has_rating_permission',
32 'has_changing_permission',
33 'has_deleting_permission')
34 exclude = ('creator',)
35
36 def to_representation(self, instance):
37 """
38 Create a dictionary form categories and don't show blocked comments.
39
40 Gets the categories and adds them along with their values
41 to a dictionary.
42 Also gets the comments and blocks their content
43 from being shown if they are set to blocked.
44 """
45 ret = super().to_representation(instance)
46 categories = {}
47 if ret['comment_categories']:
48 category_choices = getattr(settings,
49 'A4_COMMENT_CATEGORIES', '')
50 if category_choices:
51 category_choices = dict((x, str(y)) for x, y
52 in category_choices)
53 category_list = ret['comment_categories'].strip('[]').split(',')
54 for category in category_list:
55 if category in category_choices:
56 categories[category] = category_choices[category]
57 else:
58 categories[category] = category
59 ret['comment_categories'] = categories
60 is_blocked = ret.get('is_blocked')
61 if is_blocked:
62 ret['comment'] = ''
63 return ret
64
65 def to_internal_value(self, data):
66 data = super().to_internal_value(data)
67 if 'comment_categories' in data:
68 value = data.get('comment_categories')
69 if value == '' or value == '[]':
70 raise serializers.ValidationError({
71 'comment_categories': _('Please choose one or more '
72 'categories.')
73 })
74 return data
75
76 def get_user_pk(self, obj):
77 if (obj.is_censored or obj.is_removed):
78 return -1
79 return str(obj.creator.id)
80
81 def get_user_profile_url(self, obj):
82 if obj.is_censored or obj.is_removed:
83 return ''
84 try:
85 return obj.creator.get_absolute_url()
86 except AttributeError:
87 return ''
88
89 def get_user_name(self, obj):
90 """Don't show username if comment is marked removed or censored."""
91 if(obj.is_censored or obj.is_removed):
92 return _('unknown user')
93 return obj.creator.get_short_name()
94
95 def get_user_image_fallback(self, obj):
96 """Load small thumbnail images for default user images."""
97 if(obj.is_censored or obj.is_removed):
98 return None
99 try:
100 if obj.creator.avatar_fallback:
101 return obj.creator.avatar_fallback
102 except AttributeError:
103 pass
104 return None
105
106 def get_user_image(self, obj):
107 """Load small thumbnail images for user images."""
108 if(obj.is_censored or obj.is_removed):
109 return None
110 try:
111 if obj.creator.avatar:
112 avatar = get_thumbnailer(obj.creator.avatar)['avatar']
113 return avatar.url
114 except AttributeError:
115 pass
116 return self.get_user_image_fallback(obj)
117
118 def get_is_moderator(self, obj):
119 return obj.project.has_moderator(obj.creator)
120
121 def get_is_deleted(self, obj):
122 """Return true if one of the flags is set."""
123 return (obj.is_censored or obj.is_removed)
124
125 def get_ratings(self, comment):
126 """
127 Get positive and negative rating count.
128
129 As well as info on the request users rating
130 """
131 user = self.context['request'].user
132 positive_ratings = comment.ratings.filter(value=1).count()
133 negative_ratings = comment.ratings.filter(value=-1).count()
134
135 if user.is_authenticated:
136 user_rating = comment.ratings.filter(creator=user).first()
137 else:
138 user_rating = None
139
140 if user_rating:
141 user_rating_value = user_rating.value
142 user_rating_id = user_rating.pk
143 else:
144 user_rating_value = None
145 user_rating_id = None
146
147 result = {
148 'positive_ratings': positive_ratings,
149 'negative_ratings': negative_ratings,
150 'current_user_rating_value': user_rating_value,
151 'current_user_rating_id': user_rating_id
152 }
153
154 return result
155
156 # used in zt-app, where we can't pass props through template tags
157 # FIXME: this should replace comments_contenttype passed in template tag
158 def get_comment_content_type(self, comment):
159 return ContentType.objects.get_for_model(Comment).pk
160
161 def get_has_rating_permission(self, comment):
162 request = self.context.get('request')
163 if request and hasattr(request, 'user'):
164 user = request.user
165 return user.has_perm('a4comments.rate_comment', comment)
166 return False
167
168 def get_has_changing_permission(self, comment):
169 request = self.context.get('request')
170 if request and hasattr(request, 'user'):
171 user = request.user
172 return user.has_perm('a4comments.change_comment', comment)
173 return False
174
175 def get_has_deleting_permission(self, comment):
176 request = self.context.get('request')
177 if request and hasattr(request, 'user'):
178 user = request.user
179 return user.has_perm('a4comments.delete_comment', comment)
180 return False
181
182
183 class CommentListSerializer(CommentSerializer):
184 """Serializer for the comments to be used when viewed as list."""
185
186
187 class ThreadSerializer(CommentSerializer):
188 """Serializes a comment including child comment (replies)."""
189
190 child_comments = CommentSerializer(many=True, read_only=True)
191
192
193 class ThreadListSerializer(CommentListSerializer):
194 """
195 Serializes comments when viewed.
196
197 As list including child comment (replies).
198 """
199
200 child_comments = CommentListSerializer(many=True, read_only=True)
201
[end of adhocracy4/comments_async/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py
--- a/adhocracy4/comments_async/serializers.py
+++ b/adhocracy4/comments_async/serializers.py
@@ -31,16 +31,15 @@
def to_representation(self, instance):
"""
- Create a dictionary form categories and don't show blocked comments.
+ Create a dictionary from categories and don't show blocked comments.
Gets the categories and adds them along with their values
- to a dictionary.
- Also gets the comments and blocks their content
- from being shown if they are set to blocked.
+ to a dictionary. Do return empty dict when comment is_blocked.
+ Does return empty string as comment text when comment is_blocked.
"""
ret = super().to_representation(instance)
categories = {}
- if ret['comment_categories']:
+ if ret['comment_categories'] and not instance.is_blocked:
category_choices = getattr(settings,
'A4_COMMENT_CATEGORIES', '')
if category_choices:
@@ -53,8 +52,7 @@
else:
categories[category] = category
ret['comment_categories'] = categories
- is_blocked = ret.get('is_blocked')
- if is_blocked:
+ if instance.is_blocked:
ret['comment'] = ''
return ret
@@ -70,12 +68,12 @@
return data
def get_user_pk(self, obj):
- if (obj.is_censored or obj.is_removed):
+ if obj.is_censored or obj.is_removed or obj.is_blocked:
return -1
return str(obj.creator.id)
def get_user_profile_url(self, obj):
- if obj.is_censored or obj.is_removed:
+ if obj.is_censored or obj.is_removed or obj.is_blocked:
return ''
try:
return obj.creator.get_absolute_url()
@@ -84,13 +82,13 @@
def get_user_name(self, obj):
"""Don't show username if comment is marked removed or censored."""
- if(obj.is_censored or obj.is_removed):
+ if obj.is_censored or obj.is_removed or obj.is_blocked:
return _('unknown user')
return obj.creator.get_short_name()
def get_user_image_fallback(self, obj):
"""Load small thumbnail images for default user images."""
- if(obj.is_censored or obj.is_removed):
+ if obj.is_censored or obj.is_removed or obj.is_blocked:
return None
try:
if obj.creator.avatar_fallback:
@@ -101,7 +99,7 @@
def get_user_image(self, obj):
"""Load small thumbnail images for user images."""
- if(obj.is_censored or obj.is_removed):
+ if obj.is_censored or obj.is_removed or obj.is_blocked:
return None
try:
if obj.creator.avatar:
@@ -116,7 +114,7 @@
def get_is_deleted(self, obj):
"""Return true if one of the flags is set."""
- return (obj.is_censored or obj.is_removed)
+ return obj.is_censored or obj.is_removed or obj.is_blocked
def get_ratings(self, comment):
"""
| {"golden_diff": "diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py\n--- a/adhocracy4/comments_async/serializers.py\n+++ b/adhocracy4/comments_async/serializers.py\n@@ -31,16 +31,15 @@\n \n def to_representation(self, instance):\n \"\"\"\n- Create a dictionary form categories and don't show blocked comments.\n+ Create a dictionary from categories and don't show blocked comments.\n \n Gets the categories and adds them along with their values\n- to a dictionary.\n- Also gets the comments and blocks their content\n- from being shown if they are set to blocked.\n+ to a dictionary. Do return empty dict when comment is_blocked.\n+ Does return empty string as comment text when comment is_blocked.\n \"\"\"\n ret = super().to_representation(instance)\n categories = {}\n- if ret['comment_categories']:\n+ if ret['comment_categories'] and not instance.is_blocked:\n category_choices = getattr(settings,\n 'A4_COMMENT_CATEGORIES', '')\n if category_choices:\n@@ -53,8 +52,7 @@\n else:\n categories[category] = category\n ret['comment_categories'] = categories\n- is_blocked = ret.get('is_blocked')\n- if is_blocked:\n+ if instance.is_blocked:\n ret['comment'] = ''\n return ret\n \n@@ -70,12 +68,12 @@\n return data\n \n def get_user_pk(self, obj):\n- if (obj.is_censored or obj.is_removed):\n+ if obj.is_censored or obj.is_removed or obj.is_blocked:\n return -1\n return str(obj.creator.id)\n \n def get_user_profile_url(self, obj):\n- if obj.is_censored or obj.is_removed:\n+ if obj.is_censored or obj.is_removed or obj.is_blocked:\n return ''\n try:\n return obj.creator.get_absolute_url()\n@@ -84,13 +82,13 @@\n \n def get_user_name(self, obj):\n \"\"\"Don't show username if comment is marked removed or censored.\"\"\"\n- if(obj.is_censored or obj.is_removed):\n+ if obj.is_censored or obj.is_removed or obj.is_blocked:\n return _('unknown user')\n return obj.creator.get_short_name()\n \n def get_user_image_fallback(self, obj):\n \"\"\"Load small thumbnail images for default user images.\"\"\"\n- if(obj.is_censored or obj.is_removed):\n+ if obj.is_censored or obj.is_removed or obj.is_blocked:\n return None\n try:\n if obj.creator.avatar_fallback:\n@@ -101,7 +99,7 @@\n \n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n- if(obj.is_censored or obj.is_removed):\n+ if obj.is_censored or obj.is_removed or obj.is_blocked:\n return None\n try:\n if obj.creator.avatar:\n@@ -116,7 +114,7 @@\n \n def get_is_deleted(self, obj):\n \"\"\"Return true if one of the flags is set.\"\"\"\n- return (obj.is_censored or obj.is_removed)\n+ return obj.is_censored or obj.is_removed or obj.is_blocked\n \n def get_ratings(self, comment):\n \"\"\"\n", "issue": "async serializer: also set is_deleted to True for blocked comments\n**URL:** \r\n**user:** \r\n**expected behaviour:** readable code, comments should have distinctive properties with distinct names, meanings and behavior\r\n**behaviour:** **Async comments** have the properties `is_removed` and `is_deleted`. 
Comments that are either removed (by a moderator) or deleted (by a creator) are both set to `is_deleted` and their content is permanently deleted.\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\nBecause the serializer does not differentiate between moderator and creator for deleting the comment, does it actually make sense to show this differentiation in the serializer?\r\nThis is related to #902 and #903\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.comments.models import Comment\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Default Serializer for the comments.\"\"\"\n\n user_name = serializers.SerializerMethodField()\n user_pk = serializers.SerializerMethodField()\n user_profile_url = serializers.SerializerMethodField()\n user_image = serializers.SerializerMethodField()\n is_deleted = serializers.SerializerMethodField()\n ratings = serializers.SerializerMethodField()\n is_moderator = serializers.SerializerMethodField()\n comment_content_type = serializers.SerializerMethodField()\n has_rating_permission = serializers.SerializerMethodField()\n has_changing_permission = serializers.SerializerMethodField()\n has_deleting_permission = serializers.SerializerMethodField()\n\n class Meta:\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n 'user_image_fallback', 'ratings',\n 'content_type', 'object_pk',\n 'comment_content_type', 'has_rating_permission',\n 'has_changing_permission',\n 'has_deleting_permission')\n exclude = ('creator',)\n\n def to_representation(self, instance):\n \"\"\"\n Create a dictionary form categories and don't show blocked comments.\n\n Gets the categories and adds them along with their values\n to a dictionary.\n Also gets the comments and blocks their content\n from being shown if they are set to blocked.\n \"\"\"\n ret = super().to_representation(instance)\n categories = {}\n if ret['comment_categories']:\n category_choices = getattr(settings,\n 'A4_COMMENT_CATEGORIES', '')\n if category_choices:\n category_choices = dict((x, str(y)) for x, y\n in category_choices)\n category_list = ret['comment_categories'].strip('[]').split(',')\n for category in category_list:\n if category in category_choices:\n categories[category] = category_choices[category]\n else:\n categories[category] = category\n ret['comment_categories'] = categories\n is_blocked = ret.get('is_blocked')\n if is_blocked:\n ret['comment'] = ''\n return ret\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n if 'comment_categories' in data:\n value = data.get('comment_categories')\n if value == '' or value == '[]':\n raise serializers.ValidationError({\n 'comment_categories': _('Please choose one or more '\n 'categories.')\n })\n return data\n\n def get_user_pk(self, obj):\n if (obj.is_censored or obj.is_removed):\n return -1\n return str(obj.creator.id)\n\n def get_user_profile_url(self, obj):\n if obj.is_censored or obj.is_removed:\n return ''\n try:\n return obj.creator.get_absolute_url()\n except AttributeError:\n return ''\n\n def get_user_name(self, obj):\n \"\"\"Don't show username if comment is marked removed or censored.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return _('unknown user')\n return 
obj.creator.get_short_name()\n\n def get_user_image_fallback(self, obj):\n \"\"\"Load small thumbnail images for default user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar_fallback:\n return obj.creator.avatar_fallback\n except AttributeError:\n pass\n return None\n\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar:\n avatar = get_thumbnailer(obj.creator.avatar)['avatar']\n return avatar.url\n except AttributeError:\n pass\n return self.get_user_image_fallback(obj)\n\n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n\n def get_is_deleted(self, obj):\n \"\"\"Return true if one of the flags is set.\"\"\"\n return (obj.is_censored or obj.is_removed)\n\n def get_ratings(self, comment):\n \"\"\"\n Get positive and negative rating count.\n\n As well as info on the request users rating\n \"\"\"\n user = self.context['request'].user\n positive_ratings = comment.ratings.filter(value=1).count()\n negative_ratings = comment.ratings.filter(value=-1).count()\n\n if user.is_authenticated:\n user_rating = comment.ratings.filter(creator=user).first()\n else:\n user_rating = None\n\n if user_rating:\n user_rating_value = user_rating.value\n user_rating_id = user_rating.pk\n else:\n user_rating_value = None\n user_rating_id = None\n\n result = {\n 'positive_ratings': positive_ratings,\n 'negative_ratings': negative_ratings,\n 'current_user_rating_value': user_rating_value,\n 'current_user_rating_id': user_rating_id\n }\n\n return result\n\n # used in zt-app, where we can't pass props through template tags\n # FIXME: this should replace comments_contenttype passed in template tag\n def get_comment_content_type(self, comment):\n return ContentType.objects.get_for_model(Comment).pk\n\n def get_has_rating_permission(self, comment):\n request = self.context.get('request')\n if request and hasattr(request, 'user'):\n user = request.user\n return user.has_perm('a4comments.rate_comment', comment)\n return False\n\n def get_has_changing_permission(self, comment):\n request = self.context.get('request')\n if request and hasattr(request, 'user'):\n user = request.user\n return user.has_perm('a4comments.change_comment', comment)\n return False\n\n def get_has_deleting_permission(self, comment):\n request = self.context.get('request')\n if request and hasattr(request, 'user'):\n user = request.user\n return user.has_perm('a4comments.delete_comment', comment)\n return False\n\n\nclass CommentListSerializer(CommentSerializer):\n \"\"\"Serializer for the comments to be used when viewed as list.\"\"\"\n\n\nclass ThreadSerializer(CommentSerializer):\n \"\"\"Serializes a comment including child comment (replies).\"\"\"\n\n child_comments = CommentSerializer(many=True, read_only=True)\n\n\nclass ThreadListSerializer(CommentListSerializer):\n \"\"\"\n Serializes comments when viewed.\n\n As list including child comment (replies).\n \"\"\"\n\n child_comments = CommentListSerializer(many=True, read_only=True)\n", "path": "adhocracy4/comments_async/serializers.py"}]} | 2,600 | 718 |
gh_patches_debug_604 | rasdani/github-patches | git_diff | pex-tool__pex-1419 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.46
On the docket:
+ [x] Fix Pip proprietary URL env marker handling. #1417
+ [x] Un-reify installed wheel script shebangs. #1410
+ [x] Support deterministic repository extract tool. #1411
+ [x] support setuptools scripts #1379
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.45"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.45"
+__version__ = "2.1.46"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.45\"\n+__version__ = \"2.1.46\"\n", "issue": "Release 2.1.46\nOn the docket:\r\n+ [x] Fix Pip proprietary URL env marker handling. #1417 \r\n+ [x] Un-reify installed wheel script shebangs. #1410\r\n+ [x] Support deterministic repository extract tool. #1411\r\n+ [x] support setuptools scripts #1379\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.45\"\n", "path": "pex/version.py"}]} | 664 | 96 |
gh_patches_debug_17600 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating new organisations from the project editor fails
</issue>
<code>
[start of akvo/rest/views/organisation.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8 from django.db.models import Q
9 from django.utils import six
10 from rest_framework.decorators import api_view
11 from rest_framework.exceptions import ParseError
12 from rest_framework.parsers import JSONParser
13 from rest_framework.response import Response
14 from rest_framework_xml.parsers import XMLParser
15 from rest_framework_xml.compat import etree
16
17 from akvo.rest.views.utils import int_or_none, get_qs_elements_for_page
18 from akvo.rsr.filters import location_choices, get_m49_filter
19 from akvo.rsr.models import Project, Organisation, Country
20 from akvo.rsr.views.utils import apply_keywords, org_projects
21 from ..serializers import OrganisationSerializer, OrganisationDirectorySerializer
22 from ..viewsets import BaseRSRViewSet
23
24
25 class AkvoOrganisationParser(XMLParser):
26 def parse(self, stream, media_type=None, parser_context=None):
27 assert etree, 'XMLParser requires defusedxml to be installed'
28
29 parser_context = parser_context or {}
30 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
31 parser = etree.DefusedXMLParser(encoding=encoding)
32 try:
33 tree = etree.parse(stream, parser=parser, forbid_dtd=True)
34 except (etree.ParseError, ValueError) as exc:
35 raise ParseError('XML parse error - %s' % six.text_type(exc))
36 return self.organisation_data_from_etree(tree.getroot())
37
38 def organisation_data_from_etree(self, tree):
39 def find_text(tree, str):
40 element = tree.find(str)
41 if element is None:
42 return ''
43 return element.text.strip() if element.text else ""
44
45 def location_data(location_tree):
46 if location_tree is None:
47 return []
48 iso_code = find_text(location_tree, 'iso_code').lower()
49 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))
50 country = country.id
51 latitude = find_text(location_tree, 'latitude') or 0
52 longitude = find_text(location_tree, 'longitude') or 0
53 primary = True
54 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]
55
56 long_name = find_text(tree, 'name')
57 name = long_name[:25]
58 description = find_text(tree, 'description')
59 url = find_text(tree, 'url')
60 iati_type = find_text(tree, 'iati_organisation_type')
61 new_organisation_type = int(iati_type) if iati_type else 22
62 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)
63 locations = location_data(tree.find('location/object'))
64 return dict(
65 name=name, long_name=long_name, description=description, url=url,
66 organisation_type=organisation_type, new_organisation_type=new_organisation_type,
67 locations=locations
68 )
69
70
71 class OrganisationViewSet(BaseRSRViewSet):
72 """
73 API endpoint that allows organisations to be viewed or edited.
74 """
75 queryset = Organisation.objects.all()
76 serializer_class = OrganisationSerializer
77 parser_classes = (AkvoOrganisationParser, JSONParser,)
78
79
80 @api_view(['GET'])
81 def organisation_directory(request):
82 """REST view for the update directory."""
83
84 page = request.rsr_page
85 all_organisations = Organisation.objects.all() if not page else _page_organisations(page)
86
87 # Filter updates based on query parameters
88 filter_, text_filter = _create_filters_query(request)
89 organisations = (
90 all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations
91 )
92 organisations_text_filtered = (
93 organisations.filter(text_filter) if text_filter is not None else organisations
94 )
95 if organisations_text_filtered.exists():
96 organisations = organisations_text_filtered
97
98 # Get the relevant data for typeaheads based on filtered organisations (minus
99 # text filtering, if no organisations were found)
100 locations = [
101 {'id': choice[0], 'name': choice[1]}
102 for choice in location_choices(organisations)
103 ]
104
105 display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)
106
107 # Get related objects of page at once
108 response = {
109 'project_count': organisations_text_filtered.count(),
110 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,
111 'location': locations,
112 'page_size_default': settings.PROJECT_DIRECTORY_PAGE_SIZES[0],
113 }
114 return Response(response)
115
116
117 def _public_projects():
118 """Return all public projects."""
119 return Project.objects.public().published().select_related('partners')
120
121
122 def _page_organisations(page):
123 """Dig out the list or organisations to use."""
124 projects = org_projects(page.organisation) if page.partner_projects else _public_projects()
125 keyword_projects = apply_keywords(page, projects)
126 return keyword_projects.all_partners()
127
128
129 def _create_filters_query(request):
130 """Returns a Q object expression based on query parameters."""
131 location_param = int_or_none(request.GET.get('location'))
132 title_or_subtitle_param = request.GET.get('title_or_subtitle')
133
134 location_filter = (
135 get_m49_filter(location_param, use_recipient_country=False) if location_param else None
136 )
137 title_filter = (
138 Q(name__icontains=title_or_subtitle_param) |
139 Q(long_name__icontains=title_or_subtitle_param)
140 ) if title_or_subtitle_param else None
141 all_filters = [
142 location_filter,
143 ]
144 filters = filter(None, all_filters)
145 return reduce(lambda x, y: x & y, filters) if filters else None, title_filter
146
[end of akvo/rest/views/organisation.py]
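For orientation, a payload matching the element lookups in `organisation_data_from_etree` might look like the string below. The real schema accepted by the endpoint is not shown in this record, so the element values and nesting are assumptions reconstructed from the `find_text()` and `location_data()` calls above.

```python
# Hypothetical organisation XML, reconstructed from the element paths read above;
# parsed with the stdlib only to show which paths the view code expects.
import xml.etree.ElementTree as ET

SAMPLE_ORG_XML = """
<object>
  <name>Example Organisation</name>
  <description>Example description</description>
  <url>https://example.org</url>
  <iati_organisation_type>22</iati_organisation_type>
  <location>
    <object>
      <iso_code>nl</iso_code>
      <latitude>52.37</latitude>
      <longitude>4.89</longitude>
    </object>
  </location>
</object>
"""

root = ET.fromstring(SAMPLE_ORG_XML)
print(root.findtext("name"), root.findtext("location/object/iso_code"))
```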
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py
--- a/akvo/rest/views/organisation.py
+++ b/akvo/rest/views/organisation.py
@@ -9,8 +9,8 @@
from django.utils import six
from rest_framework.decorators import api_view
from rest_framework.exceptions import ParseError
-from rest_framework.parsers import JSONParser
from rest_framework.response import Response
+from rest_framework.settings import api_settings
from rest_framework_xml.parsers import XMLParser
from rest_framework_xml.compat import etree
@@ -74,7 +74,7 @@
"""
queryset = Organisation.objects.all()
serializer_class = OrganisationSerializer
- parser_classes = (AkvoOrganisationParser, JSONParser,)
+ parser_classes = [AkvoOrganisationParser] + api_settings.DEFAULT_PARSER_CLASSES
@api_view(['GET'])
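The record does not state why the tuple form broke organisation creation, but a plausible reading is that listing only the XML and JSON parsers dropped Django REST Framework's default form and multipart parsers, so submissions from the project editor could no longer be parsed. The sketch below shows what the patched line composes with; it assumes a configured Django project with djangorestframework installed and stock settings, none of which is stated in this record.

```python
# Sketch only: requires a configured Django settings module and DRF installed.
from rest_framework.settings import api_settings

# With no REST_FRAMEWORK override, DEFAULT_PARSER_CLASSES resolves to
# [JSONParser, FormParser, MultiPartParser]. Prepending the custom XML parser,
# as the patch does, keeps XML support while restoring JSON/form/multipart
# request parsing for the viewset.
default_parsers = list(api_settings.DEFAULT_PARSER_CLASSES)
print([cls.__name__ for cls in default_parsers])
# Expected with stock settings: ['JSONParser', 'FormParser', 'MultiPartParser']
```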
| {"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -9,8 +9,8 @@\n from django.utils import six\n from rest_framework.decorators import api_view\n from rest_framework.exceptions import ParseError\n-from rest_framework.parsers import JSONParser\n from rest_framework.response import Response\n+from rest_framework.settings import api_settings\n from rest_framework_xml.parsers import XMLParser\n from rest_framework_xml.compat import etree\n \n@@ -74,7 +74,7 @@\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n- parser_classes = (AkvoOrganisationParser, JSONParser,)\n+ parser_classes = [AkvoOrganisationParser] + api_settings.DEFAULT_PARSER_CLASSES\n \n \n @api_view(['GET'])\n", "issue": "Creating new organisations from the project editor fails\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = 
Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n 'page_size_default': settings.PROJECT_DIRECTORY_PAGE_SIZES[0],\n }\n return Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}]} | 2,129 | 186 |
gh_patches_debug_22416 | rasdani/github-patches | git_diff | privacyidea__privacyidea-2563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Container audit fails in 3.5
The container audit will fail in version 3.5 due to a missing parameter in the constructor.
https://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2
</issue>
<code>
[start of privacyidea/lib/auditmodules/containeraudit.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2019-11-07 Cornelius Kölbel <[email protected]>
4 # initial code for writing audit information to a file
5 #
6 # This code is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
8 # License as published by the Free Software Foundation; either
9 # version 3 of the License, or any later version.
10 #
11 # This code is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public
17 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 #
20 __doc__ = """The Container Audit Module allows to write audit information to several different
21 audit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the
22 Logger Audit Module. This way audit information can be saved in the SQL database and at the same time
23 be passed to a file or external services via the Python logging facility.
24
25 The Container Audit Module is configured like this:
26
27 PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'
28 PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']
29 PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'
30
31 You also have to provide the configuration parameters for the referenced audit modules.
32
33 """
34
35 import logging
36 from privacyidea.lib.auditmodules.base import (Audit as AuditBase)
37 from privacyidea.lib.utils import get_module_class
38
39
40 log = logging.getLogger(__name__)
41
42
43 class Audit(AuditBase):
44 """
45 This is the ContainerAudit module, which writes the audit entries
46 to a list of audit modules.
47 """
48
49 def __init__(self, config=None):
50 super(Audit, self).__init__(config)
51 self.name = "containeraudit"
52 write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')
53 read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')
54 # Initialize all modules
55 self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf]
56 self.read_module = get_module_class(read_conf, "Audit", "log")(config)
57 if not self.read_module.is_readable:
58 log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
59
60 @property
61 def has_data(self):
62 return any([x.has_data for x in self.write_modules])
63
64 def log(self, param):
65 """
66 Call the log method for all writeable modules
67 """
68 for module in self.write_modules:
69 module.log(param)
70
71 def add_to_log(self, param, add_with_comma=False):
72 """
73 Call the add_to_log method for all writeable modules
74 """
75 for module in self.write_modules:
76 module.add_to_log(param, add_with_comma)
77
78 def add_policy(self, policyname):
79 """
80 Call the add_policy method for all writeable modules
81 """
82 for module in self.write_modules:
83 module.add_policy(policyname)
84
85 def search(self, search_dict, page_size=15, page=1, sortorder="asc",
86 timelimit=None):
87 """
88 Call the search method for the one readable module
89 """
90 return self.read_module.search(search_dict, page_size=page_size, page=page,
91 sortorder=sortorder, timelimit=timelimit)
92
93 def get_count(self, search_dict, timedelta=None, success=None):
94 """
95 Call the count method for the one readable module
96 """
97 return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)
98
99 def csv_generator(self, param=None, user=None, timelimit=None):
100 """
101 Call the csv_generator method for the one readable module
102 """
103 return self.read_module.csv_generator(param=param, user=user,
104 timelimit=timelimit)
105
106 def get_total(self, param, AND=True, display_error=True, timelimit=None):
107 """
108 Call the total method for the one readable module
109 """
110 return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)
111
112 def finalize_log(self):
113 """
114 Call the finalize method of all writeable audit modules
115 """
116 for module in self.write_modules:
117 module.finalize_log()
118
[end of privacyidea/lib/auditmodules/containeraudit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py
--- a/privacyidea/lib/auditmodules/containeraudit.py
+++ b/privacyidea/lib/auditmodules/containeraudit.py
@@ -46,14 +46,15 @@
to a list of audit modules.
"""
- def __init__(self, config=None):
- super(Audit, self).__init__(config)
+ def __init__(self, config=None, startdate=None):
+ super(Audit, self).__init__(config, startdate)
self.name = "containeraudit"
write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')
read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')
# Initialize all modules
- self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf]
- self.read_module = get_module_class(read_conf, "Audit", "log")(config)
+ self.write_modules = [get_module_class(audit_module, "Audit", "log")(config, startdate)
+ for audit_module in write_conf]
+ self.read_module = get_module_class(read_conf, "Audit", "log")(config, startdate)
if not self.read_module.is_readable:
log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
| {"golden_diff": "diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py\n--- a/privacyidea/lib/auditmodules/containeraudit.py\n+++ b/privacyidea/lib/auditmodules/containeraudit.py\n@@ -46,14 +46,15 @@\n to a list of audit modules.\n \"\"\"\n \n- def __init__(self, config=None):\n- super(Audit, self).__init__(config)\n+ def __init__(self, config=None, startdate=None):\n+ super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n- self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n- self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n+ self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n+ for audit_module in write_conf]\n+ self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n", "issue": "Container audit fails in 3.5\nThe container audit will fail in version 3.5 due to a missing parameter in the constructor.\r\n\r\nhttps://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. 
\n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None):\n super(Audit, self).__init__(config)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}]} | 1,863 | 312 |
gh_patches_debug_47859 | rasdani/github-patches | git_diff | saleor__saleor-903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server Error (500) when adding attribute
Hi,
First of all, thanks for this excellent software; it makes my life easier.
I deployed it on Heroku using the Heroku Elements button (https://elements.heroku.com/buttons/mirumee/saleor).
Then I tried to add an attribute, but that resulted in a Server Error (500) page. Is this expected behavior? Are there any settings that have to be changed? If so, is there a way to do that on Heroku?
Thanks!
</issue>
<code>
[start of saleor/product/models/utils.py]
1 from django.utils.encoding import smart_text
2
3
4 def get_attributes_display_map(obj, attributes):
5 display_map = {}
6 for attribute in attributes:
7 value = obj.attributes.get(smart_text(attribute.pk))
8 if value:
9 choices = {smart_text(a.pk): a for a in attribute.values.all()}
10 choice_obj = choices.get(value)
11 if choice_obj:
12 display_map[attribute.pk] = choice_obj
13 else:
14 display_map[attribute.pk] = value_pk
15 return display_map
16
[end of saleor/product/models/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py
--- a/saleor/product/models/utils.py
+++ b/saleor/product/models/utils.py
@@ -11,5 +11,5 @@
if choice_obj:
display_map[attribute.pk] = choice_obj
else:
- display_map[attribute.pk] = value_pk
+ display_map[attribute.pk] = value
return display_map
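The cause is visible in the listing above: the fallback branch assigns `value_pk`, a name that is never defined in `get_attributes_display_map`, so the first attribute value without a matching choice raises `NameError` at request time, which Django reports as a 500 page. A minimal sketch of the corrected fallback, with plain dictionaries standing in for the Django model objects:

```python
# Illustrative stand-ins only; just the dictionary logic from utils.py is kept.
def get_attributes_display_map(attribute_values, choices_by_attribute):
    display_map = {}
    for attribute_pk, value in attribute_values.items():
        choice_obj = choices_by_attribute.get(attribute_pk, {}).get(value)
        # Fall back to the raw stored value; the buggy version referenced the
        # undefined name `value_pk` here, which raised NameError.
        display_map[attribute_pk] = choice_obj if choice_obj else value
    return display_map


print(get_attributes_display_map({1: "10", 2: "free-text"}, {1: {"10": "Red"}}))
# {1: 'Red', 2: 'free-text'}
```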
| {"golden_diff": "diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py\n--- a/saleor/product/models/utils.py\n+++ b/saleor/product/models/utils.py\n@@ -11,5 +11,5 @@\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n- display_map[attribute.pk] = value_pk\n+ display_map[attribute.pk] = value\n return display_map\n", "issue": "Server Error (500) when adding attribute\nHi,\r\n\r\nFirst of all thanks for this excellent software, makes my life easier.\r\n\r\nI deployed it on heroku using the heroku elements (https://elements.heroku.com/buttons/mirumee/saleor).\r\n\r\nThen I tried to add an attribute but that resulted in Server Error (500) page. Is this expected behavior? Any settings that have to be changed? If yes then any way to do that on heroku?\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "from django.utils.encoding import smart_text\n\n\ndef get_attributes_display_map(obj, attributes):\n display_map = {}\n for attribute in attributes:\n value = obj.attributes.get(smart_text(attribute.pk))\n if value:\n choices = {smart_text(a.pk): a for a in attribute.values.all()}\n choice_obj = choices.get(value)\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n display_map[attribute.pk] = value_pk\n return display_map\n", "path": "saleor/product/models/utils.py"}]} | 770 | 99 |
gh_patches_debug_37199 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2426 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in ERGAS metric
In the definition of ERGAS, the score should be divided by `ratio` (i.e. `\ ratio`) rather than multiplied by it (`* ratio`).
For example, see ERGAS as defined in [Du et al](https://ieeexplore.ieee.org/abstract/document/4317530) and [Meng et al](https://ieeexplore.ieee.org/document/9082183).
https://github.com/Lightning-AI/torchmetrics/blob/4230cfef3d2020fffff873565acea01ad883d3e4/src/torchmetrics/functional/image/ergas.py#L82
</issue>
<code>
[start of src/torchmetrics/functional/image/ergas.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.utilities.checks import _check_same_shape
21 from torchmetrics.utilities.distributed import reduce
22
23
24 def _ergas_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
25 """Update and returns variables required to compute Erreur Relative Globale Adimensionnelle de Synthèse.
26
27 Args:
28 preds: Predicted tensor
29 target: Ground truth tensor
30
31 """
32 if preds.dtype != target.dtype:
33 raise TypeError(
34 "Expected `preds` and `target` to have the same data type."
35 f" Got preds: {preds.dtype} and target: {target.dtype}."
36 )
37 _check_same_shape(preds, target)
38 if len(preds.shape) != 4:
39 raise ValueError(
40 "Expected `preds` and `target` to have BxCxHxW shape."
41 f" Got preds: {preds.shape} and target: {target.shape}."
42 )
43 return preds, target
44
45
46 def _ergas_compute(
47 preds: Tensor,
48 target: Tensor,
49 ratio: float = 4,
50 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
51 ) -> Tensor:
52 """Erreur Relative Globale Adimensionnelle de Synthèse.
53
54 Args:
55 preds: estimated image
56 target: ground truth image
57 ratio: ratio of high resolution to low resolution
58 reduction: a method to reduce metric score over labels.
59
60 - ``'elementwise_mean'``: takes the mean (default)
61 - ``'sum'``: takes the sum
62 - ``'none'`` or ``None``: no reduction will be applied
63
64 Example:
65 >>> gen = torch.manual_seed(42)
66 >>> preds = torch.rand([16, 1, 16, 16], generator=gen)
67 >>> target = preds * 0.75
68 >>> preds, target = _ergas_update(preds, target)
69 >>> torch.round(_ergas_compute(preds, target))
70 tensor(154.)
71
72 """
73 b, c, h, w = preds.shape
74 preds = preds.reshape(b, c, h * w)
75 target = target.reshape(b, c, h * w)
76
77 diff = preds - target
78 sum_squared_error = torch.sum(diff * diff, dim=2)
79 rmse_per_band = torch.sqrt(sum_squared_error / (h * w))
80 mean_target = torch.mean(target, dim=2)
81
82 ergas_score = 100 * ratio * torch.sqrt(torch.sum((rmse_per_band / mean_target) ** 2, dim=1) / c)
83 return reduce(ergas_score, reduction)
84
85
86 def error_relative_global_dimensionless_synthesis(
87 preds: Tensor,
88 target: Tensor,
89 ratio: float = 4,
90 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
91 ) -> Tensor:
92 """Erreur Relative Globale Adimensionnelle de Synthèse.
93
94 Args:
95 preds: estimated image
96 target: ground truth image
97 ratio: ratio of high resolution to low resolution
98 reduction: a method to reduce metric score over labels.
99
100 - ``'elementwise_mean'``: takes the mean (default)
101 - ``'sum'``: takes the sum
102 - ``'none'`` or ``None``: no reduction will be applied
103
104 Return:
105 Tensor with RelativeG score
106
107 Raises:
108 TypeError:
109 If ``preds`` and ``target`` don't have the same data type.
110 ValueError:
111 If ``preds`` and ``target`` don't have ``BxCxHxW shape``.
112
113 Example:
114 >>> from torchmetrics.functional.image import error_relative_global_dimensionless_synthesis
115 >>> gen = torch.manual_seed(42)
116 >>> preds = torch.rand([16, 1, 16, 16], generator=gen)
117 >>> target = preds * 0.75
118 >>> ergds = error_relative_global_dimensionless_synthesis(preds, target)
119 >>> torch.round(ergds)
120 tensor(154.)
121
122 References:
123 [1] Qian Du; Nicholas H. Younan; Roger King; Vijay P. Shah, "On the Performance Evaluation of
124 Pan-Sharpening Techniques" in IEEE Geoscience and Remote Sensing Letters, vol. 4, no. 4, pp. 518-522,
125 15 October 2007, doi: 10.1109/LGRS.2007.896328.
126
127 """
128 preds, target = _ergas_update(preds, target)
129 return _ergas_compute(preds, target, ratio, reduction)
130
[end of src/torchmetrics/functional/image/ergas.py]
[start of src/torchmetrics/image/ergas.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, List, Optional, Sequence, Union
16
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update
21 from torchmetrics.metric import Metric
22 from torchmetrics.utilities import rank_zero_warn
23 from torchmetrics.utilities.data import dim_zero_cat
24 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
25 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
26
27 if not _MATPLOTLIB_AVAILABLE:
28 __doctest_skip__ = ["ErrorRelativeGlobalDimensionlessSynthesis.plot"]
29
30
31 class ErrorRelativeGlobalDimensionlessSynthesis(Metric):
32 """Calculate `Relative dimensionless global error synthesis`_ (ERGAS).
33
34 This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each
35 band of the result image.
36
37 As input to ``forward`` and ``update`` the metric accepts the following input
38
39 - ``preds`` (:class:`~torch.Tensor`): Predictions from model
40 - ``target`` (:class:`~torch.Tensor`): Ground truth values
41
42 As output of `forward` and `compute` the metric returns the following output
43
44 - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS
45 value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample
46
47 Args:
48 ratio: ratio of high resolution to low resolution
49 reduction: a method to reduce metric score over labels.
50
51 - ``'elementwise_mean'``: takes the mean (default)
52 - ``'sum'``: takes the sum
53 - ``'none'`` or ``None``: no reduction will be applied
54
55 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
56
57 Example:
58 >>> import torch
59 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
60 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
61 >>> target = preds * 0.75
62 >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()
63 >>> torch.round(ergas(preds, target))
64 tensor(154.)
65
66 """
67
68 higher_is_better: bool = False
69 is_differentiable: bool = True
70 full_state_update: bool = False
71 plot_lower_bound: float = 0.0
72
73 preds: List[Tensor]
74 target: List[Tensor]
75
76 def __init__(
77 self,
78 ratio: float = 4,
79 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
80 **kwargs: Any,
81 ) -> None:
82 super().__init__(**kwargs)
83 rank_zero_warn(
84 "Metric `UniversalImageQualityIndex` will save all targets and"
85 " predictions in buffer. For large datasets this may lead"
86 " to large memory footprint."
87 )
88
89 self.add_state("preds", default=[], dist_reduce_fx="cat")
90 self.add_state("target", default=[], dist_reduce_fx="cat")
91 self.ratio = ratio
92 self.reduction = reduction
93
94 def update(self, preds: Tensor, target: Tensor) -> None:
95 """Update state with predictions and targets."""
96 preds, target = _ergas_update(preds, target)
97 self.preds.append(preds)
98 self.target.append(target)
99
100 def compute(self) -> Tensor:
101 """Compute explained variance over state."""
102 preds = dim_zero_cat(self.preds)
103 target = dim_zero_cat(self.target)
104 return _ergas_compute(preds, target, self.ratio, self.reduction)
105
106 def plot(
107 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
108 ) -> _PLOT_OUT_TYPE:
109 """Plot a single or multiple values from the metric.
110
111 Args:
112 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
113 If no value is provided, will automatically call `metric.compute` and plot that result.
114 ax: An matplotlib axis object. If provided will add plot to that axis
115
116 Returns:
117 Figure and Axes object
118
119 Raises:
120 ModuleNotFoundError:
121 If `matplotlib` is not installed
122
123 .. plot::
124 :scale: 75
125
126 >>> # Example plotting a single value
127 >>> import torch
128 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
129 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
130 >>> target = preds * 0.75
131 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
132 >>> metric.update(preds, target)
133 >>> fig_, ax_ = metric.plot()
134
135 .. plot::
136 :scale: 75
137
138 >>> # Example plotting multiple values
139 >>> import torch
140 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
141 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
142 >>> target = preds * 0.75
143 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
144 >>> values = [ ]
145 >>> for _ in range(10):
146 ... values.append(metric(preds, target))
147 >>> fig_, ax_ = metric.plot(values)
148
149 """
150 return self._plot(val, ax)
151
[end of src/torchmetrics/image/ergas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/image/ergas.py b/src/torchmetrics/functional/image/ergas.py
--- a/src/torchmetrics/functional/image/ergas.py
+++ b/src/torchmetrics/functional/image/ergas.py
@@ -89,7 +89,7 @@
ratio: float = 4,
reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
) -> Tensor:
- """Erreur Relative Globale Adimensionnelle de Synthèse.
+ """Calculates `Error relative global dimensionless synthesis`_ (ERGAS) metric.
Args:
preds: estimated image
@@ -119,11 +119,6 @@
>>> torch.round(ergds)
tensor(154.)
- References:
- [1] Qian Du; Nicholas H. Younan; Roger King; Vijay P. Shah, "On the Performance Evaluation of
- Pan-Sharpening Techniques" in IEEE Geoscience and Remote Sensing Letters, vol. 4, no. 4, pp. 518-522,
- 15 October 2007, doi: 10.1109/LGRS.2007.896328.
-
"""
preds, target = _ergas_update(preds, target)
return _ergas_compute(preds, target, ratio, reduction)
diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py
--- a/src/torchmetrics/image/ergas.py
+++ b/src/torchmetrics/image/ergas.py
@@ -29,10 +29,18 @@
class ErrorRelativeGlobalDimensionlessSynthesis(Metric):
- """Calculate `Relative dimensionless global error synthesis`_ (ERGAS).
+ r"""Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.
This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each
- band of the result image.
+ band of the result image. It is defined as:
+
+ .. math::
+        ERGAS = 100 * \frac{h}{l} \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}}
+
+ where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,
+ often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`
+    is the root mean square error of the k-th band between low and high resolution images, and :math:`\mu_k` is the
+ mean value of the k-th band of the reference image.
As input to ``forward`` and ``update`` the metric accepts the following input
@@ -45,7 +53,7 @@
value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample
Args:
- ratio: ratio of high resolution to low resolution
+ ratio: ratio of high resolution to low resolution.
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
| {"golden_diff": "diff --git a/src/torchmetrics/functional/image/ergas.py b/src/torchmetrics/functional/image/ergas.py\n--- a/src/torchmetrics/functional/image/ergas.py\n+++ b/src/torchmetrics/functional/image/ergas.py\n@@ -89,7 +89,7 @@\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n ) -> Tensor:\n- \"\"\"Erreur Relative Globale Adimensionnelle de Synth\u00e8se.\n+ \"\"\"Calculates `Error relative global dimensionless synthesis`_ (ERGAS) metric.\n \n Args:\n preds: estimated image\n@@ -119,11 +119,6 @@\n >>> torch.round(ergds)\n tensor(154.)\n \n- References:\n- [1] Qian Du; Nicholas H. Younan; Roger King; Vijay P. Shah, \"On the Performance Evaluation of\n- Pan-Sharpening Techniques\" in IEEE Geoscience and Remote Sensing Letters, vol. 4, no. 4, pp. 518-522,\n- 15 October 2007, doi: 10.1109/LGRS.2007.896328.\n-\n \"\"\"\n preds, target = _ergas_update(preds, target)\n return _ergas_compute(preds, target, ratio, reduction)\ndiff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py\n--- a/src/torchmetrics/image/ergas.py\n+++ b/src/torchmetrics/image/ergas.py\n@@ -29,10 +29,18 @@\n \n \n class ErrorRelativeGlobalDimensionlessSynthesis(Metric):\n- \"\"\"Calculate `Relative dimensionless global error synthesis`_ (ERGAS).\n+ r\"\"\"Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.\n \n This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each\n- band of the result image.\n+ band of the result image. It is defined as:\n+\n+ .. math::\n+ ERGAS = 100 * \\frac{h}{l} \\\\sqrt{\\frac{1}{N} \\\\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\\\mu_k^2}}\n+\n+ where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,\n+ often shorted to the ratio between them :math:`r=h/l`. 
:math:`N` is the number of spectral bands, :math:`RMSE(B_k)`\n+ is the root mean square error of the k-th band between low and high resolution images, and :math:`\\\\mu_k` is the\n+ mean value of the k-th band of the reference image.\n \n As input to ``forward`` and ``update`` the metric accepts the following input\n \n@@ -45,7 +53,7 @@\n value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample\n \n Args:\n- ratio: ratio of high resolution to low resolution\n+ ratio: ratio of high resolution to low resolution.\n reduction: a method to reduce metric score over labels.\n \n - ``'elementwise_mean'``: takes the mean (default)\n", "issue": "Error in ERGAS metric\nIn the definition of ERGAS, it should be `\\ ratio` rather than `* ratio` \r\n\r\nFor example see ERGAS defined in [Du et al](https://ieeexplore.ieee.org/abstract/document/4317530), [Meng et al](https://ieeexplore.ieee.org/document/9082183)\r\n\r\nhttps://github.com/Lightning-AI/torchmetrics/blob/4230cfef3d2020fffff873565acea01ad883d3e4/src/torchmetrics/functional/image/ergas.py#L82\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.checks import _check_same_shape\nfrom torchmetrics.utilities.distributed import reduce\n\n\ndef _ergas_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Erreur Relative Globale Adimensionnelle de Synth\u00e8se.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n\n \"\"\"\n if preds.dtype != target.dtype:\n raise TypeError(\n \"Expected `preds` and `target` to have the same data type.\"\n f\" Got preds: {preds.dtype} and target: {target.dtype}.\"\n )\n _check_same_shape(preds, target)\n if len(preds.shape) != 4:\n raise ValueError(\n \"Expected `preds` and `target` to have BxCxHxW shape.\"\n f\" Got preds: {preds.shape} and target: {target.shape}.\"\n )\n return preds, target\n\n\ndef _ergas_compute(\n preds: Tensor,\n target: Tensor,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n) -> Tensor:\n \"\"\"Erreur Relative Globale Adimensionnelle de Synth\u00e8se.\n\n Args:\n preds: estimated image\n target: ground truth image\n ratio: ratio of high resolution to low resolution\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n Example:\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand([16, 1, 16, 16], generator=gen)\n >>> target = preds * 0.75\n >>> preds, target = _ergas_update(preds, target)\n >>> torch.round(_ergas_compute(preds, target))\n tensor(154.)\n\n \"\"\"\n b, c, h, w = preds.shape\n preds = preds.reshape(b, c, h * w)\n target = target.reshape(b, c, h * w)\n\n diff = 
preds - target\n sum_squared_error = torch.sum(diff * diff, dim=2)\n rmse_per_band = torch.sqrt(sum_squared_error / (h * w))\n mean_target = torch.mean(target, dim=2)\n\n ergas_score = 100 * ratio * torch.sqrt(torch.sum((rmse_per_band / mean_target) ** 2, dim=1) / c)\n return reduce(ergas_score, reduction)\n\n\ndef error_relative_global_dimensionless_synthesis(\n preds: Tensor,\n target: Tensor,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n) -> Tensor:\n \"\"\"Erreur Relative Globale Adimensionnelle de Synth\u00e8se.\n\n Args:\n preds: estimated image\n target: ground truth image\n ratio: ratio of high resolution to low resolution\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n Return:\n Tensor with RelativeG score\n\n Raises:\n TypeError:\n If ``preds`` and ``target`` don't have the same data type.\n ValueError:\n If ``preds`` and ``target`` don't have ``BxCxHxW shape``.\n\n Example:\n >>> from torchmetrics.functional.image import error_relative_global_dimensionless_synthesis\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand([16, 1, 16, 16], generator=gen)\n >>> target = preds * 0.75\n >>> ergds = error_relative_global_dimensionless_synthesis(preds, target)\n >>> torch.round(ergds)\n tensor(154.)\n\n References:\n [1] Qian Du; Nicholas H. Younan; Roger King; Vijay P. Shah, \"On the Performance Evaluation of\n Pan-Sharpening Techniques\" in IEEE Geoscience and Remote Sensing Letters, vol. 4, no. 4, pp. 518-522,\n 15 October 2007, doi: 10.1109/LGRS.2007.896328.\n\n \"\"\"\n preds, target = _ergas_update(preds, target)\n return _ergas_compute(preds, target, ratio, reduction)\n", "path": "src/torchmetrics/functional/image/ergas.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, List, Optional, Sequence, Union\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import dim_zero_cat\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"ErrorRelativeGlobalDimensionlessSynthesis.plot\"]\n\n\nclass ErrorRelativeGlobalDimensionlessSynthesis(Metric):\n \"\"\"Calculate `Relative dimensionless global error synthesis`_ (ERGAS).\n\n This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each\n band of the result image.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` 
(:class:`~torch.Tensor`): Ground truth values\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS\n value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample\n\n Args:\n ratio: ratio of high resolution to low resolution\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> torch.round(ergas(preds, target))\n tensor(154.)\n\n \"\"\"\n\n higher_is_better: bool = False\n is_differentiable: bool = True\n full_state_update: bool = False\n plot_lower_bound: float = 0.0\n\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n rank_zero_warn(\n \"Metric `UniversalImageQualityIndex` will save all targets and\"\n \" predictions in buffer. For large datasets this may lead\"\n \" to large memory footprint.\"\n )\n\n self.add_state(\"preds\", default=[], dist_reduce_fx=\"cat\")\n self.add_state(\"target\", default=[], dist_reduce_fx=\"cat\")\n self.ratio = ratio\n self.reduction = reduction\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n preds, target = _ergas_update(preds, target)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"Compute explained variance over state.\"\"\"\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n return _ergas_compute(preds, target, self.ratio, self.reduction)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> metric.update(preds, target)\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> values = [ ]\n >>> for _ in range(10):\n ... 
values.append(metric(preds, target))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/image/ergas.py"}]} | 3,899 | 763 |
gh_patches_debug_48735 | rasdani/github-patches | git_diff | microsoft__torchgeo-309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Open in Colab URL broken in 0.1.1
The latest 0.1.1 release broke the "Open in Colab" URL in our tutorials. Still trying to fix this.
</issue>
<code>
[start of docs/conf.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 # Configuration file for the Sphinx documentation builder.
5 #
6 # This file only contains a selection of the most common options. For a full
7 # list see the documentation:
8 # https://www.sphinx-doc.org/en/master/usage/configuration.html
9
10 # -- Path setup --------------------------------------------------------------
11
12 import os
13 import sys
14
15 import pytorch_sphinx_theme
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath(".."))
21
22 import torchgeo # noqa: E402
23
24 # -- Project information -----------------------------------------------------
25
26 project = "torchgeo"
27 copyright = "2021, Microsoft Corporation"
28 author = torchgeo.__author__
29 version = ".".join(torchgeo.__version__.split(".")[:2])
30 release = torchgeo.__version__
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "sphinx.ext.autodoc",
40 "sphinx.ext.intersphinx",
41 "sphinx.ext.napoleon",
42 "sphinx.ext.todo",
43 "sphinx.ext.viewcode",
44 "nbsphinx",
45 ]
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ["_build"]
51
52 # Sphinx 3.0+ required for:
53 # autodoc_typehints = "description"
54 needs_sphinx = "3.0"
55
56 nitpicky = True
57 nitpick_ignore = [
58 # https://github.com/sphinx-doc/sphinx/issues/8127
59 ("py:class", ".."),
60 # TODO: can't figure out why this isn't found
61 ("py:class", "LightningDataModule"),
62 # Undocumented class
63 ("py:class", "torchvision.models.resnet.ResNet"),
64 ]
65
66
67 # -- Options for HTML output -------------------------------------------------
68
69 # The theme to use for HTML and HTML Help pages. See the documentation for
70 # a list of builtin themes.
71 html_theme = "pytorch_sphinx_theme"
72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
73
74 # Theme options are theme-specific and customize the look and feel of a theme
75 # further. For a list of options available for each theme, see the
76 # documentation.
77 html_theme_options = {
78 "collapse_navigation": False,
79 "display_version": True,
80 "logo_only": True,
81 "pytorch_project": "docs",
82 "navigation_with_keys": True,
83 "analytics_id": "UA-209075005-1",
84 }
85
86 html_favicon = os.path.join("..", "logo", "favicon.ico")
87
88 html_static_path = ["_static"]
89 html_css_files = ["workaround.css"]
90
91 # -- Extension configuration -------------------------------------------------
92
93 # sphinx.ext.autodoc
94 autodoc_default_options = {
95 "members": True,
96 "special-members": True,
97 "show-inheritance": True,
98 }
99 autodoc_member_order = "bysource"
100 autodoc_typehints = "description"
101
102 # sphinx.ext.intersphinx
103 intersphinx_mapping = {
104 "matplotlib": ("https://matplotlib.org/stable/", None),
105 "python": ("https://docs.python.org/3", None),
106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None),
107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None),
108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None),
109 "torch": ("https://pytorch.org/docs/stable", None),
110 "torchvision": ("https://pytorch.org/vision/stable", None),
111 }
112
113 # nbsphinx
114 nbsphinx_execute = "never"
115 # TODO: branch/tag should change depending on which version of docs you look at
116 # TODO: width option of image directive is broken, see:
117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140
118 nbsphinx_prolog = """
119 {% set colab = "https://colab.research.google.com" %}
120 {% set repo = "microsoft/torchgeo" %}
121 {% set branch = "main" %}
122
123 .. image:: {{ colab }}/assets/colab-badge.svg
124 :class: colabbadge
125 :alt: Open in Colab
126 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
127 """
128
129 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme
130 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599
131 # NOTE: This will likely break nbsphinx widgets
132 nbsphinx_requirejs_path = ""
133
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -118,7 +118,11 @@
nbsphinx_prolog = """
{% set colab = "https://colab.research.google.com" %}
{% set repo = "microsoft/torchgeo" %}
-{% set branch = "main" %}
+{% if "dev" in env.config.release %}
+ {% set branch = "main" %}
+{% else %}
+ {% set branch = "releases/v" ~ env.config.version %}
+{% endif %}
.. image:: {{ colab }}/assets/colab-badge.svg
:class: colabbadge
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -118,7 +118,11 @@\n nbsphinx_prolog = \"\"\"\n {% set colab = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n-{% set branch = \"main\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases/v\" ~ env.config.version %}\n+{% endif %}\n \n .. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n", "issue": "Open in Colab URL broken in 0.1.1\nThe latest 0.1.1 release broke the \"Open in Colab\" URL in our tutorials. Still trying to fix this.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set branch = \"main\" %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]} | 1,971 | 151 |
gh_patches_debug_22107 | rasdani/github-patches | git_diff | Pyomo__pyomo-1140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error whil using mindtpy
I was trying to start the MINLP example presented here:
https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html
When i run the code:
```
from pyomo.environ import *
# Create a simple model
model = ConcreteModel()
model.x = Var(bounds=(1.0, 10.0), initialize=5.0)
model.y = Var(within=Binary)
model.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y))
model.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y))
model.objective = Objective(expr=abs(model.x*model.x), sense=minimize)
# Solve the model using MindtPy
SolverFactory('mindtpy').solve(model, mip_solver='glpk', nlp_solver='ipopt')
model.objective.display()
model.display()
model.pprint()
```
I get the following error:
pyomo.common.errors.DeveloperError: Internal Pyomo implementation error:
"sympy expression type 're' not found in the operator map"
Please report this to the Pyomo Developers.
I have installed pyomo 5.6.6, pyomo.extras 3.3, and pyomo.solvers 1.0 using Anaconda on Windows 10.
</issue>
<code>
[start of pyomo/core/expr/sympy_tools.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 from six import StringIO, iterkeys
12 import pyutilib.misc
13 from pyomo.core.expr import current
14 from pyomo.common import DeveloperError
15 from pyomo.core.expr import current as EXPR, native_types
16 from pyomo.core.expr.numvalue import value
17 from pyomo.core.kernel.component_map import ComponentMap
18 from pyomo.common.errors import NondifferentiableError
19
20 sympy_available = True
21 try:
22 import sympy
23
24 def _prod(*x):
25 ans = x[0]
26 for i in x[1:]:
27 ans *= i
28 return ans
29
30 def _sum(*x):
31 return sum(x_ for x_ in x)
32
33 def _nondifferentiable(*x):
34 if type(x[1]) is tuple:
35 # sympy >= 1.3 returns tuples (var, order)
36 wrt = x[1][0]
37 else:
38 # early versions of sympy returned the bare var
39 wrt = x[1]
40 raise NondifferentiableError(
41 "The sub-expression '%s' is not differentiable with respect to %s"
42 % (x[0], wrt) )
43
44 _operatorMap = {
45 sympy.Add: _sum,
46 sympy.Mul: _prod,
47 sympy.Pow: lambda x, y: x**y,
48 sympy.exp: lambda x: current.exp(x),
49 sympy.log: lambda x: current.log(x),
50 sympy.sin: lambda x: current.sin(x),
51 sympy.asin: lambda x: current.asin(x),
52 sympy.sinh: lambda x: current.sinh(x),
53 sympy.asinh: lambda x: current.asinh(x),
54 sympy.cos: lambda x: current.cos(x),
55 sympy.acos: lambda x: current.acos(x),
56 sympy.cosh: lambda x: current.cosh(x),
57 sympy.acosh: lambda x: current.acosh(x),
58 sympy.tan: lambda x: current.tan(x),
59 sympy.atan: lambda x: current.atan(x),
60 sympy.tanh: lambda x: current.tanh(x),
61 sympy.atanh: lambda x: current.atanh(x),
62 sympy.ceiling: lambda x: current.ceil(x),
63 sympy.floor: lambda x: current.floor(x),
64 sympy.sqrt: lambda x: current.sqrt(x),
65 sympy.Derivative: _nondifferentiable,
66 sympy.Tuple: lambda *x: x,
67 }
68
69 _pyomo_operator_map = {
70 EXPR.SumExpression: sympy.Add,
71 EXPR.ProductExpression: sympy.Mul,
72 EXPR.NPV_ProductExpression: sympy.Mul,
73 EXPR.MonomialTermExpression: sympy.Mul,
74 }
75
76 _functionMap = {
77 'exp': sympy.exp,
78 'log': sympy.log,
79 'log10': lambda x: sympy.log(x)/sympy.log(10),
80 'sin': sympy.sin,
81 'asin': sympy.asin,
82 'sinh': sympy.sinh,
83 'asinh': sympy.asinh,
84 'cos': sympy.cos,
85 'acos': sympy.acos,
86 'cosh': sympy.cosh,
87 'acosh': sympy.acosh,
88 'tan': sympy.tan,
89 'atan': sympy.atan,
90 'tanh': sympy.tanh,
91 'atanh': sympy.atanh,
92 'ceil': sympy.ceiling,
93 'floor': sympy.floor,
94 'sqrt': sympy.sqrt,
95 }
96 except ImportError:
97 sympy_available = False
98
99 class PyomoSympyBimap(object):
100 def __init__(self):
101 self.pyomo2sympy = ComponentMap()
102 self.sympy2pyomo = {}
103 self.i = 0
104
105 def getPyomoSymbol(self, sympy_object, default=None):
106 return self.sympy2pyomo.get(sympy_object, default)
107
108 def getSympySymbol(self, pyomo_object):
109 if pyomo_object in self.pyomo2sympy:
110 return self.pyomo2sympy[pyomo_object]
111 sympy_obj = sympy.Symbol("x%d" % self.i)
112 self.i += 1
113 self.pyomo2sympy[pyomo_object] = sympy_obj
114 self.sympy2pyomo[sympy_obj] = pyomo_object
115 return sympy_obj
116
117 def sympyVars(self):
118 return iterkeys(self.sympy2pyomo)
119
120 # =====================================================
121 # sympyify_expression
122 # =====================================================
123
124 class Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor):
125
126 def __init__(self, object_map):
127 super(Pyomo2SympyVisitor, self).__init__()
128 self.object_map = object_map
129
130 def exitNode(self, node, values):
131 if node.__class__ is EXPR.UnaryFunctionExpression:
132 return _functionMap[node._name](values[0])
133 _op = _pyomo_operator_map.get(node.__class__, None)
134 if _op is None:
135 return node._apply_operation(values)
136 else:
137 return _op(*tuple(values))
138
139 def beforeChild(self, node, child):
140 #
141 # Don't replace native or sympy types
142 #
143 if type(child) in native_types:
144 return False, child
145 #
146 # We will descend into all expressions...
147 #
148 if child.is_expression_type():
149 return True, None
150 #
151 # Replace pyomo variables with sympy variables
152 #
153 if child.is_potentially_variable():
154 return False, self.object_map.getSympySymbol(child)
155 #
156 # Everything else is a constant...
157 #
158 return False, value(child)
159
160 class Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor):
161
162 def __init__(self, object_map):
163 super(Sympy2PyomoVisitor, self).__init__()
164 self.object_map = object_map
165
166 def enterNode(self, node):
167 return (node._args, [])
168
169 def exitNode(self, node, values):
170 """ Visit nodes that have been expanded """
171 _sympyOp = node
172 _op = _operatorMap.get( type(_sympyOp), None )
173 if _op is None:
174 raise DeveloperError(
175 "sympy expression type '%s' not found in the operator "
176 "map" % type(_sympyOp) )
177 return _op(*tuple(values))
178
179 def beforeChild(self, node, child):
180 if not child._args:
181 item = self.object_map.getPyomoSymbol(child, None)
182 if item is None:
183 item = float(child.evalf())
184 return False, item
185 return True, None
186
187 def sympyify_expression(expr):
188 """Convert a Pyomo expression to a Sympy expression"""
189 #
190 # Create the visitor and call it.
191 #
192 object_map = PyomoSympyBimap()
193 visitor = Pyomo2SympyVisitor(object_map)
194 is_expr, ans = visitor.beforeChild(None, expr)
195 if not is_expr:
196 return object_map, ans
197
198 return object_map, visitor.walk_expression(expr)
199
200
201 def sympy2pyomo_expression(expr, object_map):
202 visitor = Sympy2PyomoVisitor(object_map)
203 is_expr, ans = visitor.beforeChild(None, expr)
204 if not is_expr:
205 return ans
206 return visitor.walk_expression(expr)
207
[end of pyomo/core/expr/sympy_tools.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/core/expr/sympy_tools.py b/pyomo/core/expr/sympy_tools.py
--- a/pyomo/core/expr/sympy_tools.py
+++ b/pyomo/core/expr/sympy_tools.py
@@ -62,6 +62,7 @@
sympy.ceiling: lambda x: current.ceil(x),
sympy.floor: lambda x: current.floor(x),
sympy.sqrt: lambda x: current.sqrt(x),
+ sympy.Abs: lambda x: abs(x),
sympy.Derivative: _nondifferentiable,
sympy.Tuple: lambda *x: x,
}
@@ -108,7 +109,10 @@
def getSympySymbol(self, pyomo_object):
if pyomo_object in self.pyomo2sympy:
return self.pyomo2sympy[pyomo_object]
- sympy_obj = sympy.Symbol("x%d" % self.i)
+ # Pyomo currently ONLY supports Real variables (not complex
+ # variables). If that ever changes, then we will need to
+ # revisit hard-coding the symbol type here
+ sympy_obj = sympy.Symbol("x%d" % self.i, real=True)
self.i += 1
self.pyomo2sympy[pyomo_object] = sympy_obj
self.sympy2pyomo[sympy_obj] = pyomo_object
| {"golden_diff": "diff --git a/pyomo/core/expr/sympy_tools.py b/pyomo/core/expr/sympy_tools.py\n--- a/pyomo/core/expr/sympy_tools.py\n+++ b/pyomo/core/expr/sympy_tools.py\n@@ -62,6 +62,7 @@\n sympy.ceiling: lambda x: current.ceil(x),\n sympy.floor: lambda x: current.floor(x),\n sympy.sqrt: lambda x: current.sqrt(x),\n+ sympy.Abs: lambda x: abs(x),\n sympy.Derivative: _nondifferentiable,\n sympy.Tuple: lambda *x: x,\n }\n@@ -108,7 +109,10 @@\n def getSympySymbol(self, pyomo_object):\n if pyomo_object in self.pyomo2sympy:\n return self.pyomo2sympy[pyomo_object]\n- sympy_obj = sympy.Symbol(\"x%d\" % self.i)\n+ # Pyomo currently ONLY supports Real variables (not complex\n+ # variables). If that ever changes, then we will need to\n+ # revisit hard-coding the symbol type here\n+ sympy_obj = sympy.Symbol(\"x%d\" % self.i, real=True)\n self.i += 1\n self.pyomo2sympy[pyomo_object] = sympy_obj\n self.sympy2pyomo[sympy_obj] = pyomo_object\n", "issue": "Error whil using mindtpy\nI was trying to start the MINLP example presented here:\r\nhttps://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html\r\n\r\nWhen i run the code:\r\n\r\n```\r\nfrom pyomo.environ import *\r\n\r\n# Create a simple model\r\nmodel = ConcreteModel()\r\n\r\nmodel.x = Var(bounds=(1.0, 10.0), initialize=5.0)\r\nmodel.y = Var(within=Binary)\r\n\r\nmodel.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y))\r\nmodel.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y))\r\n\r\nmodel.objective = Objective(expr=abs(model.x*model.x), sense=minimize)\r\n\r\n# Solve the model using MindtPy\r\nSolverFactory('mindtpy').solve(model, mip_solver='glpk', nlp_solver='ipopt')\r\nmodel.objective.display()\r\nmodel.display()\r\nmodel.pprint()\r\n```\r\n\r\nI get the following error:\r\n\r\npyomo.common.errors.DeveloperError: Internal Pyomo implementation error:\r\n \"sympy expression type 're' not found in the operator map\"\r\n Please report this to the Pyomo Developers.\r\n\r\nI have installed pyomo 5.6.6, pyomo.extras 3.3, and pyomo.solvers 1.0 using Anaconda on Windows 10.\r\n\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom six import StringIO, iterkeys\nimport pyutilib.misc\nfrom pyomo.core.expr import current\nfrom pyomo.common import DeveloperError\nfrom pyomo.core.expr import current as EXPR, native_types\nfrom pyomo.core.expr.numvalue import value\nfrom pyomo.core.kernel.component_map import ComponentMap\nfrom pyomo.common.errors import NondifferentiableError\n\nsympy_available = True\ntry:\n import sympy\n\n def _prod(*x):\n ans = x[0]\n for i in x[1:]:\n ans *= i\n return ans\n\n def _sum(*x):\n return sum(x_ for x_ in x)\n\n def _nondifferentiable(*x):\n if type(x[1]) is tuple:\n # sympy >= 1.3 returns tuples (var, order)\n wrt = x[1][0]\n else:\n # early versions of sympy returned the bare var\n wrt = x[1]\n raise NondifferentiableError(\n \"The sub-expression '%s' is not differentiable with respect to %s\"\n % (x[0], wrt) )\n\n _operatorMap = {\n sympy.Add: _sum,\n sympy.Mul: _prod,\n sympy.Pow: lambda x, y: x**y,\n sympy.exp: lambda x: current.exp(x),\n sympy.log: lambda x: current.log(x),\n sympy.sin: lambda x: current.sin(x),\n sympy.asin: lambda x: current.asin(x),\n sympy.sinh: lambda x: current.sinh(x),\n sympy.asinh: lambda x: current.asinh(x),\n sympy.cos: lambda x: current.cos(x),\n sympy.acos: lambda x: current.acos(x),\n sympy.cosh: lambda x: current.cosh(x),\n sympy.acosh: lambda x: current.acosh(x),\n sympy.tan: lambda x: current.tan(x),\n sympy.atan: lambda x: current.atan(x),\n sympy.tanh: lambda x: current.tanh(x),\n sympy.atanh: lambda x: current.atanh(x),\n sympy.ceiling: lambda x: current.ceil(x),\n sympy.floor: lambda x: current.floor(x),\n sympy.sqrt: lambda x: current.sqrt(x),\n sympy.Derivative: _nondifferentiable,\n sympy.Tuple: lambda *x: x,\n }\n\n _pyomo_operator_map = {\n EXPR.SumExpression: sympy.Add,\n EXPR.ProductExpression: sympy.Mul,\n EXPR.NPV_ProductExpression: sympy.Mul,\n EXPR.MonomialTermExpression: sympy.Mul,\n }\n\n _functionMap = {\n 'exp': sympy.exp,\n 'log': sympy.log,\n 'log10': lambda x: sympy.log(x)/sympy.log(10),\n 'sin': sympy.sin,\n 'asin': sympy.asin,\n 'sinh': sympy.sinh,\n 'asinh': sympy.asinh,\n 'cos': sympy.cos,\n 'acos': sympy.acos,\n 'cosh': sympy.cosh,\n 'acosh': sympy.acosh,\n 'tan': sympy.tan,\n 'atan': sympy.atan,\n 'tanh': sympy.tanh,\n 'atanh': sympy.atanh,\n 'ceil': sympy.ceiling,\n 'floor': sympy.floor,\n 'sqrt': sympy.sqrt,\n }\nexcept ImportError:\n sympy_available = False\n\nclass PyomoSympyBimap(object):\n def __init__(self):\n self.pyomo2sympy = ComponentMap()\n self.sympy2pyomo = {}\n self.i = 0\n\n def getPyomoSymbol(self, sympy_object, default=None):\n return self.sympy2pyomo.get(sympy_object, default)\n\n def getSympySymbol(self, pyomo_object):\n if pyomo_object in self.pyomo2sympy:\n return self.pyomo2sympy[pyomo_object]\n sympy_obj = sympy.Symbol(\"x%d\" % self.i)\n self.i += 1\n self.pyomo2sympy[pyomo_object] = sympy_obj\n self.sympy2pyomo[sympy_obj] = pyomo_object\n return sympy_obj\n\n def sympyVars(self):\n return iterkeys(self.sympy2pyomo)\n\n# =====================================================\n# sympyify_expression\n# =====================================================\n\nclass Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor):\n\n def __init__(self, object_map):\n super(Pyomo2SympyVisitor, self).__init__()\n self.object_map = object_map\n\n def exitNode(self, node, values):\n if node.__class__ is 
EXPR.UnaryFunctionExpression:\n return _functionMap[node._name](values[0])\n _op = _pyomo_operator_map.get(node.__class__, None)\n if _op is None:\n return node._apply_operation(values)\n else:\n return _op(*tuple(values))\n\n def beforeChild(self, node, child):\n #\n # Don't replace native or sympy types\n #\n if type(child) in native_types:\n return False, child\n #\n # We will descend into all expressions...\n #\n if child.is_expression_type():\n return True, None\n #\n # Replace pyomo variables with sympy variables\n #\n if child.is_potentially_variable():\n return False, self.object_map.getSympySymbol(child)\n #\n # Everything else is a constant...\n #\n return False, value(child)\n\nclass Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor):\n\n def __init__(self, object_map):\n super(Sympy2PyomoVisitor, self).__init__()\n self.object_map = object_map\n\n def enterNode(self, node):\n return (node._args, [])\n\n def exitNode(self, node, values):\n \"\"\" Visit nodes that have been expanded \"\"\"\n _sympyOp = node\n _op = _operatorMap.get( type(_sympyOp), None )\n if _op is None:\n raise DeveloperError(\n \"sympy expression type '%s' not found in the operator \"\n \"map\" % type(_sympyOp) )\n return _op(*tuple(values))\n\n def beforeChild(self, node, child):\n if not child._args:\n item = self.object_map.getPyomoSymbol(child, None)\n if item is None:\n item = float(child.evalf())\n return False, item\n return True, None\n\ndef sympyify_expression(expr):\n \"\"\"Convert a Pyomo expression to a Sympy expression\"\"\"\n #\n # Create the visitor and call it.\n #\n object_map = PyomoSympyBimap()\n visitor = Pyomo2SympyVisitor(object_map)\n is_expr, ans = visitor.beforeChild(None, expr)\n if not is_expr:\n return object_map, ans\n\n return object_map, visitor.walk_expression(expr)\n\n\ndef sympy2pyomo_expression(expr, object_map):\n visitor = Sympy2PyomoVisitor(object_map)\n is_expr, ans = visitor.beforeChild(None, expr)\n if not is_expr:\n return ans\n return visitor.walk_expression(expr)\n", "path": "pyomo/core/expr/sympy_tools.py"}]} | 3,102 | 324 |
gh_patches_debug_18606 | rasdani/github-patches | git_diff | pytorch__vision-301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Phototour dataset links are broken.
Hi there,
I was trying triplet code on the this fork https://github.com/edgarriba/examples
But dataset link are broken? @edgarriba can you prvoide updated links?
thanks,
Aytac
</issue>
<code>
[start of torchvision/datasets/phototour.py]
1 import os
2 import errno
3 import numpy as np
4 from PIL import Image
5
6 import torch
7 import torch.utils.data as data
8
9 from .utils import download_url, check_integrity
10
11
12 class PhotoTour(data.Dataset):
13 """`Learning Local Image Descriptors Data <http://phototour.cs.washington.edu/patches/default.htm>`_ Dataset.
14
15
16 Args:
17 root (string): Root directory where images are.
18 name (string): Name of the dataset to load.
19 transform (callable, optional): A function/transform that takes in an PIL image
20 and returns a transformed version.
21 download (bool, optional): If true, downloads the dataset from the internet and
22 puts it in root directory. If dataset is already downloaded, it is not
23 downloaded again.
24
25 """
26 urls = {
27 'notredame': [
28 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/notredame.zip',
29 'notredame.zip',
30 '509eda8535847b8c0a90bbb210c83484'
31 ],
32 'yosemite': [
33 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/yosemite.zip',
34 'yosemite.zip',
35 '533b2e8eb7ede31be40abc317b2fd4f0'
36 ],
37 'liberty': [
38 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/liberty.zip',
39 'liberty.zip',
40 'fdd9152f138ea5ef2091746689176414'
41 ],
42 }
43 mean = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437}
44 std = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019}
45 lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092}
46
47 image_ext = 'bmp'
48 info_file = 'info.txt'
49 matches_files = 'm50_100000_100000_0.txt'
50
51 def __init__(self, root, name, train=True, transform=None, download=False):
52 self.root = os.path.expanduser(root)
53 self.name = name
54 self.data_dir = os.path.join(self.root, name)
55 self.data_down = os.path.join(self.root, '{}.zip'.format(name))
56 self.data_file = os.path.join(self.root, '{}.pt'.format(name))
57
58 self.train = train
59 self.transform = transform
60
61 self.mean = self.mean[name]
62 self.std = self.std[name]
63
64 if download:
65 self.download()
66
67 if not self._check_datafile_exists():
68 raise RuntimeError('Dataset not found.' +
69 ' You can use download=True to download it')
70
71 # load the serialized data
72 self.data, self.labels, self.matches = torch.load(self.data_file)
73
74 def __getitem__(self, index):
75 """
76 Args:
77 index (int): Index
78
79 Returns:
80 tuple: (data1, data2, matches)
81 """
82 if self.train:
83 data = self.data[index]
84 if self.transform is not None:
85 data = self.transform(data)
86 return data
87 m = self.matches[index]
88 data1, data2 = self.data[m[0]], self.data[m[1]]
89 if self.transform is not None:
90 data1 = self.transform(data1)
91 data2 = self.transform(data2)
92 return data1, data2, m[2]
93
94 def __len__(self):
95 if self.train:
96 return self.lens[self.name]
97 return len(self.matches)
98
99 def _check_datafile_exists(self):
100 return os.path.exists(self.data_file)
101
102 def _check_downloaded(self):
103 return os.path.exists(self.data_dir)
104
105 def download(self):
106 if self._check_datafile_exists():
107 print('# Found cached data {}'.format(self.data_file))
108 return
109
110 if not self._check_downloaded():
111 # download files
112 url = self.urls[self.name][0]
113 filename = self.urls[self.name][1]
114 md5 = self.urls[self.name][2]
115 fpath = os.path.join(self.root, filename)
116
117 download_url(url, self.root, filename, md5)
118
119 print('# Extracting data {}\n'.format(self.data_down))
120
121 import zipfile
122 with zipfile.ZipFile(fpath, 'r') as z:
123 z.extractall(self.data_dir)
124
125 os.unlink(fpath)
126
127 # process and save as torch files
128 print('# Caching data {}'.format(self.data_file))
129
130 dataset = (
131 read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
132 read_info_file(self.data_dir, self.info_file),
133 read_matches_files(self.data_dir, self.matches_files)
134 )
135
136 with open(self.data_file, 'wb') as f:
137 torch.save(dataset, f)
138
139
140 def read_image_file(data_dir, image_ext, n):
141 """Return a Tensor containing the patches
142 """
143 def PIL2array(_img):
144 """Convert PIL image type to numpy 2D array
145 """
146 return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
147
148 def find_files(_data_dir, _image_ext):
149 """Return a list with the file names of the images containing the patches
150 """
151 files = []
152 # find those files with the specified extension
153 for file_dir in os.listdir(_data_dir):
154 if file_dir.endswith(_image_ext):
155 files.append(os.path.join(_data_dir, file_dir))
156 return sorted(files) # sort files in ascend order to keep relations
157
158 patches = []
159 list_files = find_files(data_dir, image_ext)
160
161 for fpath in list_files:
162 img = Image.open(fpath)
163 for y in range(0, 1024, 64):
164 for x in range(0, 1024, 64):
165 patch = img.crop((x, y, x + 64, y + 64))
166 patches.append(PIL2array(patch))
167 return torch.ByteTensor(np.array(patches[:n]))
168
169
170 def read_info_file(data_dir, info_file):
171 """Return a Tensor containing the list of labels
172 Read the file and keep only the ID of the 3D point.
173 """
174 labels = []
175 with open(os.path.join(data_dir, info_file), 'r') as f:
176 labels = [int(line.split()[0]) for line in f]
177 return torch.LongTensor(labels)
178
179
180 def read_matches_files(data_dir, matches_file):
181 """Return a Tensor containing the ground truth matches
182 Read the file and keep only 3D point ID.
183 Matches are represented with a 1, non matches with a 0.
184 """
185 matches = []
186 with open(os.path.join(data_dir, matches_file), 'r') as f:
187 for line in f:
188 l = line.split()
189 matches.append([int(l[0]), int(l[3]), int(l[1] == l[4])])
190 return torch.LongTensor(matches)
191
[end of torchvision/datasets/phototour.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py
--- a/torchvision/datasets/phototour.py
+++ b/torchvision/datasets/phototour.py
@@ -25,17 +25,17 @@
"""
urls = {
'notredame': [
- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/notredame.zip',
+ 'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip',
'notredame.zip',
'509eda8535847b8c0a90bbb210c83484'
],
'yosemite': [
- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/yosemite.zip',
+ 'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip',
'yosemite.zip',
'533b2e8eb7ede31be40abc317b2fd4f0'
],
'liberty': [
- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/liberty.zip',
+ 'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip',
'liberty.zip',
'fdd9152f138ea5ef2091746689176414'
],
| {"golden_diff": "diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py\n--- a/torchvision/datasets/phototour.py\n+++ b/torchvision/datasets/phototour.py\n@@ -25,17 +25,17 @@\n \"\"\"\n urls = {\n 'notredame': [\n- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/notredame.zip',\n+ 'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip',\n 'notredame.zip',\n '509eda8535847b8c0a90bbb210c83484'\n ],\n 'yosemite': [\n- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/yosemite.zip',\n+ 'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip',\n 'yosemite.zip',\n '533b2e8eb7ede31be40abc317b2fd4f0'\n ],\n 'liberty': [\n- 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/liberty.zip',\n+ 'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip',\n 'liberty.zip',\n 'fdd9152f138ea5ef2091746689176414'\n ],\n", "issue": "Phototour dataset links are broken.\nHi there,\r\nI was trying triplet code on the this fork https://github.com/edgarriba/examples\r\nBut dataset link are broken? @edgarriba can you prvoide updated links?\r\n\r\nthanks,\r\nAytac\n", "before_files": [{"content": "import os\nimport errno\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data as data\n\nfrom .utils import download_url, check_integrity\n\n\nclass PhotoTour(data.Dataset):\n \"\"\"`Learning Local Image Descriptors Data <http://phototour.cs.washington.edu/patches/default.htm>`_ Dataset.\n\n\n Args:\n root (string): Root directory where images are.\n name (string): Name of the dataset to load.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n urls = {\n 'notredame': [\n 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/notredame.zip',\n 'notredame.zip',\n '509eda8535847b8c0a90bbb210c83484'\n ],\n 'yosemite': [\n 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/yosemite.zip',\n 'yosemite.zip',\n '533b2e8eb7ede31be40abc317b2fd4f0'\n ],\n 'liberty': [\n 'http://www.iis.ee.ic.ac.uk/~vbalnt/phototourism-patches/liberty.zip',\n 'liberty.zip',\n 'fdd9152f138ea5ef2091746689176414'\n ],\n }\n mean = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437}\n std = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019}\n lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092}\n\n image_ext = 'bmp'\n info_file = 'info.txt'\n matches_files = 'm50_100000_100000_0.txt'\n\n def __init__(self, root, name, train=True, transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.name = name\n self.data_dir = os.path.join(self.root, name)\n self.data_down = os.path.join(self.root, '{}.zip'.format(name))\n self.data_file = os.path.join(self.root, '{}.pt'.format(name))\n\n self.train = train\n self.transform = transform\n\n self.mean = self.mean[name]\n self.std = self.std[name]\n\n if download:\n self.download()\n\n if not self._check_datafile_exists():\n raise RuntimeError('Dataset not found.' 
+\n ' You can use download=True to download it')\n\n # load the serialized data\n self.data, self.labels, self.matches = torch.load(self.data_file)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (data1, data2, matches)\n \"\"\"\n if self.train:\n data = self.data[index]\n if self.transform is not None:\n data = self.transform(data)\n return data\n m = self.matches[index]\n data1, data2 = self.data[m[0]], self.data[m[1]]\n if self.transform is not None:\n data1 = self.transform(data1)\n data2 = self.transform(data2)\n return data1, data2, m[2]\n\n def __len__(self):\n if self.train:\n return self.lens[self.name]\n return len(self.matches)\n\n def _check_datafile_exists(self):\n return os.path.exists(self.data_file)\n\n def _check_downloaded(self):\n return os.path.exists(self.data_dir)\n\n def download(self):\n if self._check_datafile_exists():\n print('# Found cached data {}'.format(self.data_file))\n return\n\n if not self._check_downloaded():\n # download files\n url = self.urls[self.name][0]\n filename = self.urls[self.name][1]\n md5 = self.urls[self.name][2]\n fpath = os.path.join(self.root, filename)\n\n download_url(url, self.root, filename, md5)\n\n print('# Extracting data {}\\n'.format(self.data_down))\n\n import zipfile\n with zipfile.ZipFile(fpath, 'r') as z:\n z.extractall(self.data_dir)\n\n os.unlink(fpath)\n\n # process and save as torch files\n print('# Caching data {}'.format(self.data_file))\n\n dataset = (\n read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),\n read_info_file(self.data_dir, self.info_file),\n read_matches_files(self.data_dir, self.matches_files)\n )\n\n with open(self.data_file, 'wb') as f:\n torch.save(dataset, f)\n\n\ndef read_image_file(data_dir, image_ext, n):\n \"\"\"Return a Tensor containing the patches\n \"\"\"\n def PIL2array(_img):\n \"\"\"Convert PIL image type to numpy 2D array\n \"\"\"\n return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)\n\n def find_files(_data_dir, _image_ext):\n \"\"\"Return a list with the file names of the images containing the patches\n \"\"\"\n files = []\n # find those files with the specified extension\n for file_dir in os.listdir(_data_dir):\n if file_dir.endswith(_image_ext):\n files.append(os.path.join(_data_dir, file_dir))\n return sorted(files) # sort files in ascend order to keep relations\n\n patches = []\n list_files = find_files(data_dir, image_ext)\n\n for fpath in list_files:\n img = Image.open(fpath)\n for y in range(0, 1024, 64):\n for x in range(0, 1024, 64):\n patch = img.crop((x, y, x + 64, y + 64))\n patches.append(PIL2array(patch))\n return torch.ByteTensor(np.array(patches[:n]))\n\n\ndef read_info_file(data_dir, info_file):\n \"\"\"Return a Tensor containing the list of labels\n Read the file and keep only the ID of the 3D point.\n \"\"\"\n labels = []\n with open(os.path.join(data_dir, info_file), 'r') as f:\n labels = [int(line.split()[0]) for line in f]\n return torch.LongTensor(labels)\n\n\ndef read_matches_files(data_dir, matches_file):\n \"\"\"Return a Tensor containing the ground truth matches\n Read the file and keep only 3D point ID.\n Matches are represented with a 1, non matches with a 0.\n \"\"\"\n matches = []\n with open(os.path.join(data_dir, matches_file), 'r') as f:\n for line in f:\n l = line.split()\n matches.append([int(l[0]), int(l[3]), int(l[1] == l[4])])\n return torch.LongTensor(matches)\n", "path": "torchvision/datasets/phototour.py"}]} | 2,721 | 350 |
gh_patches_debug_16576 | rasdani/github-patches | git_diff | nyu-mll__jiant-1247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wsc get_train_examples doesn't match wsc data format
In jiant/jiant/tasks/lib/wsc.py, `_create_examples()` dies at the statement `span1_idx=line["span1_index"]` (line 177) with `KeyError: 'span1_index'` because that key does not match the structure of the JSON task data: the span fields are nested under a `"target"` object rather than sitting at the top level of each record.
The statement should be `span1_idx=line["target"]["span1_index"]`, and similarly for the next three statements.
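A minimal sketch of the corrected access pattern is below. The surrounding loop, the `guid` format, and the test-label handling are reconstructed for illustration and may differ from the real `_create_examples()`; the substance of the fix is the four nested `line["target"][...]` lookups.
```python
# Hypothetical sketch only -- not the actual jiant implementation.
# Example is the dataclass defined near the top of wsc.py.
def _create_examples_sketch(lines, set_type):
    examples = []
    for i, line in enumerate(lines):
        target = line["target"]  # span fields are nested under "target", not top-level
        examples.append(
            Example(
                guid="%s-%s" % (set_type, i),
                text=line["text"],
                span1_idx=target["span1_index"],
                span2_idx=target["span2_index"],
                span1_text=target["span1_text"],
                span2_text=target["span2_text"],
                # the unlabeled test split has no "label" key
                label=line["label"] if "label" in line else False,
            )
        )
    return examples
```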
**To Reproduce**
1. Install jiant v2 from any recent commit (where wsc.py hasn't been touched since 3bd801ce)
2. I doubt this matters, but I'm running Python 3.8 on a recent Linux, on a 40-core, 80-thread Skylake CPU with 384 GB of RAM and a V100/16GB GPU.
3. In the Python REPL,
```python
from jiant.proj.simple import runscript as run
import jiant.scripts.download_data.runscript as downloader

# Download the WSC task data into the tasks directory
downloader.download_data(["wsc"], "/home/rasmussen.63/wsc-speed/tasks")

# Minimal single-task fine-tuning run
args = run.RunConfiguration(
    run_name="wsc-speed",
    exp_dir="wsc-speed",
    data_dir="wsc-speed/tasks",
    model_type="roberta-base",
    tasks="wsc",
    train_batch_size=16,
    num_train_epochs=3,
)
run.run_simple(args)
```
Watch stderr come back to you until it stops at
```
Tokenizing Task 'wsc' for phases 'train,val,test'
WSCTask
[train]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/train.jsonl
[val]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/val.jsonl
[test]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/test.jsonl
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/rasmussen.63/jiant/jiant/proj/simple/runscript.py", line 148, in run_simple
    tokenize_and_cache.main(
  File "/home/rasmussen.63/jiant/jiant/proj/main/tokenize_and_cache.py", line 165, in main
    examples=task.get_train_examples(),
  File "/home/rasmussen.63/jiant/jiant/tasks/lib/wsc.py", line 160, in get_train_examples
    return self._create_examples(lines=read_json_lines(self.train_path), set_type="train")
  File "/home/rasmussen.63/jiant/jiant/tasks/lib/wsc.py", line 177, in _create_examples
    span1_idx=line["span1_index"],
KeyError: 'span1_index'
```
**Expected behavior**
WSCTask should initialize an Example from the downloaded data.
</issue>
<code>
[start of jiant/tasks/lib/wsc.py]
1 import numpy as np
2 import torch
3 from dataclasses import dataclass
4 from typing import List
5
6 from jiant.tasks.core import (
7 BaseExample,
8 BaseTokenizedExample,
9 BaseDataRow,
10 BatchMixin,
11 SuperGlueMixin,
12 Task,
13 TaskTypes,
14 )
15 from jiant.tasks.lib.templates.shared import (
16 labels_to_bimap,
17 add_cls_token,
18 create_input_set_from_tokens_and_segments,
19 )
20 from jiant.tasks.utils import truncate_sequences, ExclusiveSpan
21 from jiant.utils import retokenize
22 from jiant.utils.python.io import read_json_lines
23 from jiant.utils.tokenization_normalization import normalize_tokenizations
24
25
26 @dataclass
27 class Example(BaseExample):
28 guid: str
29 text: str
30 span1_idx: int
31 span2_idx: int
32 span1_text: str
33 span2_text: str
34 label: str
35
36 def tokenize(self, tokenizer):
37 space_tokenization = self.text.split()
38 target_tokenization = tokenizer.tokenize(self.text)
39 normed_space_tokenization, normed_target_tokenization = normalize_tokenizations(
40 space_tokenization, target_tokenization, tokenizer
41 )
42 aligner = retokenize.TokenAligner(normed_space_tokenization, normed_target_tokenization)
43 span1_token_count = len(self.span1_text.split())
44 span2_token_count = len(self.span2_text.split())
45 target_span1 = ExclusiveSpan(
46 *aligner.project_token_span(self.span1_idx, self.span1_idx + span1_token_count)
47 )
48 target_span2 = ExclusiveSpan(
49 *aligner.project_token_span(self.span2_idx, self.span2_idx + span2_token_count)
50 )
51 return TokenizedExample(
52 guid=self.guid,
53 tokens=target_tokenization,
54 span1_span=target_span1,
55 span2_span=target_span2,
56 span1_text=self.span1_text,
57 span2_text=self.span2_text,
58 label_id=WSCTask.LABEL_TO_ID[self.label],
59 )
60
61
62 @dataclass
63 class TokenizedExample(BaseTokenizedExample):
64 guid: str
65 tokens: List
66 span1_span: ExclusiveSpan
67 span2_span: ExclusiveSpan
68 span1_text: str
69 span2_text: str
70 label_id: int
71
72 def featurize(self, tokenizer, feat_spec):
73 special_tokens_count = 2 # CLS, SEP
74
75 (tokens,) = truncate_sequences(
76 tokens_ls=[self.tokens], max_length=feat_spec.max_seq_length - special_tokens_count,
77 )
78
79 unpadded_tokens = tokens + [tokenizer.sep_token]
80 unpadded_segment_ids = [feat_spec.sequence_a_segment_id] * (len(self.tokens) + 1)
81
82 unpadded_inputs = add_cls_token(
83 unpadded_tokens=unpadded_tokens,
84 unpadded_segment_ids=unpadded_segment_ids,
85 tokenizer=tokenizer,
86 feat_spec=feat_spec,
87 )
88
89 input_set = create_input_set_from_tokens_and_segments(
90 unpadded_tokens=unpadded_inputs.unpadded_tokens,
91 unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
92 tokenizer=tokenizer,
93 feat_spec=feat_spec,
94 )
95 span1_span = ExclusiveSpan(
96 start=self.span1_span[0] + unpadded_inputs.cls_offset,
97 end=self.span1_span[1] + unpadded_inputs.cls_offset,
98 ).to_inclusive()
99 span2_span = ExclusiveSpan(
100 start=self.span2_span[0] + unpadded_inputs.cls_offset,
101 end=self.span2_span[1] + unpadded_inputs.cls_offset,
102 ).to_inclusive()
103
104 return DataRow(
105 guid=self.guid,
106 input_ids=np.array(input_set.input_ids),
107 input_mask=np.array(input_set.input_mask),
108 segment_ids=np.array(input_set.segment_ids),
109 spans=np.array([span1_span, span2_span]),
110 label_id=self.label_id,
111 tokens=unpadded_inputs.unpadded_tokens,
112 span1_text=self.span1_text,
113 span2_text=self.span2_text,
114 )
115
116
117 @dataclass
118 class DataRow(BaseDataRow):
119 guid: str
120 input_ids: np.ndarray
121 input_mask: np.ndarray
122 segment_ids: np.ndarray
123 spans: np.ndarray
124 label_id: int
125 tokens: List
126 span1_text: str
127 span2_text: str
128
129 def get_tokens(self):
130 return [self.tokens]
131
132
133 @dataclass
134 class Batch(BatchMixin):
135 input_ids: torch.LongTensor
136 input_mask: torch.LongTensor
137 segment_ids: torch.LongTensor
138 spans: torch.LongTensor
139 label_id: torch.LongTensor
140 tokens: List
141 span1_text: List
142 span2_text: List
143
144
145 class WSCTask(SuperGlueMixin, Task):
146 Example = Example
147 TokenizedExample = Example
148 DataRow = DataRow
149 Batch = Batch
150
151 TASK_TYPE = TaskTypes.SPAN_COMPARISON_CLASSIFICATION
152 LABELS = [False, True]
153 LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
154
155 @property
156 def num_spans(self):
157 return 2
158
159 def get_train_examples(self):
160 return self._create_examples(lines=read_json_lines(self.train_path), set_type="train")
161
162 def get_val_examples(self):
163 return self._create_examples(lines=read_json_lines(self.val_path), set_type="val")
164
165 def get_test_examples(self):
166 return self._create_examples(lines=read_json_lines(self.test_path), set_type="test")
167
168 @classmethod
169 def _create_examples(cls, lines, set_type):
170 examples = []
171 for line in lines:
172 examples.append(
173 Example(
174 # NOTE: WSCTask.super_glue_format_preds() is dependent on this guid format.
175 guid="%s-%s" % (set_type, line["idx"]),
176 text=line["text"],
177 span1_idx=line["span1_index"],
178 span2_idx=line["span2_index"],
179 span1_text=line["span1_text"],
180 span2_text=line["span2_text"],
181 label=line["label"] if set_type != "test" else cls.LABELS[-1],
182 )
183 )
184 return examples
185
186 @classmethod
187 def super_glue_format_preds(cls, pred_dict):
188 """Reformat this task's raw predictions to have the structure expected by SuperGLUE."""
189 lines = []
190 for pred, guid in zip(list(pred_dict["preds"]), list(pred_dict["guids"])):
191 lines.append({"idx": int(guid.split("-")[1]), "label": str(cls.LABELS[pred])})
192 return lines
193
194
195 def extract_char_span(full_text, span_text, space_index):
196 space_tokens = full_text.split()
197 extracted_span_text = space_tokens[space_index]
198 assert extracted_span_text.lower() in full_text.lower()
199 span_length = len(span_text)
200 if space_index == 0:
201 start = 0
202 else:
203 start = len(" ".join(space_tokens[:space_index])) + 1
204 # exclusive span
205 return ExclusiveSpan(start=start, end=start + span_length)
206
[end of jiant/tasks/lib/wsc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jiant/tasks/lib/wsc.py b/jiant/tasks/lib/wsc.py
--- a/jiant/tasks/lib/wsc.py
+++ b/jiant/tasks/lib/wsc.py
@@ -174,10 +174,10 @@
# NOTE: WSCTask.super_glue_format_preds() is dependent on this guid format.
guid="%s-%s" % (set_type, line["idx"]),
text=line["text"],
- span1_idx=line["span1_index"],
- span2_idx=line["span2_index"],
- span1_text=line["span1_text"],
- span2_text=line["span2_text"],
+ span1_idx=line["target"]["span1_index"],
+ span2_idx=line["target"]["span2_index"],
+ span1_text=line["target"]["span1_text"],
+ span2_text=line["target"]["span2_text"],
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
| {"golden_diff": "diff --git a/jiant/tasks/lib/wsc.py b/jiant/tasks/lib/wsc.py\n--- a/jiant/tasks/lib/wsc.py\n+++ b/jiant/tasks/lib/wsc.py\n@@ -174,10 +174,10 @@\n # NOTE: WSCTask.super_glue_format_preds() is dependent on this guid format.\n guid=\"%s-%s\" % (set_type, line[\"idx\"]),\n text=line[\"text\"],\n- span1_idx=line[\"span1_index\"],\n- span2_idx=line[\"span2_index\"],\n- span1_text=line[\"span1_text\"],\n- span2_text=line[\"span2_text\"],\n+ span1_idx=line[\"target\"][\"span1_index\"],\n+ span2_idx=line[\"target\"][\"span2_index\"],\n+ span1_text=line[\"target\"][\"span1_text\"],\n+ span2_text=line[\"target\"][\"span2_text\"],\n label=line[\"label\"] if set_type != \"test\" else cls.LABELS[-1],\n )\n )\n", "issue": "wsc get_train_examples doesn't match wsc data format\nIn jiant/jiant/tasks/lib/wsc.py, `_create_examples()` dies at the statement `span1_idx=line[\"span1_index\"]` (line 177) with `KeyError: 'span1_index'` because it mismatches the structure of the JSON task data.\r\n\r\nThe statement should be `span1_idx=line[\"target\"][\"span1_index\"]` and similarly for the next 3 statements.\r\n\r\n**To Reproduce**\r\n1. Install jiant v2 from any recent commit (where wsc.py hasn't been touched since 3bd801ce)\r\n2. I doubt this matters, but running Python version 3.8 in a recent linux on a 40 core, 80 thread skylake CPU with 384 GB of RAM and a VT100/16GB GPU.\r\n3. In the Python REPL,\r\n```python\r\nfrom jiant.proj.simple import runscript as run\r\nimport jiant.scripts.download_data.runscript as downloader\r\ndownloader.download_data([\"wsc\"], \"/home/rasmussen.63/wsc-speed/tasks\")\r\nargs = run.RunConfiguration(\r\n run_name=\"wsc-speed\",\r\n exp_dir=\"wsc-speed\",\r\n data_dir=\"wsc-speed/tasks\",\r\n model_type=\"roberta-base\",\r\n tasks=\"wsc\",\r\n train_batch_size=16,\r\n num_train_epochs=3\r\n)\r\nrun.run_simple(args)\r\n```\r\nWatch stderr come back to you until it stops at\r\n\r\n```\r\nTokenizing Task 'wsc' for phases 'train,val,test'\r\nWSCTask\r\n [train]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/train.jsonl\r\n [val]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/val.jsonl\r\n [test]: /home/rasmussen.63/wsc-speed/tasks/data/wsc/test.jsonl\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/rasmussen.63/jiant/jiant/proj/simple/runscript.py\", line 148, in run_simple\r\n tokenize_and_cache.main(\r\n File \"/home/rasmussen.63/jiant/jiant/proj/main/tokenize_and_cache.py\", line 165, in main\r\n examples=task.get_train_examples(),\r\n File \"/home/rasmussen.63/jiant/jiant/tasks/lib/wsc.py\", line 160, in get_train_examples\r\n return self._create_examples(lines=read_json_lines(self.train_path), set_type=\"train\")\r\n File \"/home/rasmussen.63/jiant/jiant/tasks/lib/wsc.py\", line 177, in _create_examples\r\n span1_idx=line[\"span1_index\"],\r\nKeyError: 'span1_index'\r\n```\r\n\r\n**Expected behavior**\r\nWSCTask should initialize an Example from the downloaded data.\r\n\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\nfrom dataclasses import dataclass\nfrom typing import List\n\nfrom jiant.tasks.core import (\n BaseExample,\n BaseTokenizedExample,\n BaseDataRow,\n BatchMixin,\n SuperGlueMixin,\n Task,\n TaskTypes,\n)\nfrom jiant.tasks.lib.templates.shared import (\n labels_to_bimap,\n add_cls_token,\n create_input_set_from_tokens_and_segments,\n)\nfrom jiant.tasks.utils import truncate_sequences, ExclusiveSpan\nfrom jiant.utils import retokenize\nfrom jiant.utils.python.io import 
read_json_lines\nfrom jiant.utils.tokenization_normalization import normalize_tokenizations\n\n\n@dataclass\nclass Example(BaseExample):\n guid: str\n text: str\n span1_idx: int\n span2_idx: int\n span1_text: str\n span2_text: str\n label: str\n\n def tokenize(self, tokenizer):\n space_tokenization = self.text.split()\n target_tokenization = tokenizer.tokenize(self.text)\n normed_space_tokenization, normed_target_tokenization = normalize_tokenizations(\n space_tokenization, target_tokenization, tokenizer\n )\n aligner = retokenize.TokenAligner(normed_space_tokenization, normed_target_tokenization)\n span1_token_count = len(self.span1_text.split())\n span2_token_count = len(self.span2_text.split())\n target_span1 = ExclusiveSpan(\n *aligner.project_token_span(self.span1_idx, self.span1_idx + span1_token_count)\n )\n target_span2 = ExclusiveSpan(\n *aligner.project_token_span(self.span2_idx, self.span2_idx + span2_token_count)\n )\n return TokenizedExample(\n guid=self.guid,\n tokens=target_tokenization,\n span1_span=target_span1,\n span2_span=target_span2,\n span1_text=self.span1_text,\n span2_text=self.span2_text,\n label_id=WSCTask.LABEL_TO_ID[self.label],\n )\n\n\n@dataclass\nclass TokenizedExample(BaseTokenizedExample):\n guid: str\n tokens: List\n span1_span: ExclusiveSpan\n span2_span: ExclusiveSpan\n span1_text: str\n span2_text: str\n label_id: int\n\n def featurize(self, tokenizer, feat_spec):\n special_tokens_count = 2 # CLS, SEP\n\n (tokens,) = truncate_sequences(\n tokens_ls=[self.tokens], max_length=feat_spec.max_seq_length - special_tokens_count,\n )\n\n unpadded_tokens = tokens + [tokenizer.sep_token]\n unpadded_segment_ids = [feat_spec.sequence_a_segment_id] * (len(self.tokens) + 1)\n\n unpadded_inputs = add_cls_token(\n unpadded_tokens=unpadded_tokens,\n unpadded_segment_ids=unpadded_segment_ids,\n tokenizer=tokenizer,\n feat_spec=feat_spec,\n )\n\n input_set = create_input_set_from_tokens_and_segments(\n unpadded_tokens=unpadded_inputs.unpadded_tokens,\n unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,\n tokenizer=tokenizer,\n feat_spec=feat_spec,\n )\n span1_span = ExclusiveSpan(\n start=self.span1_span[0] + unpadded_inputs.cls_offset,\n end=self.span1_span[1] + unpadded_inputs.cls_offset,\n ).to_inclusive()\n span2_span = ExclusiveSpan(\n start=self.span2_span[0] + unpadded_inputs.cls_offset,\n end=self.span2_span[1] + unpadded_inputs.cls_offset,\n ).to_inclusive()\n\n return DataRow(\n guid=self.guid,\n input_ids=np.array(input_set.input_ids),\n input_mask=np.array(input_set.input_mask),\n segment_ids=np.array(input_set.segment_ids),\n spans=np.array([span1_span, span2_span]),\n label_id=self.label_id,\n tokens=unpadded_inputs.unpadded_tokens,\n span1_text=self.span1_text,\n span2_text=self.span2_text,\n )\n\n\n@dataclass\nclass DataRow(BaseDataRow):\n guid: str\n input_ids: np.ndarray\n input_mask: np.ndarray\n segment_ids: np.ndarray\n spans: np.ndarray\n label_id: int\n tokens: List\n span1_text: str\n span2_text: str\n\n def get_tokens(self):\n return [self.tokens]\n\n\n@dataclass\nclass Batch(BatchMixin):\n input_ids: torch.LongTensor\n input_mask: torch.LongTensor\n segment_ids: torch.LongTensor\n spans: torch.LongTensor\n label_id: torch.LongTensor\n tokens: List\n span1_text: List\n span2_text: List\n\n\nclass WSCTask(SuperGlueMixin, Task):\n Example = Example\n TokenizedExample = Example\n DataRow = DataRow\n Batch = Batch\n\n TASK_TYPE = TaskTypes.SPAN_COMPARISON_CLASSIFICATION\n LABELS = [False, True]\n LABEL_TO_ID, ID_TO_LABEL = 
labels_to_bimap(LABELS)\n\n @property\n def num_spans(self):\n return 2\n\n def get_train_examples(self):\n return self._create_examples(lines=read_json_lines(self.train_path), set_type=\"train\")\n\n def get_val_examples(self):\n return self._create_examples(lines=read_json_lines(self.val_path), set_type=\"val\")\n\n def get_test_examples(self):\n return self._create_examples(lines=read_json_lines(self.test_path), set_type=\"test\")\n\n @classmethod\n def _create_examples(cls, lines, set_type):\n examples = []\n for line in lines:\n examples.append(\n Example(\n # NOTE: WSCTask.super_glue_format_preds() is dependent on this guid format.\n guid=\"%s-%s\" % (set_type, line[\"idx\"]),\n text=line[\"text\"],\n span1_idx=line[\"span1_index\"],\n span2_idx=line[\"span2_index\"],\n span1_text=line[\"span1_text\"],\n span2_text=line[\"span2_text\"],\n label=line[\"label\"] if set_type != \"test\" else cls.LABELS[-1],\n )\n )\n return examples\n\n @classmethod\n def super_glue_format_preds(cls, pred_dict):\n \"\"\"Reformat this task's raw predictions to have the structure expected by SuperGLUE.\"\"\"\n lines = []\n for pred, guid in zip(list(pred_dict[\"preds\"]), list(pred_dict[\"guids\"])):\n lines.append({\"idx\": int(guid.split(\"-\")[1]), \"label\": str(cls.LABELS[pred])})\n return lines\n\n\ndef extract_char_span(full_text, span_text, space_index):\n space_tokens = full_text.split()\n extracted_span_text = space_tokens[space_index]\n assert extracted_span_text.lower() in full_text.lower()\n span_length = len(span_text)\n if space_index == 0:\n start = 0\n else:\n start = len(\" \".join(space_tokens[:space_index])) + 1\n # exclusive span\n return ExclusiveSpan(start=start, end=start + span_length)\n", "path": "jiant/tasks/lib/wsc.py"}]} | 3,213 | 219 |
gh_patches_debug_39004 | rasdani/github-patches | git_diff | google__flax-1691 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support `unrolled` steps in `jax_utils.scan_in_dims`
Motivated by [jax#3094](https://github.com/google/jax/issues/3094), [jax#3738](https://github.com/google/jax/pull/3738) and [jax#3076](https://github.com/google/jax/pull/3076), `jax.lax.scan` currently supports specifying the number of scan iterations to unroll into a single iteration of the loop using the argument `unroll`.
It would be nice to be able to control this from `jax_utils.scan_in_dim`.
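
A rough sketch of the request (the exact `scan_in_dim` signature here is an assumption): `lax.scan` already accepts `unroll`, and `scan_in_dim` would simply forward the same knob to its internal `lax.scan` calls.

```python
import jax.numpy as jnp
from jax import lax

def cumsum_step(carry, x):
    carry = carry + x
    return carry, carry

xs = jnp.ones((8, 16))
# Today: unrolling is only reachable through lax.scan directly.
carry, ys = lax.scan(cumsum_step, jnp.zeros(16), xs, unroll=4)

# Requested: expose the same option, e.g.
#   flax.jax_utils.scan_in_dim(cumsum_step, jnp.zeros(16), xs, axis=0, unroll=4)
```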
</issue>
<code>
[start of flax/jax_utils.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Utilities we could consider upstreaming to Jax.
16 """
17
18 import collections
19 from collections.abc import Iterable # pylint: disable=g-importing-member
20 import itertools
21 import warnings
22
23 import jax
24 from jax import lax
25 from jax import linear_util as lu
26 from jax.config import config
27 from jax.interpreters import partial_eval as pe
28 from jax.interpreters import xla
29 import jax.lib.xla_bridge as xb
30 import jax.numpy as jnp
31 import numpy as np
32
33
34 def replicate(tree, devices=None):
35 """Replicates arrays to multiple devices.
36
37 Args:
38 tree: a pytree containing the arrays that should be replicated.
39 devices: the devices the data is replicated to
40 (default: `jax.local_devices()`).
41 Returns:
42 A new pytree containing the replicated arrays.
43 """
44 if devices is None:
45 # match the default device assignments used in pmap:
46 # for single-host, that's the XLA default device assignment
47 # for multi-host, it's the order of jax.local_devices()
48 if jax.process_count() == 1:
49 devices = [d for d in xb.get_backend().get_default_device_assignment(
50 jax.device_count()) if d.process_index == jax.process_index()]
51 else:
52 devices = jax.local_devices()
53
54 return jax.device_put_replicated(tree, devices)
55
56
57 def unreplicate(tree):
58 """Returns a single instance of a replicated array."""
59 return jax.tree_map(lambda x: x[0], tree)
60
61
62 def pmean(xs, axis_name):
63 warnings.warn('use jax.lax.pmean instead',
64 DeprecationWarning)
65 return lax.pmean(xs, axis_name)
66
67
68 def partial_eval_by_shape(fn, input_spec, *args, **kwargs):
69 """Lazily evaluate a function by using the shapes of the inputs.
70
71 This function is similar to `jax.eval_shape` with the key difference that
72 function outputs that can be computed without a concrete value of the
73 inputs are returned as is instead of only the shape. See for example
74 `module.init_by_shape` where this functionality is used to initialize a
75 model without using input data lr computation.
76
77 Args:
78 fn: the function to be lazily evaluated.
79 input_spec: an iterable of shapes or (shape, dtype) tuples specifying the
80 shape and type of the inputs. If unspecified the dtype is float32.
81 *args: other arguments passed to the module's apply function
82 **kwargs: keyword arguments passed to the module's apply function
83 Returns:
84 A pair consisting of the model output and an instance of Model
85 """
86 # output cannot be returned in lazy_create because jax.eval_shape will only
87 # return the shape and dtype.
88 # TODO(mattjj,jheek): use a public JAX API
89 f = lambda *inputs: fn(*inputs, *args, **kwargs)
90 input_structs = [_parse_spec(spec) for spec in input_spec]
91 inputs_flat, in_tree = jax.tree_flatten(input_structs)
92 f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(lu.wrap_init(f), in_tree)
93 in_pvals = [pe.PartialVal.unknown(jax.ShapedArray(x.shape, x.dtype))
94 for x in inputs_flat]
95 _, out_pvals, _ = pe.trace_to_jaxpr(f_flat, in_pvals)
96 out_flat = [const if pv is None else jax.ShapeDtypeStruct(pv.shape, pv.dtype)
97 for pv, const in out_pvals]
98 return jax.tree_unflatten(out_tree(), out_flat)
99
100
101 def _parse_spec(spec):
102 """Parse an input spec of the form (shape, dtype) or shape into a jax.ShapeDtypeStruct."""
103 spec = tuple(spec)
104 if len(spec) == 2 and isinstance(spec[0], Iterable):
105 return jax.ShapeDtypeStruct(tuple(spec[0]), spec[1])
106 else:
107 return jax.ShapeDtypeStruct(spec, jnp.float32)
108
109
110 def prefetch_to_device(iterator, size, devices=None):
111 """Shard and prefetch batches on device.
112
113 This utility takes an iterator and returns a new iterator which fills an on
114 device prefetch buffer. Eager prefetching can improve the performance of
115 training loops significantly by overlapping compute and data transfer.
116
117 This utility is mostly useful for GPUs, for TPUs and CPUs it should not be
118 necessary -- the TPU & CPU memory allocators (normally) don't pick a memory
119 location that isn't free yet so they don't block. Instead those allocators OOM.
120
121 Args:
122 iterator: an iterator that yields a pytree of ndarrays where the first
123 dimension is sharded across devices.
124
125 size: the size of the prefetch buffer.
126
127 If you're training on GPUs, 2 is generally the best choice because this
128 guarantees that you can overlap a training step on GPU with a data
129 prefetch step on CPU.
130
131 devices: the list of devices to which the arrays should be prefetched.
132
133 Yields:
134 The original items from the iterator where each ndarray is now a sharded to
135 the specified devices.
136 """
137 queue = collections.deque()
138 devices = devices or jax.local_devices()
139
140 def _prefetch(xs):
141 if hasattr(jax, "device_put_sharded"): # jax>=0.2.0
142 return jax.device_put_sharded(list(xs), devices)
143 else:
144 aval = jax.xla.abstractify(xs)
145 assert xs.shape[0] == len(devices), (
146 "The first dimension of the iterator's ndarrays is not "
147 "equal to the number of devices.")
148 buffers = [xla.device_put(x, devices[i])
149 for i, x in enumerate(xs)]
150 return jax.pxla.ShardedDeviceArray(aval, buffers)
151
152 def enqueue(n): # Enqueues *up to* `n` elements from the iterator.
153 for data in itertools.islice(iterator, n):
154 queue.append(jax.tree_map(_prefetch, data))
155
156 enqueue(size) # Fill up the buffer.
157 while queue:
158 yield queue.popleft()
159 enqueue(1)
160
161
162 def _scan_nd(body_fn, init, xs, n=1):
163 """Utility for performing an n-dimensional `lax.scan`.
164
165 The n-d scan is simply recursive call of 1-d scan.
166 Args:
167 body_fn: the body of the loop of type (c, x) -> (c, y).
168 init: initial value for the carry.
169 xs: a pytree of tensors to scan over.
170 n: number of dimensions to scan over (default: 1)
171 Returns:
172 A tuple of the final carry and the values returned by the body.
173 """
174 if n == 1:
175 return lax.scan(body_fn, init, xs)
176 else:
177 def scan_body(c, x):
178 return _scan_nd(body_fn, c, x, n=n-1)
179 return lax.scan(scan_body, init, xs)
180
181
182 def _invert_perm(perm):
183 perm_inv = [0] * len(perm)
184 for i, j in enumerate(perm):
185 perm_inv[j] = i
186 return tuple(perm_inv)
187
188
189 def scan_in_dim(body_fn, init, xs, axis=(0,), keepdims=False):
190 """utility for doing a scan along arbitrary dimensions.
191
192 see `lax.scan` for details on how the scan operation works.
193 Args:
194 body_fn: the body of the loop of type (c, x) -> (c, y).
195 init: initial value for the carry.
196 xs: a pytree of tensors to scan over.
197 axis: the axis to scan over.
198 keepdims: keep the dimensions that are scanned over.
199 Returns:
200 A tuple of the final carry and the values returned by the body.
201 """
202 if not isinstance(axis, Iterable):
203 axis = (axis,)
204
205 def transpose_in(x):
206 perm = axis + tuple(np.delete(np.arange(x.ndim), axis))
207 return x.transpose(perm)
208 def transpose_out(x):
209 perm = axis + tuple(np.delete(np.arange(x.ndim), axis))
210 return x.transpose(_invert_perm(perm))
211
212 def body_wrapper(c, xs):
213 if keepdims:
214 xs = jax.tree_map(lambda x: x.reshape((1,) * len(axis) + x.shape), xs)
215 xs = jax.tree_map(transpose_out, xs)
216 c, ys = body_fn(c, xs)
217 if keepdims:
218 ys = jax.tree_map(transpose_in, ys)
219 ys = jax.tree_map(lambda x: x.reshape(x.shape[len(axis):]), ys)
220 return c, ys
221
222 xs = jax.tree_map(transpose_in, xs)
223 c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis))
224 ys = jax.tree_map(transpose_out, ys)
225 return c, ys
226
[end of flax/jax_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/jax_utils.py b/flax/jax_utils.py
--- a/flax/jax_utils.py
+++ b/flax/jax_utils.py
@@ -159,7 +159,7 @@
enqueue(1)
-def _scan_nd(body_fn, init, xs, n=1):
+def _scan_nd(body_fn, init, xs, n=1, unroll=(1,)):
"""Utility for performing an n-dimensional `lax.scan`.
The n-d scan is simply recursive call of 1-d scan.
@@ -172,11 +172,11 @@
A tuple of the final carry and the values returned by the body.
"""
if n == 1:
- return lax.scan(body_fn, init, xs)
+ return lax.scan(body_fn, init, xs, unroll=unroll[0])
else:
def scan_body(c, x):
- return _scan_nd(body_fn, c, x, n=n-1)
- return lax.scan(scan_body, init, xs)
+ return _scan_nd(body_fn, c, x, n=n-1, unroll=unroll[1:])
+ return lax.scan(scan_body, init, xs, unroll=unroll[0])
def _invert_perm(perm):
@@ -186,22 +186,38 @@
return tuple(perm_inv)
-def scan_in_dim(body_fn, init, xs, axis=(0,), keepdims=False):
+def scan_in_dim(body_fn, init, xs, axis=(0,), unroll=(1,), keepdims=False):
"""utility for doing a scan along arbitrary dimensions.
- see `lax.scan` for details on how the scan operation works.
+ See `lax.scan` for details on how the scan operation works.
+
+ Note on `unroll`: This argument gets left padded with ones to match the size
+ of `axis`. Doing so allows unrolls to performed from the innermost loop first.
+ For example, `scan_in_dim(..., axis=(1, 2, 3), unroll=5)` is equivalent to
+ `scan_in_dim(..., axis=(1, 2, 3), unroll=(1, 1, 5))`.
+
Args:
body_fn: the body of the loop of type (c, x) -> (c, y).
init: initial value for the carry.
xs: a pytree of tensors to scan over.
axis: the axis to scan over.
keepdims: keep the dimensions that are scanned over.
+ unroll: an optional positive integer, or tuple of positive integers
+ showing how many iterations of the loop to be unroll into a single
+ iteration for each axis.
Returns:
A tuple of the final carry and the values returned by the body.
"""
if not isinstance(axis, Iterable):
axis = (axis,)
+ if not isinstance(unroll, Iterable):
+ unroll = (unroll,)
+
+ # Pad unroll with ones so we start unrolling from the innermost loop
+ len_diff = len(axis) - len(unroll)
+ unroll = (1,) * len_diff + unroll
+
def transpose_in(x):
perm = axis + tuple(np.delete(np.arange(x.ndim), axis))
return x.transpose(perm)
@@ -220,6 +236,6 @@
return c, ys
xs = jax.tree_map(transpose_in, xs)
- c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis))
+ c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis), unroll=unroll)
ys = jax.tree_map(transpose_out, ys)
return c, ys
| {"golden_diff": "diff --git a/flax/jax_utils.py b/flax/jax_utils.py\n--- a/flax/jax_utils.py\n+++ b/flax/jax_utils.py\n@@ -159,7 +159,7 @@\n enqueue(1)\n \n \n-def _scan_nd(body_fn, init, xs, n=1):\n+def _scan_nd(body_fn, init, xs, n=1, unroll=(1,)):\n \"\"\"Utility for performing an n-dimensional `lax.scan`.\n \n The n-d scan is simply recursive call of 1-d scan.\n@@ -172,11 +172,11 @@\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if n == 1:\n- return lax.scan(body_fn, init, xs)\n+ return lax.scan(body_fn, init, xs, unroll=unroll[0])\n else:\n def scan_body(c, x):\n- return _scan_nd(body_fn, c, x, n=n-1)\n- return lax.scan(scan_body, init, xs)\n+ return _scan_nd(body_fn, c, x, n=n-1, unroll=unroll[1:])\n+ return lax.scan(scan_body, init, xs, unroll=unroll[0])\n \n \n def _invert_perm(perm):\n@@ -186,22 +186,38 @@\n return tuple(perm_inv)\n \n \n-def scan_in_dim(body_fn, init, xs, axis=(0,), keepdims=False):\n+def scan_in_dim(body_fn, init, xs, axis=(0,), unroll=(1,), keepdims=False):\n \"\"\"utility for doing a scan along arbitrary dimensions.\n \n- see `lax.scan` for details on how the scan operation works.\n+ See `lax.scan` for details on how the scan operation works.\n+\n+ Note on `unroll`: This argument gets left padded with ones to match the size\n+ of `axis`. Doing so allows unrolls to performed from the innermost loop first.\n+ For example, `scan_in_dim(..., axis=(1, 2, 3), unroll=5)` is equivalent to\n+ `scan_in_dim(..., axis=(1, 2, 3), unroll=(1, 1, 5))`.\n+\n Args:\n body_fn: the body of the loop of type (c, x) -> (c, y).\n init: initial value for the carry.\n xs: a pytree of tensors to scan over.\n axis: the axis to scan over.\n keepdims: keep the dimensions that are scanned over.\n+ unroll: an optional positive integer, or tuple of positive integers\n+ showing how many iterations of the loop to be unroll into a single\n+ iteration for each axis.\n Returns:\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if not isinstance(axis, Iterable):\n axis = (axis,)\n \n+ if not isinstance(unroll, Iterable):\n+ unroll = (unroll,)\n+\n+ # Pad unroll with ones so we start unrolling from the innermost loop\n+ len_diff = len(axis) - len(unroll)\n+ unroll = (1,) * len_diff + unroll\n+\n def transpose_in(x):\n perm = axis + tuple(np.delete(np.arange(x.ndim), axis))\n return x.transpose(perm)\n@@ -220,6 +236,6 @@\n return c, ys\n \n xs = jax.tree_map(transpose_in, xs)\n- c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis))\n+ c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis), unroll=unroll)\n ys = jax.tree_map(transpose_out, ys)\n return c, ys\n", "issue": "Support `unrolled` steps in `jax_utils.scan_in_dims`\nMotivated by [jax#3094](https://github.com/google/jax/issues/3094), [jax#3738](https://github.com/google/jax/pull/3738) and [jax#3076](https://github.com/google/jax/pull/3076), `jax.lax.scan` currently supports specifying the number of scan iterations to unroll into a single iteration of the loop using the argument `unrolls`.\r\n\r\nIt would be nice to be able to control this from `jax_utils.scan_in_dims`.\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on 
an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities we could consider upstreaming to Jax.\n\"\"\"\n\nimport collections\nfrom collections.abc import Iterable # pylint: disable=g-importing-member\nimport itertools\nimport warnings\n\nimport jax\nfrom jax import lax\nfrom jax import linear_util as lu\nfrom jax.config import config\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import xla\nimport jax.lib.xla_bridge as xb\nimport jax.numpy as jnp\nimport numpy as np\n\n\ndef replicate(tree, devices=None):\n \"\"\"Replicates arrays to multiple devices.\n\n Args:\n tree: a pytree containing the arrays that should be replicated.\n devices: the devices the data is replicated to\n (default: `jax.local_devices()`).\n Returns:\n A new pytree containing the replicated arrays.\n \"\"\"\n if devices is None:\n # match the default device assignments used in pmap:\n # for single-host, that's the XLA default device assignment\n # for multi-host, it's the order of jax.local_devices()\n if jax.process_count() == 1:\n devices = [d for d in xb.get_backend().get_default_device_assignment(\n jax.device_count()) if d.process_index == jax.process_index()]\n else:\n devices = jax.local_devices()\n\n return jax.device_put_replicated(tree, devices)\n\n\ndef unreplicate(tree):\n \"\"\"Returns a single instance of a replicated array.\"\"\"\n return jax.tree_map(lambda x: x[0], tree)\n\n\ndef pmean(xs, axis_name):\n warnings.warn('use jax.lax.pmean instead',\n DeprecationWarning)\n return lax.pmean(xs, axis_name)\n\n\ndef partial_eval_by_shape(fn, input_spec, *args, **kwargs):\n \"\"\"Lazily evaluate a function by using the shapes of the inputs.\n\n This function is similar to `jax.eval_shape` with the key difference that\n function outputs that can be computed without a concrete value of the\n inputs are returned as is instead of only the shape. See for example\n `module.init_by_shape` where this functionality is used to initialize a\n model without using input data lr computation.\n\n Args:\n fn: the function to be lazily evaluated.\n input_spec: an iterable of shapes or (shape, dtype) tuples specifying the\n shape and type of the inputs. 
If unspecified the dtype is float32.\n *args: other arguments passed to the module's apply function\n **kwargs: keyword arguments passed to the module's apply function\n Returns:\n A pair consisting of the model output and an instance of Model\n \"\"\"\n # output cannot be returned in lazy_create because jax.eval_shape will only\n # return the shape and dtype.\n # TODO(mattjj,jheek): use a public JAX API\n f = lambda *inputs: fn(*inputs, *args, **kwargs)\n input_structs = [_parse_spec(spec) for spec in input_spec]\n inputs_flat, in_tree = jax.tree_flatten(input_structs)\n f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(lu.wrap_init(f), in_tree)\n in_pvals = [pe.PartialVal.unknown(jax.ShapedArray(x.shape, x.dtype))\n for x in inputs_flat]\n _, out_pvals, _ = pe.trace_to_jaxpr(f_flat, in_pvals)\n out_flat = [const if pv is None else jax.ShapeDtypeStruct(pv.shape, pv.dtype)\n for pv, const in out_pvals]\n return jax.tree_unflatten(out_tree(), out_flat)\n\n\ndef _parse_spec(spec):\n \"\"\"Parse an input spec of the form (shape, dtype) or shape into a jax.ShapeDtypeStruct.\"\"\"\n spec = tuple(spec)\n if len(spec) == 2 and isinstance(spec[0], Iterable):\n return jax.ShapeDtypeStruct(tuple(spec[0]), spec[1])\n else:\n return jax.ShapeDtypeStruct(spec, jnp.float32)\n\n\ndef prefetch_to_device(iterator, size, devices=None):\n \"\"\"Shard and prefetch batches on device.\n\n This utility takes an iterator and returns a new iterator which fills an on\n device prefetch buffer. Eager prefetching can improve the performance of\n training loops significantly by overlapping compute and data transfer.\n\n This utility is mostly useful for GPUs, for TPUs and CPUs it should not be\n necessary -- the TPU & CPU memory allocators (normally) don't pick a memory\n location that isn't free yet so they don't block. 
Instead those allocators OOM.\n\n Args:\n iterator: an iterator that yields a pytree of ndarrays where the first\n dimension is sharded across devices.\n\n size: the size of the prefetch buffer.\n\n If you're training on GPUs, 2 is generally the best choice because this\n guarantees that you can overlap a training step on GPU with a data\n prefetch step on CPU.\n\n devices: the list of devices to which the arrays should be prefetched.\n\n Yields:\n The original items from the iterator where each ndarray is now a sharded to\n the specified devices.\n \"\"\"\n queue = collections.deque()\n devices = devices or jax.local_devices()\n\n def _prefetch(xs):\n if hasattr(jax, \"device_put_sharded\"): # jax>=0.2.0\n return jax.device_put_sharded(list(xs), devices)\n else:\n aval = jax.xla.abstractify(xs)\n assert xs.shape[0] == len(devices), (\n \"The first dimension of the iterator's ndarrays is not \"\n \"equal to the number of devices.\")\n buffers = [xla.device_put(x, devices[i])\n for i, x in enumerate(xs)]\n return jax.pxla.ShardedDeviceArray(aval, buffers)\n\n def enqueue(n): # Enqueues *up to* `n` elements from the iterator.\n for data in itertools.islice(iterator, n):\n queue.append(jax.tree_map(_prefetch, data))\n\n enqueue(size) # Fill up the buffer.\n while queue:\n yield queue.popleft()\n enqueue(1)\n\n\ndef _scan_nd(body_fn, init, xs, n=1):\n \"\"\"Utility for performing an n-dimensional `lax.scan`.\n\n The n-d scan is simply recursive call of 1-d scan.\n Args:\n body_fn: the body of the loop of type (c, x) -> (c, y).\n init: initial value for the carry.\n xs: a pytree of tensors to scan over.\n n: number of dimensions to scan over (default: 1)\n Returns:\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if n == 1:\n return lax.scan(body_fn, init, xs)\n else:\n def scan_body(c, x):\n return _scan_nd(body_fn, c, x, n=n-1)\n return lax.scan(scan_body, init, xs)\n\n\ndef _invert_perm(perm):\n perm_inv = [0] * len(perm)\n for i, j in enumerate(perm):\n perm_inv[j] = i\n return tuple(perm_inv)\n\n\ndef scan_in_dim(body_fn, init, xs, axis=(0,), keepdims=False):\n \"\"\"utility for doing a scan along arbitrary dimensions.\n\n see `lax.scan` for details on how the scan operation works.\n Args:\n body_fn: the body of the loop of type (c, x) -> (c, y).\n init: initial value for the carry.\n xs: a pytree of tensors to scan over.\n axis: the axis to scan over.\n keepdims: keep the dimensions that are scanned over.\n Returns:\n A tuple of the final carry and the values returned by the body.\n \"\"\"\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n def transpose_in(x):\n perm = axis + tuple(np.delete(np.arange(x.ndim), axis))\n return x.transpose(perm)\n def transpose_out(x):\n perm = axis + tuple(np.delete(np.arange(x.ndim), axis))\n return x.transpose(_invert_perm(perm))\n\n def body_wrapper(c, xs):\n if keepdims:\n xs = jax.tree_map(lambda x: x.reshape((1,) * len(axis) + x.shape), xs)\n xs = jax.tree_map(transpose_out, xs)\n c, ys = body_fn(c, xs)\n if keepdims:\n ys = jax.tree_map(transpose_in, ys)\n ys = jax.tree_map(lambda x: x.reshape(x.shape[len(axis):]), ys)\n return c, ys\n\n xs = jax.tree_map(transpose_in, xs)\n c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis))\n ys = jax.tree_map(transpose_out, ys)\n return c, ys\n", "path": "flax/jax_utils.py"}]} | 3,329 | 841 |
gh_patches_debug_18980 | rasdani/github-patches | git_diff | mne-tools__mne-bids-320 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[joss] list supported python versions in pypi
piggy-backing off of one of @TomDonoghue's comments:
> Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that.
you can declare the supported Python versions for PyPI by adding these classifiers:
```
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
```
[in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50)
you can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package.
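
A minimal sketch of a `setup()` call combining both suggestions (the `'>=3.5'` lower bound is an assumption inferred from the classifiers above, not something stated in the issue):

```python
from setuptools import setup

setup(
    name='mne-bids',
    python_requires='>=3.5',  # pip will refuse to install on older interpreters
    classifiers=[
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
```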
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 """Setup MNE-BIDS."""
3 import os
4 from setuptools import setup, find_packages
5
6 # get the version
7 version = None
8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
9 for line in (line.strip() for line in fid):
10 if line.startswith('__version__'):
11 version = line.split('=')[1].strip().strip('\'')
12 break
13 if version is None:
14 raise RuntimeError('Could not determine version')
15
16
17 descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '
18 'specification and facilitating their analysis with MNE-Python')
19
20 DISTNAME = 'mne-bids'
21 DESCRIPTION = descr
22 MAINTAINER = 'Mainak Jas'
23 MAINTAINER_EMAIL = '[email protected]'
24 URL = 'https://mne.tools/mne-bids/'
25 LICENSE = 'BSD (3-clause)'
26 DOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'
27 VERSION = version
28
29 if __name__ == "__main__":
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 maintainer_email=MAINTAINER_EMAIL,
33 description=DESCRIPTION,
34 license=LICENSE,
35 url=URL,
36 version=VERSION,
37 download_url=DOWNLOAD_URL,
38 long_description=open('README.rst').read(),
39 long_description_content_type='text/x-rst',
40 classifiers=[
41 'Intended Audience :: Science/Research',
42 'Intended Audience :: Developers',
43 'License :: OSI Approved',
44 'Programming Language :: Python',
45 'Topic :: Software Development',
46 'Topic :: Scientific/Engineering',
47 'Operating System :: Microsoft :: Windows',
48 'Operating System :: POSIX',
49 'Operating System :: Unix',
50 'Operating System :: MacOS',
51 ],
52 platforms='any',
53 packages=find_packages(),
54 entry_points={'console_scripts': [
55 'mne_bids = mne_bids.commands.run:main',
56 ]},
57 project_urls={
58 'Documentation': 'https://mne.tools/mne-bids',
59 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',
60 'Source': 'https://github.com/mne-tools/mne-bids',
61 },
62 )
63
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
download_url=DOWNLOAD_URL,
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
+ python_requires='~=3.5',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
@@ -48,6 +49,9 @@
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
],
platforms='any',
packages=find_packages(),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n+ python_requires='~=3.5',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n@@ -48,6 +49,9 @@\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n ],\n platforms='any',\n packages=find_packages(),\n", "issue": "[joss] list supported python versions in pypi\npiggy-backing off of one of @TomDonoghue's comments:\r\n> Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that.\r\n\r\nyou can add supported python versions for pypi, you can add these:\r\n```\r\nProgramming Language :: Python :: 3.5\r\nProgramming Language :: Python :: 3.6\r\nProgramming Language :: Python :: 3.7\r\n```\r\n[in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50)\r\nyou can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package.\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n 'specification and facilitating their analysis with MNE-Python')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'mne_bids = mne_bids.commands.run:main',\n ]},\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}]} | 1,366 | 176 |
gh_patches_debug_25171 | rasdani/github-patches | git_diff | fidals__shopelectro-778 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cart issues on prod
These are the PO's comments:
-----
You open the site and go to a product, add it to the cart, then move on to another product, add it, and open the cart... and the item is not there.
</issue>
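
A plausible reading of the report: the order (cart) page response is being cached somewhere (e.g. by site-wide cache middleware or an upstream cache), so a freshly added item does not appear until the cached copy expires. The repository's fix (see the patch below) marks the order views with Django's `never_cache`; a minimal sketch of that pattern, simplified from the project's `custom_page_url` wiring:

```python
from django.conf.urls import url
from django.views.decorators.cache import never_cache

from shopelectro import views

# Serve the cart/order page uncached so it always reflects the current
# session's cart instead of a previously cached response.
urlpatterns = [
    url(r'^shop/order/$', never_cache(views.OrderPage.as_view()), name='order'),
]
```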
<code>
[start of shopelectro/urls.py]
1 from datetime import timedelta
2 from collections import OrderedDict
3
4 from django.conf import settings
5 from django.conf.urls import url, include
6 from django.conf.urls.static import static
7 from django.contrib.sitemaps.views import sitemap
8 from django.views.decorators.cache import cache_page
9
10 from pages.views import RobotsView, SitemapPage
11 from pages.urls import custom_page_url
12
13 from shopelectro import sitemaps, views
14 from shopelectro.admin import se_admin
15
16
17 def cached_time(*args, **kwargs) -> int:
18 """Return value of time for caching in seconds."""
19 return int(timedelta(*args, **kwargs).total_seconds())
20
21
22 # Orders sitemaps instances
23 sitemaps = OrderedDict([
24 ('index', sitemaps.IndexSitemap),
25 ('category', sitemaps.CategorySitemap),
26 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
27 ('products', sitemaps.ProductSitemap),
28 ('site', sitemaps.PagesSitemap)
29 ])
30
31 # disable cache
32 if settings.DEBUG:
33 def cache_page(arg): # Ignore PyFlakesBear
34 if callable(arg):
35 return arg
36 return cache_page
37
38 cached_60d = cache_page(cached_time(days=60))
39 cached_2h = cache_page(cached_time(hours=2))
40
41 admin_urls = [
42 url(r'^', se_admin.urls),
43 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
44 url(r'^get-tree-items/$', views.Tree.as_view()),
45 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
46 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
47 url(r'^select2/', include('django_select2.urls')),
48 ]
49
50 catalog_urls = [
51 # "category" group
52 url(r'^categories/(?P<slug>[\w-]+)/$',
53 cached_2h(views.CategoryPage.as_view()), name='category'),
54 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w_-]+)/$',
55 cached_2h(views.CategoryPage.as_view()), name='category'),
56 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
57 views.CategoryPage.as_view(), name='category'),
58 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
59 views.CategoryPage.as_view(), name='category'),
60 # "load more" group
61 url(r'categories/(?P<slug>[\w-]+)/load-more/'
62 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
63 views.load_more, name='load_more'),
64 url(r'categories/(?P<slug>[\w-]+)/load-more/'
65 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
66 views.load_more, name='load_more'),
67 # rest of urls
68 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
69 name='products_without_images'),
70 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
71 name='products_without_text'),
72 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
73 views.ProductPage.as_view(), name='product'),
74 ]
75
76 service_urls = [
77 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
78 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
79 url(r'^ya-feedback/redirect/$',
80 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
81 url(r'^ya-feedback/request/$',
82 views.ya_feedback_request, name='ya_feedback_request'),
83 ]
84
85 search_urls = [
86 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
87 ]
88
89 ecommerce_urls = [
90 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
91 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
92 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
93 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
94 url(r'^order-call/$', views.order_call),
95 url(r'^one-click-buy/$', views.one_click_buy),
96 url(r'^yandex-order/$', views.YandexOrder.as_view()),
97 url(r'', include('ecommerce.urls')),
98 ]
99
100 custom_pages = [
101 # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work
102 custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),
103 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()),
104 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
105 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
106 custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
107 custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),
108 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
109 ]
110
111 urlpatterns = [
112 url('', include(custom_pages)),
113 url(r'^admin/', include(admin_urls)),
114 url(r'^catalog/', include(catalog_urls)),
115 url(r'^pages/', include('pages.urls')),
116 url(r'^save-feedback/$', views.save_feedback),
117 url(r'^delete-feedback/$', views.delete_feedback),
118 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
119 url(r'^shop/', include(ecommerce_urls)),
120 url(r'^search/', include(search_urls)),
121 url(r'^service/', include(service_urls)),
122 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
123 ]
124
125 if settings.DEBUG:
126 import debug_toolbar
127
128 urlpatterns += [
129 url(r'^__debug__/', include(debug_toolbar.urls)),
130 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
131 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
132 ]
133
[end of shopelectro/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -5,7 +5,7 @@
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
-from django.views.decorators.cache import cache_page
+from django.views.decorators.cache import cache_page, never_cache
from pages.views import RobotsView, SitemapPage
from pages.urls import custom_page_url
@@ -103,9 +103,10 @@
custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()),
custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
- custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
- custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),
custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
+ # these pages should show only actual state
+ custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),
+ custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),
]
urlpatterns = [
| {"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -5,7 +5,7 @@\n from django.conf.urls import url, include\n from django.conf.urls.static import static\n from django.contrib.sitemaps.views import sitemap\n-from django.views.decorators.cache import cache_page\n+from django.views.decorators.cache import cache_page, never_cache\n \n from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n@@ -103,9 +103,10 @@\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n- custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n- custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n+ # these pages should show only actual state\n+ custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n+ custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n ]\n \n urlpatterns = [\n", "issue": "Cart issues on prod\nThis is PO comments:\r\n\r\n-----\r\n\r\n\u0437\u0430\u0445\u043e\u0434\u0438\u0448\u044c \u043d\u0430 \u0441\u0430\u0439\u0442, \u043f\u0435\u0440\u0435\u0445\u043e\u0434\u0438\u0448\u044c \u0432 \u0442\u043e\u0432\u0430\u0440. \u0434\u043e\u0431\u0430\u0432\u043b\u044f\u0435\u0448\u044c \u0435\u0433\u043e \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443. \u0443\u0445\u043e\u0434\u0438\u0448\u044c \u043d\u0430 \u0434\u0440\u0443\u0433\u043e\u0439 \u0442\u043e\u0432\u0430\u0440, \u0434\u043e\u0431\u0430\u0432\u043b\u044f\u0435\u0448\u044c \u0435\u0433\u043e \u0438 \u043f\u0435\u0440\u0435\u0445\u043e\u0434\u0438\u0448\u044c. \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443. 
..., \u0430 \u0435\u0433\u043e \u0442\u0430\u043c \u043d\u0435\u0442\r\n\n", "before_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n 
url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}]} | 2,284 | 335 |
gh_patches_debug_31138 | rasdani/github-patches | git_diff | learningequality__kolibri-11049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Catch and handle EmptyResultSet error when trying to cache metadata labels
EmptyResultSet
Sentry Issue: [KOLIBRI-BACKEND-2E9](https://learningequality.sentry.io/issues/4005137733/?referrer=github_integration)
```
EmptyResultSet:
(16 additional frame(s) were not displayed)
...
File "django/db/models/sql/query.py", line 233, in __str__
sql, params = self.sql_with_params()
File "django/db/models/sql/query.py", line 241, in sql_with_params
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
File "django/db/models/sql/compiler.py", line 441, in as_sql
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
File "django/db/models/sql/compiler.py", line 373, in compile
sql, params = node.as_sql(self, self.connection)
File "django/db/models/sql/where.py", line 97, in as_sql
raise EmptyResultSet
```
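Django raises `EmptyResultSet` whenever a query can be proven to return no rows (for example an `__in` lookup against an empty list), so even rendering the queryset's SQL via `str(queryset.query)` fails, which is what the cache-key code hits here. A minimal guard (helper name made up for illustration), shown only to pin down the failure mode rather than as the intended fix:

```python
import hashlib

from django.core.exceptions import EmptyResultSet


def queryset_cache_key(queryset, prefix="search-labels"):
    """Hash a queryset's SQL for a cache key, tolerating provably-empty querysets."""
    try:
        sql = str(queryset.query)  # raises EmptyResultSet when the WHERE clause matches nothing
    except EmptyResultSet:
        return None  # caller can skip the cache and return empty labels instead
    return "{}:{}".format(prefix, hashlib.md5(sql.encode("utf8")).hexdigest())
```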
</issue>
<code>
[start of kolibri/core/content/utils/search.py]
1 """
2 Avoiding direct model imports in here so that we can import these functions into places
3 that should not initiate the Django app registry.
4 """
5 import hashlib
6
7 try:
8 from django.contrib.postgres.aggregates import BitOr
9 except ImportError:
10 BitOr = None
11
12 from django.db import connections
13 from django.db.models import Aggregate
14 from django.db.models import Case
15 from django.db.models import Value
16 from django.db.models import When
17 from django.db.models.fields import IntegerField
18 from le_utils.constants.labels.accessibility_categories import (
19 ACCESSIBILITYCATEGORIESLIST,
20 )
21 from le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST
22 from le_utils.constants.labels.levels import LEVELSLIST
23 from le_utils.constants.labels.needs import NEEDSLIST
24 from le_utils.constants.labels.subjects import SUBJECTSLIST
25
26 from kolibri.core.utils.cache import process_cache as cache
27
28
29 metadata_lookup = {
30 "learning_activities": LEARNINGACTIVITIESLIST,
31 "categories": SUBJECTSLIST,
32 "grade_levels": LEVELSLIST,
33 "accessibility_labels": ACCESSIBILITYCATEGORIESLIST,
34 "learner_needs": NEEDSLIST,
35 }
36
37
38 metadata_bitmasks = {}
39
40 bitmask_fieldnames = {}
41
42
43 for key, labels in metadata_lookup.items():
44 bitmask_lookup = {}
45 i = 0
46 while labels[i : i + 64]:
47 bitmask_field_name = "{}_bitmask_{}".format(key, i)
48 bitmask_fieldnames[bitmask_field_name] = []
49 for j, label in enumerate(labels):
50 info = {
51 "bitmask_field_name": bitmask_field_name,
52 "field_name": key,
53 "bits": 2 ** j,
54 "label": label,
55 }
56 bitmask_lookup[label] = info
57 bitmask_fieldnames[bitmask_field_name].append(info)
58 i += 64
59 metadata_bitmasks[key] = bitmask_lookup
60
61
62 def _get_available_languages(base_queryset):
63 from kolibri.core.content.models import Language
64
65 langs = Language.objects.filter(
66 id__in=base_queryset.exclude(lang=None)
67 .values_list("lang_id", flat=True)
68 .distinct()
69 ).values("id", "lang_name")
70 return list(langs)
71
72
73 def _get_available_channels(base_queryset):
74 from kolibri.core.content.models import ChannelMetadata
75
76 return list(
77 ChannelMetadata.objects.filter(
78 id__in=base_queryset.values_list("channel_id", flat=True).distinct()
79 ).values("id", "name")
80 )
81
82
83 class SQLiteBitwiseORAggregate(Aggregate):
84 name = "BitwiseOR"
85
86 def __init__(self, expression, num_bits=None, **extra):
87 if not num_bits:
88 raise ValueError("num_bits must be a positive integer")
89 self.num_bits = num_bits
90 super(SQLiteBitwiseORAggregate, self).__init__(
91 expression, output_field=IntegerField(), **extra
92 )
93
94 @property
95 def template(self):
96 return " + ".join(
97 "max(%(expressions)s&{})".format(2 ** i) for i in range(0, self.num_bits)
98 )
99
100
101 def get_available_metadata_labels(base_queryset):
102 from kolibri.core.device.models import ContentCacheKey
103
104 content_cache_key = ContentCacheKey.get_cache_key()
105 cache_key = "search-labels:{}:{}".format(
106 content_cache_key,
107 hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(),
108 )
109 if cache_key not in cache:
110 base_queryset = base_queryset.order_by()
111 aggregates = {}
112 for field in bitmask_fieldnames:
113 field_agg = field + "_agg"
114 if connections[base_queryset.db].vendor == "sqlite" or BitOr is None:
115 aggregates[field_agg] = SQLiteBitwiseORAggregate(
116 field, num_bits=len(bitmask_fieldnames[field])
117 )
118 elif connections[base_queryset.db].vendor == "postgresql":
119 aggregates[field_agg] = BitOr(field)
120 output = {}
121 agg = base_queryset.aggregate(**aggregates)
122 for field, values in bitmask_fieldnames.items():
123 bit_value = agg[field + "_agg"]
124 for value in values:
125 if value["field_name"] not in output:
126 output[value["field_name"]] = []
127 if bit_value is not None and bit_value & value["bits"]:
128 output[value["field_name"]].append(value["label"])
129 output["languages"] = _get_available_languages(base_queryset)
130 output["channels"] = _get_available_channels(base_queryset)
131 cache.set(cache_key, output, timeout=None)
132 return cache.get(cache_key)
133
134
135 def get_all_contentnode_label_metadata():
136 from kolibri.core.content.models import ContentNode
137
138 return get_available_metadata_labels(ContentNode.objects.filter(available=True))
139
140
141 def annotate_label_bitmasks(queryset):
142 update_statements = {}
143 for bitmask_fieldname, label_info in bitmask_fieldnames.items():
144 update_statements[bitmask_fieldname] = sum(
145 Case(
146 When(
147 **{
148 info["field_name"] + "__contains": info["label"],
149 "then": Value(info["bits"]),
150 }
151 ),
152 default=Value(0),
153 )
154 for info in label_info
155 )
156 queryset.update(**update_statements)
157
[end of kolibri/core/content/utils/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/content/utils/search.py b/kolibri/core/content/utils/search.py
--- a/kolibri/core/content/utils/search.py
+++ b/kolibri/core/content/utils/search.py
@@ -9,6 +9,7 @@
except ImportError:
BitOr = None
+from django.core.exceptions import EmptyResultSet
from django.db import connections
from django.db.models import Aggregate
from django.db.models import Case
@@ -39,6 +40,11 @@
bitmask_fieldnames = {}
+empty_labels = {
+ "languages": [],
+ "channels": [],
+}
+
for key, labels in metadata_lookup.items():
bitmask_lookup = {}
@@ -57,6 +63,7 @@
bitmask_fieldnames[bitmask_field_name].append(info)
i += 64
metadata_bitmasks[key] = bitmask_lookup
+ empty_labels[key] = []
def _get_available_languages(base_queryset):
@@ -98,14 +105,17 @@
)
-def get_available_metadata_labels(base_queryset):
+def get_available_metadata_labels(base_queryset): # noqa: C901
from kolibri.core.device.models import ContentCacheKey
content_cache_key = ContentCacheKey.get_cache_key()
- cache_key = "search-labels:{}:{}".format(
- content_cache_key,
- hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(),
- )
+ try:
+ cache_key = "search-labels:{}:{}".format(
+ content_cache_key,
+ hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(),
+ )
+ except EmptyResultSet:
+ return empty_labels
if cache_key not in cache:
base_queryset = base_queryset.order_by()
aggregates = {}
| {"golden_diff": "diff --git a/kolibri/core/content/utils/search.py b/kolibri/core/content/utils/search.py\n--- a/kolibri/core/content/utils/search.py\n+++ b/kolibri/core/content/utils/search.py\n@@ -9,6 +9,7 @@\n except ImportError:\n BitOr = None\n \n+from django.core.exceptions import EmptyResultSet\n from django.db import connections\n from django.db.models import Aggregate\n from django.db.models import Case\n@@ -39,6 +40,11 @@\n \n bitmask_fieldnames = {}\n \n+empty_labels = {\n+ \"languages\": [],\n+ \"channels\": [],\n+}\n+\n \n for key, labels in metadata_lookup.items():\n bitmask_lookup = {}\n@@ -57,6 +63,7 @@\n bitmask_fieldnames[bitmask_field_name].append(info)\n i += 64\n metadata_bitmasks[key] = bitmask_lookup\n+ empty_labels[key] = []\n \n \n def _get_available_languages(base_queryset):\n@@ -98,14 +105,17 @@\n )\n \n \n-def get_available_metadata_labels(base_queryset):\n+def get_available_metadata_labels(base_queryset): # noqa: C901\n from kolibri.core.device.models import ContentCacheKey\n \n content_cache_key = ContentCacheKey.get_cache_key()\n- cache_key = \"search-labels:{}:{}\".format(\n- content_cache_key,\n- hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n- )\n+ try:\n+ cache_key = \"search-labels:{}:{}\".format(\n+ content_cache_key,\n+ hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n+ )\n+ except EmptyResultSet:\n+ return empty_labels\n if cache_key not in cache:\n base_queryset = base_queryset.order_by()\n aggregates = {}\n", "issue": "Catch and handle EmptyResultSet error when trying to cache metadata labels\nEmptyResultSet\n\nSentry Issue: [KOLIBRI-BACKEND-2E9](https://learningequality.sentry.io/issues/4005137733/?referrer=github_integration)\n\n```\nEmptyResultSet: \n(16 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 233, in __str__\n sql, params = self.sql_with_params()\n File \"django/db/models/sql/query.py\", line 241, in sql_with_params\n return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()\n File \"django/db/models/sql/compiler.py\", line 441, in as_sql\n where, w_params = self.compile(self.where) if self.where is not None else (\"\", [])\n File \"django/db/models/sql/compiler.py\", line 373, in compile\n sql, params = node.as_sql(self, self.connection)\n File \"django/db/models/sql/where.py\", line 97, in as_sql\n raise EmptyResultSet\n```\n", "before_files": [{"content": "\"\"\"\nAvoiding direct model imports in here so that we can import these functions into places\nthat should not initiate the Django app registry.\n\"\"\"\nimport hashlib\n\ntry:\n from django.contrib.postgres.aggregates import BitOr\nexcept ImportError:\n BitOr = None\n\nfrom django.db import connections\nfrom django.db.models import Aggregate\nfrom django.db.models import Case\nfrom django.db.models import Value\nfrom django.db.models import When\nfrom django.db.models.fields import IntegerField\nfrom le_utils.constants.labels.accessibility_categories import (\n ACCESSIBILITYCATEGORIESLIST,\n)\nfrom le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST\nfrom le_utils.constants.labels.levels import LEVELSLIST\nfrom le_utils.constants.labels.needs import NEEDSLIST\nfrom le_utils.constants.labels.subjects import SUBJECTSLIST\n\nfrom kolibri.core.utils.cache import process_cache as cache\n\n\nmetadata_lookup = {\n \"learning_activities\": LEARNINGACTIVITIESLIST,\n \"categories\": SUBJECTSLIST,\n \"grade_levels\": LEVELSLIST,\n \"accessibility_labels\": ACCESSIBILITYCATEGORIESLIST,\n 
\"learner_needs\": NEEDSLIST,\n}\n\n\nmetadata_bitmasks = {}\n\nbitmask_fieldnames = {}\n\n\nfor key, labels in metadata_lookup.items():\n bitmask_lookup = {}\n i = 0\n while labels[i : i + 64]:\n bitmask_field_name = \"{}_bitmask_{}\".format(key, i)\n bitmask_fieldnames[bitmask_field_name] = []\n for j, label in enumerate(labels):\n info = {\n \"bitmask_field_name\": bitmask_field_name,\n \"field_name\": key,\n \"bits\": 2 ** j,\n \"label\": label,\n }\n bitmask_lookup[label] = info\n bitmask_fieldnames[bitmask_field_name].append(info)\n i += 64\n metadata_bitmasks[key] = bitmask_lookup\n\n\ndef _get_available_languages(base_queryset):\n from kolibri.core.content.models import Language\n\n langs = Language.objects.filter(\n id__in=base_queryset.exclude(lang=None)\n .values_list(\"lang_id\", flat=True)\n .distinct()\n ).values(\"id\", \"lang_name\")\n return list(langs)\n\n\ndef _get_available_channels(base_queryset):\n from kolibri.core.content.models import ChannelMetadata\n\n return list(\n ChannelMetadata.objects.filter(\n id__in=base_queryset.values_list(\"channel_id\", flat=True).distinct()\n ).values(\"id\", \"name\")\n )\n\n\nclass SQLiteBitwiseORAggregate(Aggregate):\n name = \"BitwiseOR\"\n\n def __init__(self, expression, num_bits=None, **extra):\n if not num_bits:\n raise ValueError(\"num_bits must be a positive integer\")\n self.num_bits = num_bits\n super(SQLiteBitwiseORAggregate, self).__init__(\n expression, output_field=IntegerField(), **extra\n )\n\n @property\n def template(self):\n return \" + \".join(\n \"max(%(expressions)s&{})\".format(2 ** i) for i in range(0, self.num_bits)\n )\n\n\ndef get_available_metadata_labels(base_queryset):\n from kolibri.core.device.models import ContentCacheKey\n\n content_cache_key = ContentCacheKey.get_cache_key()\n cache_key = \"search-labels:{}:{}\".format(\n content_cache_key,\n hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n )\n if cache_key not in cache:\n base_queryset = base_queryset.order_by()\n aggregates = {}\n for field in bitmask_fieldnames:\n field_agg = field + \"_agg\"\n if connections[base_queryset.db].vendor == \"sqlite\" or BitOr is None:\n aggregates[field_agg] = SQLiteBitwiseORAggregate(\n field, num_bits=len(bitmask_fieldnames[field])\n )\n elif connections[base_queryset.db].vendor == \"postgresql\":\n aggregates[field_agg] = BitOr(field)\n output = {}\n agg = base_queryset.aggregate(**aggregates)\n for field, values in bitmask_fieldnames.items():\n bit_value = agg[field + \"_agg\"]\n for value in values:\n if value[\"field_name\"] not in output:\n output[value[\"field_name\"]] = []\n if bit_value is not None and bit_value & value[\"bits\"]:\n output[value[\"field_name\"]].append(value[\"label\"])\n output[\"languages\"] = _get_available_languages(base_queryset)\n output[\"channels\"] = _get_available_channels(base_queryset)\n cache.set(cache_key, output, timeout=None)\n return cache.get(cache_key)\n\n\ndef get_all_contentnode_label_metadata():\n from kolibri.core.content.models import ContentNode\n\n return get_available_metadata_labels(ContentNode.objects.filter(available=True))\n\n\ndef annotate_label_bitmasks(queryset):\n update_statements = {}\n for bitmask_fieldname, label_info in bitmask_fieldnames.items():\n update_statements[bitmask_fieldname] = sum(\n Case(\n When(\n **{\n info[\"field_name\"] + \"__contains\": info[\"label\"],\n \"then\": Value(info[\"bits\"]),\n }\n ),\n default=Value(0),\n )\n for info in label_info\n )\n queryset.update(**update_statements)\n", "path": 
"kolibri/core/content/utils/search.py"}]} | 2,261 | 392 |
gh_patches_debug_18943 | rasdani/github-patches | git_diff | ansible__ansible-lint-996 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E208 is too aggressive
##### Summary
When E208 is triggered, it should be sure that a file is actually being created. Some modules only optionally create the file and, when not used in that form, should not require a mode to be set. Examples are ini_file, lineinfile, and blockinfile. These modules are frequently used in their default mode with `create: false`. Nobody should be required to set `mode: "0644"` or similar in this situation.
##### Issue Type
- Bug Report
##### Ansible and Ansible Lint details
<!--- Paste verbatim output between triple backticks -->
```console (paste below)
ansible --version
ansible 2.9.12
config file = /home/greg/.ansible.cfg
configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible
executable location = .tox/lint_all/bin/ansible
python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
ansible-lint --version
ansible-lint 4.3.1
```
- ansible installation method: one of source, pip, OS package
pip
- ansible-lint installation method: one of source, pip, OS package
pip
##### OS / ENVIRONMENT
Fedora 32
##### STEPS TO REPRODUCE
```yaml
- name: should not produce E208
lineinfile:
line: "# some content here"
file: "{{ ansible_user_dir }}/.bashrc"
- name: SHOULD produce E208
lineinfile:
line: "# more content
file: "{{ ansible_user_dir }}/.bash_profile"
create: true
```
##### Desired Behaviour
ansible-lint should not error when it encounters a missing `mode` parameter for these modules while `create: false` is in effect. Note that `create: false` is the default behavior of those modules.
##### Actual Behaviour
ansible-lint throws E208 on both of the above steps
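
A rough sketch of the kind of guard the rule could use, only flagging tasks that can actually create a file. This is illustrative only; the function and set names are made up, and the module grouping is an assumption rather than ansible-lint's actual list:

```python
ALWAYS_CREATE = {"copy", "file", "template", "unarchive", "replace"}
CREATE_ON_REQUEST = {"lineinfile", "blockinfile", "ini_file"}


def should_flag_missing_mode(task):
    """Return True when a task may create a file without an explicit mode."""
    action = task["action"]
    module = action["__ansible_module__"]
    if module in CREATE_ON_REQUEST:
        # create defaults to false for these modules, so no new file is written
        if not action.get("create", False):
            return False
    elif module not in ALWAYS_CREATE:
        return False
    if action.get("state") in ("absent", "link"):
        return False
    return action.get("mode") is None
```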
</issue>
<code>
[start of lib/ansiblelint/rules/MissingFilePermissionsRule.py]
1 # Copyright (c) 2020 Sorin Sbarnea <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20 from ansiblelint.rules import AnsibleLintRule
21
22
23 class MissingFilePermissionsRule(AnsibleLintRule):
24 id = "208"
25 shortdesc = 'File permissions not mentioned'
26 description = (
27 "Missing mode parameter can cause unexpected file permissions based "
28 "on version of Ansible being used. Be explicit, or if you still "
29 "want the default behavior you can use ``mode: preserve`` to avoid "
30 "hitting this rule. See "
31 "https://github.com/ansible/ansible/issues/71200"
32 )
33 severity = 'VERY_HIGH'
34 tags = ['unpredictability']
35 version_added = 'v4.3.0'
36
37 _modules = (
38 'copy',
39 'file',
40 'ini_file',
41 'lineinfile',
42 'replace',
43 'template',
44 'unarchive',
45 )
46
47 def matchtask(self, file, task):
48 if task["action"]["__ansible_module__"] not in self._modules:
49 return False
50
51 if task['action'].get('state', None) == "absent":
52 return False
53
54 if task['action'].get('state', None) == "link":
55 return False
56
57 mode = task['action'].get('mode', None)
58 return mode is None
59
[end of lib/ansiblelint/rules/MissingFilePermissionsRule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py
--- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py
+++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py
@@ -35,17 +35,28 @@
version_added = 'v4.3.0'
_modules = (
+ 'assemble',
+ 'archive',
'copy',
'file',
- 'ini_file',
- 'lineinfile',
'replace',
'template',
'unarchive',
)
+ _modules_with_create = (
+ 'blockinfile',
+ 'ini_file',
+ 'lineinfile'
+ )
+
def matchtask(self, file, task):
- if task["action"]["__ansible_module__"] not in self._modules:
+ if task["action"]["__ansible_module__"] not in self._modules and \
+ task["action"]["__ansible_module__"] not in self._modules_with_create:
+ return False
+
+ if task["action"]["__ansible_module__"] in self._modules_with_create and \
+ not task["action"].get("create", False):
return False
if task['action'].get('state', None) == "absent":
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n--- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n+++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n@@ -35,17 +35,28 @@\n version_added = 'v4.3.0'\n \n _modules = (\n+ 'assemble',\n+ 'archive',\n 'copy',\n 'file',\n- 'ini_file',\n- 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n \n+ _modules_with_create = (\n+ 'blockinfile',\n+ 'ini_file',\n+ 'lineinfile'\n+ )\n+\n def matchtask(self, file, task):\n- if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n+ if task[\"action\"][\"__ansible_module__\"] not in self._modules and \\\n+ task[\"action\"][\"__ansible_module__\"] not in self._modules_with_create:\n+ return False\n+\n+ if task[\"action\"][\"__ansible_module__\"] in self._modules_with_create and \\\n+ not task[\"action\"].get(\"create\", False):\n return False\n \n if task['action'].get('state', None) == \"absent\":\n", "issue": "E208 is too aggressive\n##### Summary\r\nWhen E208 is triggered, it should be sure that a file is being created. There are modules which only optionally create the file and, when not used in that form, then they should not require a mode be set. Such an example are ini_file, lineinfile, or blockinfile. These modules are frequently used in their default mode with `create: false`. Someone should not be required to set `mode: \"0644\"` or such in this situation.\r\n\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between tripple backticks -->\r\n```console (paste below)\r\nansible --version\r\nansible 2.9.12\r\n config file = /home/greg/.ansible.cfg\r\n configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible\r\n executable location = .tox/lint_all/bin/ansible\r\n python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]\r\n\r\nansible-lint --version\r\nansible-lint 4.3.1\r\n```\r\n\r\n- ansible installation method: one of source, pip, OS package\r\npip\r\n- ansible-lint installation method: one of source, pip, OS package\r\npip\r\n\r\n##### OS / ENVIRONMENT\r\nFedora 32\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n```yaml\r\n- name: should not produce E208\r\n lineinfile:\r\n line: \"# some content here\"\r\n file: \"{{ ansible_user_dir }}/.bashrc\"\r\n\r\n- name: SHOULD produce E208\r\n lineinfile:\r\n line: \"# more content\r\n file: \"{{ ansible_user_dir }}/.bash_profile\"\r\n create: true\r\n```\r\n##### Desired Behaviour\r\nansible-lint should not error when it encounters a missing \"mode\" parameter for these modules, when `create: false`. 
Note that `create: false` is the default behavior of those modules\r\n\r\n##### Actual Behaviour\r\nansible-lint throws E208 on both of the above steps\n", "before_files": [{"content": "# Copyright (c) 2020 Sorin Sbarnea <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom ansiblelint.rules import AnsibleLintRule\n\n\nclass MissingFilePermissionsRule(AnsibleLintRule):\n id = \"208\"\n shortdesc = 'File permissions not mentioned'\n description = (\n \"Missing mode parameter can cause unexpected file permissions based \"\n \"on version of Ansible being used. Be explicit, or if you still \"\n \"want the default behavior you can use ``mode: preserve`` to avoid \"\n \"hitting this rule. See \"\n \"https://github.com/ansible/ansible/issues/71200\"\n )\n severity = 'VERY_HIGH'\n tags = ['unpredictability']\n version_added = 'v4.3.0'\n\n _modules = (\n 'copy',\n 'file',\n 'ini_file',\n 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n return False\n\n if task['action'].get('state', None) == \"absent\":\n return False\n\n if task['action'].get('state', None) == \"link\":\n return False\n\n mode = task['action'].get('mode', None)\n return mode is None\n", "path": "lib/ansiblelint/rules/MissingFilePermissionsRule.py"}]} | 1,704 | 290 |
gh_patches_debug_22774 | rasdani/github-patches | git_diff | vispy__vispy-1596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
XYZAxisVisuals Override Defaults
It looks like XYZAxisVisual is not overridable in the `__init__` function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`.
The `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and/or connect kwargs? I am hesitant to submit a PR modifying `**kwds` since I am not 100% sure how the passing is working.
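
For comparison, a workaround that sidesteps the hard-coded defaults is to build the axes directly from `LineVisual`, mirroring the arguments `XYZAxisVisual` itself passes. The vertex and color values below are arbitrary examples, not anything prescribed by vispy:

```python
import numpy as np

from vispy.visuals import LineVisual

my_custom_verts = np.array([[0, 0, 0], [2, 0, 0],
                            [0, 0, 0], [0, 2, 0],
                            [0, 0, 0], [0, 0, 2]], dtype=np.float32)
colors = np.array([[1, 0, 0, 1]] * 2 +
                  [[0, 1, 0, 1]] * 2 +
                  [[0, 0, 1, 1]] * 2, dtype=np.float32)

# Same call XYZAxisVisual makes internally, but with caller-supplied data.
axis = LineVisual(pos=my_custom_verts, color=colors,
                  connect='segments', method='gl')
```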
</issue>
<code>
[start of vispy/visuals/xyz_axis.py]
1
2 import numpy as np
3
4 from .line import LineVisual
5
6
7 class XYZAxisVisual(LineVisual):
8 """
9 Simple 3D axis for indicating coordinate system orientation. Axes are
10 x=red, y=green, z=blue.
11 """
12 def __init__(self, **kwargs):
13 verts = np.array([[0, 0, 0],
14 [1, 0, 0],
15 [0, 0, 0],
16 [0, 1, 0],
17 [0, 0, 0],
18 [0, 0, 1]])
19 color = np.array([[1, 0, 0, 1],
20 [1, 0, 0, 1],
21 [0, 1, 0, 1],
22 [0, 1, 0, 1],
23 [0, 0, 1, 1],
24 [0, 0, 1, 1]])
25 LineVisual.__init__(self, pos=verts, color=color, connect='segments',
26 method='gl', **kwargs)
27
[end of vispy/visuals/xyz_axis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py
--- a/vispy/visuals/xyz_axis.py
+++ b/vispy/visuals/xyz_axis.py
@@ -10,17 +10,24 @@
x=red, y=green, z=blue.
"""
def __init__(self, **kwargs):
- verts = np.array([[0, 0, 0],
- [1, 0, 0],
- [0, 0, 0],
- [0, 1, 0],
- [0, 0, 0],
- [0, 0, 1]])
+ pos = np.array([[0, 0, 0],
+ [1, 0, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 0, 1]])
color = np.array([[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
[0, 0, 1, 1]])
- LineVisual.__init__(self, pos=verts, color=color, connect='segments',
- method='gl', **kwargs)
+ connect = 'segments'
+ method = 'gl'
+
+ kwargs.setdefault('pos', pos)
+ kwargs.setdefault('color', color)
+ kwargs.setdefault('connect', connect)
+ kwargs.setdefault('method', method)
+
+ LineVisual.__init__(self, **kwargs)
| {"golden_diff": "diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py\n--- a/vispy/visuals/xyz_axis.py\n+++ b/vispy/visuals/xyz_axis.py\n@@ -10,17 +10,24 @@\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n- verts = np.array([[0, 0, 0],\n- [1, 0, 0],\n- [0, 0, 0],\n- [0, 1, 0],\n- [0, 0, 0],\n- [0, 0, 1]])\n+ pos = np.array([[0, 0, 0],\n+ [1, 0, 0],\n+ [0, 0, 0],\n+ [0, 1, 0],\n+ [0, 0, 0],\n+ [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n- LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n- method='gl', **kwargs)\n+ connect = 'segments'\n+ method = 'gl'\n+\n+ kwargs.setdefault('pos', pos)\n+ kwargs.setdefault('color', color)\n+ kwargs.setdefault('connect', connect)\n+ kwargs.setdefault('method', method)\n+\n+ LineVisual.__init__(self, **kwargs)\n", "issue": "XYZAxisVisuals Override Defaults\nIt looks like XYZAxisVisual is not overridable in the **init** function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`.\n\nThe `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and / or connect kwargs? I am hesitant to submit a PR modifying **kwds since I am not 100% sure how the passing is working.\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom .line import LineVisual\n\n\nclass XYZAxisVisual(LineVisual):\n \"\"\"\n Simple 3D axis for indicating coordinate system orientation. Axes are\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n verts = np.array([[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n method='gl', **kwargs)\n", "path": "vispy/visuals/xyz_axis.py"}]} | 953 | 406 |
gh_patches_debug_21327 | rasdani/github-patches | git_diff | pymedusa__Medusa-3097 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookie required when configuring custom provider
Branch: master
Commit: 3f23fd272652b10cf781e3058fee50daf79f11b6
Version: v0.1.15
Database: 44.8
Python Version: 2.7.13 (default, Dec 18 2016, 22:44:40) [GCC 4.9.3 20150311 (prerelease)]
SSL Version: OpenSSL 1.0.2j 26 Sep 2016
OS: Linux-3.10.102-armv7l-with-glibc2.4
Locale: en_US.UTF-8
Trying to add a custom provider, EZTV. I don't use a login, just a link to the RSS feed. When I try to add the site, it requires a cookie, something I don't recall having to do w/SR. When I put in all of the details except the cookie and hit add, I get an error box stating "No Cookies added from ui for provider: EZTV". Shouldn't the cookie be optional, or has something changed that requires cookies to be set?
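
Schematically, the expectation is that cookie validation only runs when a cookie string was actually supplied; a simplified sketch of that behaviour (function name made up, not the actual fix):

```python
def validate_custom_rss(provider):
    # Cookies are optional for public feeds: only validate them when supplied.
    if provider.cookies:
        added = provider.add_cookies_from_ui()
        if not added.get('result'):
            return added
    entries = provider.cache._get_rss_data()['entries']
    if not entries:
        return {'result': False, 'message': 'No items found in the RSS feed'}
    return {'result': True, 'message': 'RSS feed parsed correctly'}
```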
</issue>
<code>
[start of medusa/providers/torrent/rss/rsstorrent.py]
1 # coding=utf-8
2
3 """Provider code for RSS Torrents."""
4
5 from __future__ import unicode_literals
6
7 import io
8 import logging
9 import os
10 import re
11
12 from bencode import bdecode
13
14 from medusa import (
15 app,
16 helpers,
17 tv,
18 )
19 from medusa.helper.exceptions import ex
20 from medusa.providers.torrent.torrent_provider import TorrentProvider
21
22 log = logging.getLogger(__name__)
23 log.logger.addHandler(logging.NullHandler())
24
25
26 class TorrentRssProvider(TorrentProvider):
27 """Torrent RSS provider."""
28
29 def __init__(self, name, url, cookies='',
30 title_tag='title', search_mode='eponly', search_fallback=False,
31 enable_daily=False, enable_backlog=False, enable_manualsearch=False):
32 """Initialize the class."""
33 super(TorrentRssProvider, self).__init__(name)
34
35 # Credentials
36
37 # URLs
38 self.url = url.rstrip('/')
39
40 # Proper Strings
41
42 # Miscellaneous Options
43 self.supports_backlog = False
44 self.search_mode = search_mode
45 self.search_fallback = search_fallback
46 self.enable_daily = enable_daily
47 self.enable_manualsearch = enable_manualsearch
48 self.enable_backlog = enable_backlog
49 self.enable_cookies = True
50 self.cookies = cookies
51 self.title_tag = title_tag
52
53 # Torrent Stats
54
55 # Cache
56 self.cache = TorrentRssCache(self, min_time=15)
57
58 def _get_title_and_url(self, item):
59 """Get title and url from result."""
60 title = item.get(self.title_tag, '').replace(' ', '.')
61
62 attempt_list = [
63 lambda: item.get('torrent_magneturi'),
64 lambda: item.enclosures[0].href,
65 lambda: item.get('link')
66 ]
67
68 url = None
69 for cur_attempt in attempt_list:
70 try:
71 url = cur_attempt()
72 except Exception:
73 continue
74
75 if title and url:
76 break
77
78 return title, url
79
80 def config_string(self):
81 """Return default RSS torrent provider config setting."""
82 return '{}|{}|{}|{}|{}|{}|{}|{}|{}|{}'.format(
83 self.name or '',
84 self.url or '',
85 self.cookies or '',
86 self.title_tag or '',
87 int(self.enabled),
88 self.search_mode or '',
89 int(self.search_fallback),
90 int(self.enable_daily),
91 int(self.enable_manualsearch),
92 int(self.enable_backlog)
93 )
94
95 @staticmethod
96 def get_providers_list(data):
97 """Get RSS torrent provider list."""
98 providers_list = [x for x in (TorrentRssProvider._make_provider(x) for x in data.split('!!!')) if x]
99 seen_values = set()
100 providers_set = []
101
102 for provider in providers_list:
103 value = provider.name
104
105 if value not in seen_values:
106 providers_set.append(provider)
107 seen_values.add(value)
108
109 return [x for x in providers_set if x]
110
111 def image_name(self):
112 """Return RSS torrent image."""
113 if os.path.isfile(os.path.join(app.PROG_DIR, 'static/images/providers/', self.get_id() + '.png')):
114 return self.get_id() + '.png'
115 return 'torrentrss.png'
116
117 @staticmethod
118 def _make_provider(config):
119 """Create new RSS provider."""
120 if not config:
121 return None
122
123 cookies = ''
124 enable_backlog = 0
125 enable_daily = 0
126 enable_manualsearch = 0
127 search_fallback = 0
128 search_mode = 'eponly'
129 title_tag = 'title'
130
131 try:
132 values = config.split('|')
133
134 if len(values) == 9:
135 name, url, cookies, title_tag, enabled, search_mode, search_fallback, enable_daily, enable_backlog = values
136 elif len(values) == 10:
137 name, url, cookies, title_tag, enabled, search_mode, search_fallback, enable_daily, enable_backlog, enable_manualsearch = values
138 elif len(values) == 8:
139 name, url, cookies, enabled, search_mode, search_fallback, enable_daily, enable_backlog = values
140 else:
141 enabled = values[4]
142 name = values[0]
143 url = values[1]
144 except ValueError:
145 log.error('Skipping RSS Torrent provider string: {0}, incorrect format', config)
146 return None
147
148 new_provider = TorrentRssProvider(
149 name, url, cookies=cookies, title_tag=title_tag, search_mode=search_mode, search_fallback=search_fallback,
150 enable_daily=enable_daily, enable_backlog=enable_backlog, enable_manualsearch=enable_manualsearch
151 )
152 new_provider.enabled = enabled == '1'
153
154 return new_provider
155
156 def validate_rss(self):
157 """Validate if RSS."""
158 try:
159 add_cookie = self.add_cookies_from_ui()
160 if not add_cookie.get('result'):
161 return add_cookie
162
163 data = self.cache._get_rss_data()['entries']
164 if not data:
165 return {'result': False,
166 'message': 'No items found in the RSS feed {0}'.format(self.url)}
167
168 title, url = self._get_title_and_url(data[0])
169
170 if not title:
171 return {'result': False,
172 'message': 'Unable to get title from first item'}
173
174 if not url:
175 return {'result': False,
176 'message': 'Unable to get torrent url from first item'}
177
178 if url.startswith('magnet:') and re.search(r'urn:btih:([\w]{32,40})', url):
179 return {'result': True,
180 'message': 'RSS feed Parsed correctly'}
181 else:
182 torrent_file = self.session.get_content(url)
183 try:
184 bdecode(torrent_file)
185 except Exception as error:
186 self.dump_html(torrent_file)
187 return {'result': False,
188 'message': 'Torrent link is not a valid torrent file: {0}'.format(ex(error))}
189
190 return {'result': True, 'message': 'RSS feed Parsed correctly'}
191
192 except Exception as error:
193 return {'result': False, 'message': 'Error when trying to load RSS: {0}'.format(ex(error))}
194
195 @staticmethod
196 def dump_html(data):
197 """Dump html data."""
198 dump_name = os.path.join(app.CACHE_DIR, 'custom_torrent.html')
199
200 try:
201 file_out = io.open(dump_name, 'wb')
202 file_out.write(data)
203 file_out.close()
204 helpers.chmod_as_parent(dump_name)
205 except IOError as error:
206 log.error('Unable to save the file: {0}', error)
207 return False
208
209 log.info('Saved custom_torrent html dump {0} ', dump_name)
210 return True
211
212
213 class TorrentRssCache(tv.Cache):
214 """RSS torrent cache class."""
215
216 def _get_rss_data(self):
217 """Get RSS data."""
218 return self.get_rss_feed(self.provider.url)
219
[end of medusa/providers/torrent/rss/rsstorrent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/providers/torrent/rss/rsstorrent.py b/medusa/providers/torrent/rss/rsstorrent.py
--- a/medusa/providers/torrent/rss/rsstorrent.py
+++ b/medusa/providers/torrent/rss/rsstorrent.py
@@ -46,8 +46,9 @@
self.enable_daily = enable_daily
self.enable_manualsearch = enable_manualsearch
self.enable_backlog = enable_backlog
- self.enable_cookies = True
+ self.enable_cookies = bool(cookies)
self.cookies = cookies
+ self.required_cookies = ('uid', 'pass')
self.title_tag = title_tag
# Torrent Stats
@@ -156,9 +157,10 @@
def validate_rss(self):
"""Validate if RSS."""
try:
- add_cookie = self.add_cookies_from_ui()
- if not add_cookie.get('result'):
- return add_cookie
+ if self.enable_cookies:
+ add_cookie = self.add_cookies_from_ui()
+ if not add_cookie.get('result'):
+ return add_cookie
data = self.cache._get_rss_data()['entries']
if not data:
| {"golden_diff": "diff --git a/medusa/providers/torrent/rss/rsstorrent.py b/medusa/providers/torrent/rss/rsstorrent.py\n--- a/medusa/providers/torrent/rss/rsstorrent.py\n+++ b/medusa/providers/torrent/rss/rsstorrent.py\n@@ -46,8 +46,9 @@\n self.enable_daily = enable_daily\n self.enable_manualsearch = enable_manualsearch\n self.enable_backlog = enable_backlog\n- self.enable_cookies = True\n+ self.enable_cookies = bool(cookies)\n self.cookies = cookies\n+ self.required_cookies = ('uid', 'pass')\n self.title_tag = title_tag\n \n # Torrent Stats\n@@ -156,9 +157,10 @@\n def validate_rss(self):\n \"\"\"Validate if RSS.\"\"\"\n try:\n- add_cookie = self.add_cookies_from_ui()\n- if not add_cookie.get('result'):\n- return add_cookie\n+ if self.enable_cookies:\n+ add_cookie = self.add_cookies_from_ui()\n+ if not add_cookie.get('result'):\n+ return add_cookie\n \n data = self.cache._get_rss_data()['entries']\n if not data:\n", "issue": "Cookie required when configuring custom provider\n\r\nBranch:\u00a0master\r\nCommit:\u00a03f23fd272652b10cf781e3058fee50daf79f11b6Version:\u00a0v0.1.15\r\nDatabase: 44.8\r\nPython Version: | 2.7.13 (default, Dec 18 2016, 22:44:40) [GCC 4.9.3 20150311 (prerelease)]\r\nSSL Version: | OpenSSL 1.0.2j 26 Sep 2016\r\nOS: | Linux-3.10.102-armv7l-with-glibc2.4\r\nLocale: | en_US.UTF-8\r\n\r\nTrying to add custom provider, EZTV. I don't use a login, just linking to the RSS feed. When I try to add the site, it's requiring a cookie, something I don't recall having to do w/SR. When I put all of the details in except the cookie and hit add, I get an error box stating \"No Cookies added from ui for provider: EZTV\". Shouldn't this be optional or has something changed requiring cookies to be set?\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for RSS Torrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os\nimport re\n\nfrom bencode import bdecode\n\nfrom medusa import (\n app,\n helpers,\n tv,\n)\nfrom medusa.helper.exceptions import ex\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nlog = logging.getLogger(__name__)\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass TorrentRssProvider(TorrentProvider):\n \"\"\"Torrent RSS provider.\"\"\"\n\n def __init__(self, name, url, cookies='',\n title_tag='title', search_mode='eponly', search_fallback=False,\n enable_daily=False, enable_backlog=False, enable_manualsearch=False):\n \"\"\"Initialize the class.\"\"\"\n super(TorrentRssProvider, self).__init__(name)\n\n # Credentials\n\n # URLs\n self.url = url.rstrip('/')\n\n # Proper Strings\n\n # Miscellaneous Options\n self.supports_backlog = False\n self.search_mode = search_mode\n self.search_fallback = search_fallback\n self.enable_daily = enable_daily\n self.enable_manualsearch = enable_manualsearch\n self.enable_backlog = enable_backlog\n self.enable_cookies = True\n self.cookies = cookies\n self.title_tag = title_tag\n\n # Torrent Stats\n\n # Cache\n self.cache = TorrentRssCache(self, min_time=15)\n\n def _get_title_and_url(self, item):\n \"\"\"Get title and url from result.\"\"\"\n title = item.get(self.title_tag, '').replace(' ', '.')\n\n attempt_list = [\n lambda: item.get('torrent_magneturi'),\n lambda: item.enclosures[0].href,\n lambda: item.get('link')\n ]\n\n url = None\n for cur_attempt in attempt_list:\n try:\n url = cur_attempt()\n except Exception:\n continue\n\n if title and url:\n break\n\n return title, url\n\n def config_string(self):\n \"\"\"Return default RSS torrent 
provider config setting.\"\"\"\n return '{}|{}|{}|{}|{}|{}|{}|{}|{}|{}'.format(\n self.name or '',\n self.url or '',\n self.cookies or '',\n self.title_tag or '',\n int(self.enabled),\n self.search_mode or '',\n int(self.search_fallback),\n int(self.enable_daily),\n int(self.enable_manualsearch),\n int(self.enable_backlog)\n )\n\n @staticmethod\n def get_providers_list(data):\n \"\"\"Get RSS torrent provider list.\"\"\"\n providers_list = [x for x in (TorrentRssProvider._make_provider(x) for x in data.split('!!!')) if x]\n seen_values = set()\n providers_set = []\n\n for provider in providers_list:\n value = provider.name\n\n if value not in seen_values:\n providers_set.append(provider)\n seen_values.add(value)\n\n return [x for x in providers_set if x]\n\n def image_name(self):\n \"\"\"Return RSS torrent image.\"\"\"\n if os.path.isfile(os.path.join(app.PROG_DIR, 'static/images/providers/', self.get_id() + '.png')):\n return self.get_id() + '.png'\n return 'torrentrss.png'\n\n @staticmethod\n def _make_provider(config):\n \"\"\"Create new RSS provider.\"\"\"\n if not config:\n return None\n\n cookies = ''\n enable_backlog = 0\n enable_daily = 0\n enable_manualsearch = 0\n search_fallback = 0\n search_mode = 'eponly'\n title_tag = 'title'\n\n try:\n values = config.split('|')\n\n if len(values) == 9:\n name, url, cookies, title_tag, enabled, search_mode, search_fallback, enable_daily, enable_backlog = values\n elif len(values) == 10:\n name, url, cookies, title_tag, enabled, search_mode, search_fallback, enable_daily, enable_backlog, enable_manualsearch = values\n elif len(values) == 8:\n name, url, cookies, enabled, search_mode, search_fallback, enable_daily, enable_backlog = values\n else:\n enabled = values[4]\n name = values[0]\n url = values[1]\n except ValueError:\n log.error('Skipping RSS Torrent provider string: {0}, incorrect format', config)\n return None\n\n new_provider = TorrentRssProvider(\n name, url, cookies=cookies, title_tag=title_tag, search_mode=search_mode, search_fallback=search_fallback,\n enable_daily=enable_daily, enable_backlog=enable_backlog, enable_manualsearch=enable_manualsearch\n )\n new_provider.enabled = enabled == '1'\n\n return new_provider\n\n def validate_rss(self):\n \"\"\"Validate if RSS.\"\"\"\n try:\n add_cookie = self.add_cookies_from_ui()\n if not add_cookie.get('result'):\n return add_cookie\n\n data = self.cache._get_rss_data()['entries']\n if not data:\n return {'result': False,\n 'message': 'No items found in the RSS feed {0}'.format(self.url)}\n\n title, url = self._get_title_and_url(data[0])\n\n if not title:\n return {'result': False,\n 'message': 'Unable to get title from first item'}\n\n if not url:\n return {'result': False,\n 'message': 'Unable to get torrent url from first item'}\n\n if url.startswith('magnet:') and re.search(r'urn:btih:([\\w]{32,40})', url):\n return {'result': True,\n 'message': 'RSS feed Parsed correctly'}\n else:\n torrent_file = self.session.get_content(url)\n try:\n bdecode(torrent_file)\n except Exception as error:\n self.dump_html(torrent_file)\n return {'result': False,\n 'message': 'Torrent link is not a valid torrent file: {0}'.format(ex(error))}\n\n return {'result': True, 'message': 'RSS feed Parsed correctly'}\n\n except Exception as error:\n return {'result': False, 'message': 'Error when trying to load RSS: {0}'.format(ex(error))}\n\n @staticmethod\n def dump_html(data):\n \"\"\"Dump html data.\"\"\"\n dump_name = os.path.join(app.CACHE_DIR, 'custom_torrent.html')\n\n try:\n file_out = 
io.open(dump_name, 'wb')\n file_out.write(data)\n file_out.close()\n helpers.chmod_as_parent(dump_name)\n except IOError as error:\n log.error('Unable to save the file: {0}', error)\n return False\n\n log.info('Saved custom_torrent html dump {0} ', dump_name)\n return True\n\n\nclass TorrentRssCache(tv.Cache):\n \"\"\"RSS torrent cache class.\"\"\"\n\n def _get_rss_data(self):\n \"\"\"Get RSS data.\"\"\"\n return self.get_rss_feed(self.provider.url)\n", "path": "medusa/providers/torrent/rss/rsstorrent.py"}]} | 2,911 | 259 |
gh_patches_debug_30221 | rasdani/github-patches | git_diff | hylang__hy-2354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import from zip files
It would be handy to be able to import .hy files from .zip archives (for those of us who like to deploy stuff in single file bundles).
Right now I'm working around this by doing:
```
BUNDLE=sushy.zip
clean:
rm *.zip
bundle:
hyc sushy/*.hy
zip -r9 $(BUNDLE) . -i *.py *.pyc
test-bundle:
PYTHONPATH=$(BUNDLE) python -m sushy
```
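
For reference, a minimal sketch of what built-in support could look like by hooking into CPython's `zipimport` module. This leans on private internals of CPython 3.8+ (`_zip_searchorder`, `_compile_source`), so those hook names are an assumption rather than a public API:

```
import zipimport

from hy.compiler import hy_compile
from hy.reader import read_many

# Teach zipimport to look for .hy sources inside archives, next to .py/.pyc.
if (".hy", False, False) not in zipimport._zip_searchorder:
    zipimport._zip_searchorder += ((".hy", False, False),)

_py_compile_source = zipimport._compile_source

def _hy_compile_source(pathname, source):
    # Keep the stock behaviour for ordinary Python sources.
    if not pathname.endswith(".hy"):
        return _py_compile_source(pathname, source)
    # Read the Hy forms and compile them down to a Python code object.
    tree = read_many(source.decode("utf-8"), filename=pathname)
    return compile(hy_compile(tree, "<zip>"), pathname, "exec", dont_inherit=True)

zipimport._compile_source = _hy_compile_source
```

With something along these lines in place, `PYTHONPATH=$(BUNDLE) python -m sushy` should work without pre-compiling everything to .pyc first.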
</issue>
<code>
[start of hy/importer.py]
1 import builtins
2 import importlib
3 import inspect
4 import os
5 import pkgutil
6 import sys
7 import types
8 from contextlib import contextmanager
9 from functools import partial
10
11 import hy
12 from hy.compiler import hy_compile
13 from hy.reader import read_many
14
15
16 @contextmanager
17 def loader_module_obj(loader):
18 """Use the module object associated with a loader.
19
20 This is intended to be used by a loader object itself, and primarily as a
21 work-around for attempts to get module and/or file code from a loader
22 without actually creating a module object. Since Hy currently needs the
23 module object for macro importing, expansion, and whatnot, using this will
24 reconcile Hy with such attempts.
25
26 For example, if we're first compiling a Hy script starting from
27 `runpy.run_path`, the Hy compiler will need a valid module object in which
28 to run, but, given the way `runpy.run_path` works, there might not be one
29 yet (e.g. `__main__` for a .hy file). We compensate by properly loading
30 the module here.
31
32 The function `inspect.getmodule` has a hidden-ish feature that returns
33 modules using their associated filenames (via `inspect.modulesbyfile`),
34 and, since the Loaders (and their delegate Loaders) carry a filename/path
35 associated with the parent package, we use it as a more robust attempt to
36 obtain an existing module object.
37
38 When no module object is found, a temporary, minimally sufficient module
39 object is created for the duration of the `with` body.
40 """
41 tmp_mod = False
42
43 try:
44 module = inspect.getmodule(None, _filename=loader.path)
45 except KeyError:
46 module = None
47
48 if module is None:
49 tmp_mod = True
50 module = sys.modules.setdefault(loader.name, types.ModuleType(loader.name))
51 module.__file__ = loader.path
52 module.__name__ = loader.name
53
54 try:
55 yield module
56 finally:
57 if tmp_mod:
58 del sys.modules[loader.name]
59
60
61 def _hy_code_from_file(filename, loader_type=None):
62 """Use PEP-302 loader to produce code for a given Hy source file."""
63 full_fname = os.path.abspath(filename)
64 fname_path, fname_file = os.path.split(full_fname)
65 modname = os.path.splitext(fname_file)[0]
66 sys.path.insert(0, fname_path)
67 try:
68 if loader_type is None:
69 loader = pkgutil.get_loader(modname)
70 else:
71 loader = loader_type(modname, full_fname)
72 code = loader.get_code(modname)
73 finally:
74 sys.path.pop(0)
75
76 return code
77
78
79 def _get_code_from_file(run_name, fname=None, hy_src_check=lambda x: x.endswith(".hy")):
80 """A patch of `runpy._get_code_from_file` that will also run and cache Hy
81 code.
82 """
83 if fname is None and run_name is not None:
84 fname = run_name
85
86 # Check for bytecode first. (This is what the `runpy` version does!)
87 with open(fname, "rb") as f:
88 code = pkgutil.read_code(f)
89
90 if code is None:
91 if hy_src_check(fname):
92 code = _hy_code_from_file(fname, loader_type=HyLoader)
93 else:
94 # Try normal source
95 with open(fname, "rb") as f:
96 # This code differs from `runpy`'s only in that we
97 # force decoding into UTF-8.
98 source = f.read().decode("utf-8")
99 code = compile(source, fname, "exec")
100
101 return (code, fname)
102
103
104 importlib.machinery.SOURCE_SUFFIXES.insert(0, ".hy")
105 _py_source_to_code = importlib.machinery.SourceFileLoader.source_to_code
106
107
108 def _could_be_hy_src(filename):
109 return os.path.isfile(filename) and (
110 filename.endswith(".hy")
111 or not any(
112 filename.endswith(ext) for ext in importlib.machinery.SOURCE_SUFFIXES[1:]
113 )
114 )
115
116
117 def _hy_source_to_code(self, data, path, _optimize=-1):
118 if _could_be_hy_src(path):
119 if os.environ.get("HY_MESSAGE_WHEN_COMPILING"):
120 print("Compiling", path, file=sys.stderr)
121 source = data.decode("utf-8")
122 hy_tree = read_many(source, filename=path, skip_shebang=True)
123 with loader_module_obj(self) as module:
124 data = hy_compile(hy_tree, module)
125
126 return _py_source_to_code(self, data, path, _optimize=_optimize)
127
128
129 importlib.machinery.SourceFileLoader.source_to_code = _hy_source_to_code
130
131 # This is actually needed; otherwise, pre-created finders assigned to the
132 # current dir (i.e. `''`) in `sys.path` will not catch absolute imports of
133 # directory-local modules!
134 sys.path_importer_cache.clear()
135
136 # Do this one just in case?
137 importlib.invalidate_caches()
138
139 # These aren't truly cross-compliant.
140 # They're useful for testing, though.
141 class HyImporter(importlib.machinery.FileFinder):
142 pass
143
144
145 class HyLoader(importlib.machinery.SourceFileLoader):
146 pass
147
148
149 # We create a separate version of runpy, "runhy", that prefers Hy source over
150 # Python.
151 runhy = importlib.import_module("runpy")
152
153 runhy._get_code_from_file = partial(_get_code_from_file, hy_src_check=_could_be_hy_src)
154
155 del sys.modules["runpy"]
156
157 runpy = importlib.import_module("runpy")
158
159 _runpy_get_code_from_file = runpy._get_code_from_file
160 runpy._get_code_from_file = _get_code_from_file
161
162
163 def _import_from_path(name, path):
164 """A helper function that imports a module from the given path."""
165 spec = importlib.util.spec_from_file_location(name, path)
166 mod = importlib.util.module_from_spec(spec)
167 spec.loader.exec_module(mod)
168 return mod
169
170
171 def _inject_builtins():
172 """Inject the Hy core macros into Python's builtins if necessary"""
173 if hasattr(builtins, "__hy_injected__"):
174 return
175 hy.macros.load_macros(builtins)
176 # Set the marker so we don't inject again.
177 builtins.__hy_injected__ = True
178
[end of hy/importer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/importer.py b/hy/importer.py
--- a/hy/importer.py
+++ b/hy/importer.py
@@ -5,10 +5,12 @@
import pkgutil
import sys
import types
+import zipimport
from contextlib import contextmanager
from functools import partial
import hy
+from hy._compat import PY3_8
from hy.compiler import hy_compile
from hy.reader import read_many
@@ -107,10 +109,8 @@
def _could_be_hy_src(filename):
return os.path.isfile(filename) and (
- filename.endswith(".hy")
- or not any(
- filename.endswith(ext) for ext in importlib.machinery.SOURCE_SUFFIXES[1:]
- )
+ os.path.splitext(filename)[1]
+ not in set(importlib.machinery.SOURCE_SUFFIXES) - {".hy"}
)
@@ -128,6 +128,27 @@
importlib.machinery.SourceFileLoader.source_to_code = _hy_source_to_code
+
+if PY3_8 and (".hy", False, False) not in zipimport._zip_searchorder:
+ zipimport._zip_searchorder += ((".hy", False, False),)
+ _py_compile_source = zipimport._compile_source
+
+ def _hy_compile_source(pathname, source):
+ if not pathname.endswith(".hy"):
+ return _py_compile_source(pathname, source)
+ return compile(
+ hy_compile(
+ read_many(source.decode("UTF-8"), filename=pathname, skip_shebang=True),
+ f"<zip:{pathname}>",
+ ),
+ pathname,
+ "exec",
+ dont_inherit=True,
+ )
+
+ zipimport._compile_source = _hy_compile_source
+
+
# This is actually needed; otherwise, pre-created finders assigned to the
# current dir (i.e. `''`) in `sys.path` will not catch absolute imports of
# directory-local modules!
| {"golden_diff": "diff --git a/hy/importer.py b/hy/importer.py\n--- a/hy/importer.py\n+++ b/hy/importer.py\n@@ -5,10 +5,12 @@\n import pkgutil\n import sys\n import types\n+import zipimport\n from contextlib import contextmanager\n from functools import partial\n \n import hy\n+from hy._compat import PY3_8\n from hy.compiler import hy_compile\n from hy.reader import read_many\n \n@@ -107,10 +109,8 @@\n \n def _could_be_hy_src(filename):\n return os.path.isfile(filename) and (\n- filename.endswith(\".hy\")\n- or not any(\n- filename.endswith(ext) for ext in importlib.machinery.SOURCE_SUFFIXES[1:]\n- )\n+ os.path.splitext(filename)[1]\n+ not in set(importlib.machinery.SOURCE_SUFFIXES) - {\".hy\"}\n )\n \n \n@@ -128,6 +128,27 @@\n \n importlib.machinery.SourceFileLoader.source_to_code = _hy_source_to_code\n \n+\n+if PY3_8 and (\".hy\", False, False) not in zipimport._zip_searchorder:\n+ zipimport._zip_searchorder += ((\".hy\", False, False),)\n+ _py_compile_source = zipimport._compile_source\n+\n+ def _hy_compile_source(pathname, source):\n+ if not pathname.endswith(\".hy\"):\n+ return _py_compile_source(pathname, source)\n+ return compile(\n+ hy_compile(\n+ read_many(source.decode(\"UTF-8\"), filename=pathname, skip_shebang=True),\n+ f\"<zip:{pathname}>\",\n+ ),\n+ pathname,\n+ \"exec\",\n+ dont_inherit=True,\n+ )\n+\n+ zipimport._compile_source = _hy_compile_source\n+\n+\n # This is actually needed; otherwise, pre-created finders assigned to the\n # current dir (i.e. `''`) in `sys.path` will not catch absolute imports of\n # directory-local modules!\n", "issue": "Import from zip files\nIt would be handy to be able to import .hy files from .zip archives (for those of us who like to deploy stuff in single file bundles).\n\nRight now I'm working around this by doing:\n\n```\nBUNDLE=sushy.zip\n\nclean:\n rm *.zip\n\nbundle:\n hyc sushy/*.hy\n zip -r9 $(BUNDLE) . -i *.py *.pyc\n\ntest-bundle:\n PYTHONPATH=$(BUNDLE) python -m sushy\n```\n\n", "before_files": [{"content": "import builtins\nimport importlib\nimport inspect\nimport os\nimport pkgutil\nimport sys\nimport types\nfrom contextlib import contextmanager\nfrom functools import partial\n\nimport hy\nfrom hy.compiler import hy_compile\nfrom hy.reader import read_many\n\n\n@contextmanager\ndef loader_module_obj(loader):\n \"\"\"Use the module object associated with a loader.\n\n This is intended to be used by a loader object itself, and primarily as a\n work-around for attempts to get module and/or file code from a loader\n without actually creating a module object. Since Hy currently needs the\n module object for macro importing, expansion, and whatnot, using this will\n reconcile Hy with such attempts.\n\n For example, if we're first compiling a Hy script starting from\n `runpy.run_path`, the Hy compiler will need a valid module object in which\n to run, but, given the way `runpy.run_path` works, there might not be one\n yet (e.g. `__main__` for a .hy file). 
We compensate by properly loading\n the module here.\n\n The function `inspect.getmodule` has a hidden-ish feature that returns\n modules using their associated filenames (via `inspect.modulesbyfile`),\n and, since the Loaders (and their delegate Loaders) carry a filename/path\n associated with the parent package, we use it as a more robust attempt to\n obtain an existing module object.\n\n When no module object is found, a temporary, minimally sufficient module\n object is created for the duration of the `with` body.\n \"\"\"\n tmp_mod = False\n\n try:\n module = inspect.getmodule(None, _filename=loader.path)\n except KeyError:\n module = None\n\n if module is None:\n tmp_mod = True\n module = sys.modules.setdefault(loader.name, types.ModuleType(loader.name))\n module.__file__ = loader.path\n module.__name__ = loader.name\n\n try:\n yield module\n finally:\n if tmp_mod:\n del sys.modules[loader.name]\n\n\ndef _hy_code_from_file(filename, loader_type=None):\n \"\"\"Use PEP-302 loader to produce code for a given Hy source file.\"\"\"\n full_fname = os.path.abspath(filename)\n fname_path, fname_file = os.path.split(full_fname)\n modname = os.path.splitext(fname_file)[0]\n sys.path.insert(0, fname_path)\n try:\n if loader_type is None:\n loader = pkgutil.get_loader(modname)\n else:\n loader = loader_type(modname, full_fname)\n code = loader.get_code(modname)\n finally:\n sys.path.pop(0)\n\n return code\n\n\ndef _get_code_from_file(run_name, fname=None, hy_src_check=lambda x: x.endswith(\".hy\")):\n \"\"\"A patch of `runpy._get_code_from_file` that will also run and cache Hy\n code.\n \"\"\"\n if fname is None and run_name is not None:\n fname = run_name\n\n # Check for bytecode first. (This is what the `runpy` version does!)\n with open(fname, \"rb\") as f:\n code = pkgutil.read_code(f)\n\n if code is None:\n if hy_src_check(fname):\n code = _hy_code_from_file(fname, loader_type=HyLoader)\n else:\n # Try normal source\n with open(fname, \"rb\") as f:\n # This code differs from `runpy`'s only in that we\n # force decoding into UTF-8.\n source = f.read().decode(\"utf-8\")\n code = compile(source, fname, \"exec\")\n\n return (code, fname)\n\n\nimportlib.machinery.SOURCE_SUFFIXES.insert(0, \".hy\")\n_py_source_to_code = importlib.machinery.SourceFileLoader.source_to_code\n\n\ndef _could_be_hy_src(filename):\n return os.path.isfile(filename) and (\n filename.endswith(\".hy\")\n or not any(\n filename.endswith(ext) for ext in importlib.machinery.SOURCE_SUFFIXES[1:]\n )\n )\n\n\ndef _hy_source_to_code(self, data, path, _optimize=-1):\n if _could_be_hy_src(path):\n if os.environ.get(\"HY_MESSAGE_WHEN_COMPILING\"):\n print(\"Compiling\", path, file=sys.stderr)\n source = data.decode(\"utf-8\")\n hy_tree = read_many(source, filename=path, skip_shebang=True)\n with loader_module_obj(self) as module:\n data = hy_compile(hy_tree, module)\n\n return _py_source_to_code(self, data, path, _optimize=_optimize)\n\n\nimportlib.machinery.SourceFileLoader.source_to_code = _hy_source_to_code\n\n# This is actually needed; otherwise, pre-created finders assigned to the\n# current dir (i.e. 
`''`) in `sys.path` will not catch absolute imports of\n# directory-local modules!\nsys.path_importer_cache.clear()\n\n# Do this one just in case?\nimportlib.invalidate_caches()\n\n# These aren't truly cross-compliant.\n# They're useful for testing, though.\nclass HyImporter(importlib.machinery.FileFinder):\n pass\n\n\nclass HyLoader(importlib.machinery.SourceFileLoader):\n pass\n\n\n# We create a separate version of runpy, \"runhy\", that prefers Hy source over\n# Python.\nrunhy = importlib.import_module(\"runpy\")\n\nrunhy._get_code_from_file = partial(_get_code_from_file, hy_src_check=_could_be_hy_src)\n\ndel sys.modules[\"runpy\"]\n\nrunpy = importlib.import_module(\"runpy\")\n\n_runpy_get_code_from_file = runpy._get_code_from_file\nrunpy._get_code_from_file = _get_code_from_file\n\n\ndef _import_from_path(name, path):\n \"\"\"A helper function that imports a module from the given path.\"\"\"\n spec = importlib.util.spec_from_file_location(name, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n\n\ndef _inject_builtins():\n \"\"\"Inject the Hy core macros into Python's builtins if necessary\"\"\"\n if hasattr(builtins, \"__hy_injected__\"):\n return\n hy.macros.load_macros(builtins)\n # Set the marker so we don't inject again.\n builtins.__hy_injected__ = True\n", "path": "hy/importer.py"}]} | 2,465 | 447 |
gh_patches_debug_13633 | rasdani/github-patches | git_diff | feast-dev__feast-731 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feast cli feature-sets describe broken
## Expected Behavior
`feast feature-sets describe <name>` cannot be used because it doesn't allow the user to set a project, nor does it default to any value.
## Steps to reproduce
Call `feast feature-sets describe <some_feature_set>`
### Specifications
- Version: 0.5.0
## Possible Solution
The method calls `fs = feast_client.get_feature_set(name=name)`. Since no project is provided to `get_feature_set`, a default project needs to be set in the client.
Either
1. Allow users to pass feature set ids with projects specified (`project/feature_set`) or
2. Allow users to set a default project.
The method should fall back to a default project (`default`) should one not be provided.
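
A minimal sketch of option 2 applied to `feature_set_describe` in `sdk/python/feast/cli.py` (a drop-in replacement for the current command, reusing the imports already in that file; the flag name and its `default` value are illustrative):

```
@feature_set.command("describe")
@click.argument("name", type=click.STRING)
@click.option(
    "--project",
    "-p",
    help="Project that the feature set belongs to",
    type=click.STRING,
    default="default",
)
def feature_set_describe(name: str, project: str):
    """
    Describe a feature set
    """
    feast_client = Client()  # type: Client
    # Fall back to the "default" project when the user does not pass one.
    fs = feast_client.get_feature_set(name=name, project=project)
    if not fs:
        print(f'Feature set with name "{name}" could not be found')
        return

    print(yaml.dump(yaml.safe_load(str(fs)), default_flow_style=False, sort_keys=False))
```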
</issue>
<code>
[start of sdk/python/feast/cli.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import logging
17 import sys
18
19 import click
20 import pkg_resources
21 import yaml
22
23 from feast.client import Client
24 from feast.config import Config
25 from feast.core.IngestionJob_pb2 import IngestionJobStatus
26 from feast.feature_set import FeatureSet, FeatureSetRef
27 from feast.loaders.yaml import yaml_loader
28
29 _logger = logging.getLogger(__name__)
30
31 _common_options = [
32 click.option("--core-url", help="Set Feast core URL to connect to"),
33 click.option("--serving-url", help="Set Feast serving URL to connect to"),
34 ]
35
36
37 def common_options(func):
38 """
39 Options that are available for most CLI commands
40 """
41 for option in reversed(_common_options):
42 func = option(func)
43 return func
44
45
46 @click.group()
47 def cli():
48 pass
49
50
51 @cli.command()
52 @click.option(
53 "--client-only", "-c", is_flag=True, help="Print only the version of the CLI"
54 )
55 @common_options
56 def version(client_only: bool, **kwargs):
57 """
58 Displays version and connectivity information
59 """
60
61 try:
62 feast_versions_dict = {
63 "sdk": {"version": str(pkg_resources.get_distribution("feast"))}
64 }
65
66 if not client_only:
67 feast_client = Client(**kwargs)
68 feast_versions_dict.update(feast_client.version())
69
70 print(json.dumps(feast_versions_dict))
71 except Exception as e:
72 _logger.error("Error initializing backend store")
73 _logger.exception(e)
74 sys.exit(1)
75
76
77 @cli.group()
78 def config():
79 """
80 View and edit Feast properties
81 """
82 pass
83
84
85 @config.command(name="list")
86 def config_list():
87 """
88 List Feast properties for the currently active configuration
89 """
90 try:
91 print(Config())
92 except Exception as e:
93 _logger.error("Error occurred when reading Feast configuration file")
94 _logger.exception(e)
95 sys.exit(1)
96
97
98 @config.command(name="set")
99 @click.argument("prop")
100 @click.argument("value")
101 def config_set(prop, value):
102 """
103 Set a Feast properties for the currently active configuration
104 """
105 try:
106 conf = Config()
107 conf.set(option=prop.strip(), value=value.strip())
108 conf.save()
109 except Exception as e:
110 _logger.error("Error in reading config file")
111 _logger.exception(e)
112 sys.exit(1)
113
114
115 @cli.group(name="feature-sets")
116 def feature_set():
117 """
118 Create and manage feature sets
119 """
120 pass
121
122
123 @feature_set.command(name="list")
124 def feature_set_list():
125 """
126 List all feature sets
127 """
128 feast_client = Client() # type: Client
129
130 table = []
131 for fs in feast_client.list_feature_sets(project="*", name="*"):
132 table.append([fs.name, repr(fs)])
133
134 from tabulate import tabulate
135
136 print(tabulate(table, headers=["NAME", "REFERENCE"], tablefmt="plain"))
137
138
139 @feature_set.command("apply")
140 # TODO: add project option to overwrite project setting.
141 @click.option(
142 "--filename",
143 "-f",
144 help="Path to a feature set configuration file that will be applied",
145 type=click.Path(exists=True),
146 )
147 def feature_set_create(filename):
148 """
149 Create or update a feature set
150 """
151
152 feature_sets = [FeatureSet.from_dict(fs_dict) for fs_dict in yaml_loader(filename)]
153 feast_client = Client() # type: Client
154 feast_client.apply(feature_sets)
155
156
157 @feature_set.command("describe")
158 @click.argument("name", type=click.STRING)
159 def feature_set_describe(name: str):
160 """
161 Describe a feature set
162 """
163 feast_client = Client() # type: Client
164 fs = feast_client.get_feature_set(name=name)
165 if not fs:
166 print(f'Feature set with name "{name}" could not be found')
167 return
168
169 print(yaml.dump(yaml.safe_load(str(fs)), default_flow_style=False, sort_keys=False))
170
171
172 @cli.group(name="projects")
173 def project():
174 """
175 Create and manage projects
176 """
177 pass
178
179
180 @project.command(name="create")
181 @click.argument("name", type=click.STRING)
182 def project_create(name: str):
183 """
184 Create a project
185 """
186 feast_client = Client() # type: Client
187 feast_client.create_project(name)
188
189
190 @project.command(name="archive")
191 @click.argument("name", type=click.STRING)
192 def project_archive(name: str):
193 """
194 Archive a project
195 """
196 feast_client = Client() # type: Client
197 feast_client.archive_project(name)
198
199
200 @project.command(name="list")
201 def project_list():
202 """
203 List all projects
204 """
205 feast_client = Client() # type: Client
206
207 table = []
208 for project in feast_client.list_projects():
209 table.append([project])
210
211 from tabulate import tabulate
212
213 print(tabulate(table, headers=["NAME"], tablefmt="plain"))
214
215
216 @cli.group(name="ingest-jobs")
217 def ingest_job():
218 """
219 Manage ingestion jobs
220 """
221 pass
222
223
224 @ingest_job.command("list")
225 @click.option("--job-id", "-i", help="Show only ingestion jobs with the given job id")
226 @click.option(
227 "--feature-set-ref",
228 "-f",
229 help="Show only ingestion job targeting the feature set with the given reference",
230 )
231 @click.option(
232 "--store-name",
233 "-s",
234 help="List only ingestion job that ingest into feast store with given name",
235 )
236 # TODO: types
237 def ingest_job_list(job_id, feature_set_ref, store_name):
238 """
239 List ingestion jobs
240 """
241 # parse feature set reference
242 if feature_set_ref is not None:
243 feature_set_ref = FeatureSetRef.from_str(feature_set_ref)
244
245 # pull & render ingestion jobs as a table
246 feast_client = Client()
247 table = []
248 for ingest_job in feast_client.list_ingest_jobs(
249 job_id=job_id, feature_set_ref=feature_set_ref, store_name=store_name
250 ):
251 table.append([ingest_job.id, IngestionJobStatus.Name(ingest_job.status)])
252
253 from tabulate import tabulate
254
255 print(tabulate(table, headers=["ID", "STATUS"], tablefmt="plain"))
256
257
258 @ingest_job.command("describe")
259 @click.argument("job_id")
260 def ingest_job_describe(job_id: str):
261 """
262 Describe the ingestion job with the given id.
263 """
264 # find ingestion job for id
265 feast_client = Client()
266 jobs = feast_client.list_ingest_jobs(job_id=job_id)
267 if len(jobs) < 1:
268 print(f"Ingestion Job with id {job_id} could not be found")
269 sys.exit(1)
270 job = jobs[0]
271
272 # pretty render ingestion job as yaml
273 print(
274 yaml.dump(yaml.safe_load(str(job)), default_flow_style=False, sort_keys=False)
275 )
276
277
278 @ingest_job.command("stop")
279 @click.option(
280 "--wait", "-w", is_flag=True, help="Wait for the ingestion job to fully stop."
281 )
282 @click.option(
283 "--timeout",
284 "-t",
285 default=600,
286 help="Timeout in seconds to wait for the job to stop.",
287 )
288 @click.argument("job_id")
289 def ingest_job_stop(wait: bool, timeout: int, job_id: str):
290 """
291 Stop ingestion job for id.
292 """
293 # find ingestion job for id
294 feast_client = Client()
295 jobs = feast_client.list_ingest_jobs(job_id=job_id)
296 if len(jobs) < 1:
297 print(f"Ingestion Job with id {job_id} could not be found")
298 sys.exit(1)
299 job = jobs[0]
300
301 feast_client.stop_ingest_job(job)
302
303 # wait for ingestion job to stop
304 if wait:
305 job.wait(IngestionJobStatus.ABORTED, timeout=timeout)
306
307
308 @ingest_job.command("restart")
309 @click.argument("job_id")
310 def ingest_job_restart(job_id: str):
311 """
312 Restart job for id.
313 Waits for the job to fully restart.
314 """
315 # find ingestion job for id
316 feast_client = Client()
317 jobs = feast_client.list_ingest_jobs(job_id=job_id)
318 if len(jobs) < 1:
319 print(f"Ingestion Job with id {job_id} could not be found")
320 sys.exit(1)
321 job = jobs[0]
322
323 feast_client.restart_ingest_job(job)
324
325
326 @cli.command()
327 @click.option(
328 "--name", "-n", help="Feature set name to ingest data into", required=True
329 )
330 @click.option(
331 "--filename",
332 "-f",
333 help="Path to file to be ingested",
334 type=click.Path(exists=True),
335 required=True,
336 )
337 @click.option(
338 "--file-type",
339 "-t",
340 type=click.Choice(["CSV"], case_sensitive=False),
341 help="Type of file to ingest. Defaults to CSV.",
342 )
343 def ingest(name, filename, file_type):
344 """
345 Ingest feature data into a feature set
346 """
347
348 feast_client = Client() # type: Client
349 feature_set = feast_client.get_feature_set(name=name)
350 feature_set.ingest_file(file_path=filename)
351
352
353 if __name__ == "__main__":
354 cli()
355
[end of sdk/python/feast/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/cli.py b/sdk/python/feast/cli.py
--- a/sdk/python/feast/cli.py
+++ b/sdk/python/feast/cli.py
@@ -156,12 +156,20 @@
@feature_set.command("describe")
@click.argument("name", type=click.STRING)
-def feature_set_describe(name: str):
[email protected](
+ "--project",
+ "-p",
+ help="Project that feature set belongs to",
+ type=click.STRING,
+ default="default",
+)
+def feature_set_describe(name: str, project: str):
"""
Describe a feature set
"""
feast_client = Client() # type: Client
- fs = feast_client.get_feature_set(name=name)
+ fs = feast_client.get_feature_set(name=name, project=project)
+
if not fs:
print(f'Feature set with name "{name}" could not be found')
return
| {"golden_diff": "diff --git a/sdk/python/feast/cli.py b/sdk/python/feast/cli.py\n--- a/sdk/python/feast/cli.py\n+++ b/sdk/python/feast/cli.py\n@@ -156,12 +156,20 @@\n \n @feature_set.command(\"describe\")\n @click.argument(\"name\", type=click.STRING)\n-def feature_set_describe(name: str):\[email protected](\n+ \"--project\",\n+ \"-p\",\n+ help=\"Project that feature set belongs to\",\n+ type=click.STRING,\n+ default=\"default\",\n+)\n+def feature_set_describe(name: str, project: str):\n \"\"\"\n Describe a feature set\n \"\"\"\n feast_client = Client() # type: Client\n- fs = feast_client.get_feature_set(name=name)\n+ fs = feast_client.get_feature_set(name=name, project=project)\n+\n if not fs:\n print(f'Feature set with name \"{name}\" could not be found')\n return\n", "issue": "feast cli feature-sets describe broken\n## Expected Behavior \r\n`feast feature-sets describe <name>` cannot be used because it doesn't allow the user to set a project, nor does it default to any value.\r\n\r\n## Steps to reproduce\r\nCall `feast feature-sets describe <some_feature_set>`\r\n\r\n### Specifications\r\n\r\n- Version: 0.5.0\r\n\r\n## Possible Solution\r\n\r\nThe method calls `fs = feast_client.get_feature_set(name=name)`. Since no project is provided to `get_feature_set`, a default project needs to be set in the client. \r\n\r\nEither \r\n1. Allow users to pass feature set ids with projects specified (`project/feature_set`) or\r\n2. Allow users to set a default project.\r\n\r\nThe method should fall back to a default project (`default`) should one not be provided.\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport sys\n\nimport click\nimport pkg_resources\nimport yaml\n\nfrom feast.client import Client\nfrom feast.config import Config\nfrom feast.core.IngestionJob_pb2 import IngestionJobStatus\nfrom feast.feature_set import FeatureSet, FeatureSetRef\nfrom feast.loaders.yaml import yaml_loader\n\n_logger = logging.getLogger(__name__)\n\n_common_options = [\n click.option(\"--core-url\", help=\"Set Feast core URL to connect to\"),\n click.option(\"--serving-url\", help=\"Set Feast serving URL to connect to\"),\n]\n\n\ndef common_options(func):\n \"\"\"\n Options that are available for most CLI commands\n \"\"\"\n for option in reversed(_common_options):\n func = option(func)\n return func\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\[email protected](\n \"--client-only\", \"-c\", is_flag=True, help=\"Print only the version of the CLI\"\n)\n@common_options\ndef version(client_only: bool, **kwargs):\n \"\"\"\n Displays version and connectivity information\n \"\"\"\n\n try:\n feast_versions_dict = {\n \"sdk\": {\"version\": str(pkg_resources.get_distribution(\"feast\"))}\n }\n\n if not client_only:\n feast_client = Client(**kwargs)\n feast_versions_dict.update(feast_client.version())\n\n print(json.dumps(feast_versions_dict))\n except Exception as e:\n _logger.error(\"Error 
initializing backend store\")\n _logger.exception(e)\n sys.exit(1)\n\n\[email protected]()\ndef config():\n \"\"\"\n View and edit Feast properties\n \"\"\"\n pass\n\n\[email protected](name=\"list\")\ndef config_list():\n \"\"\"\n List Feast properties for the currently active configuration\n \"\"\"\n try:\n print(Config())\n except Exception as e:\n _logger.error(\"Error occurred when reading Feast configuration file\")\n _logger.exception(e)\n sys.exit(1)\n\n\[email protected](name=\"set\")\[email protected](\"prop\")\[email protected](\"value\")\ndef config_set(prop, value):\n \"\"\"\n Set a Feast properties for the currently active configuration\n \"\"\"\n try:\n conf = Config()\n conf.set(option=prop.strip(), value=value.strip())\n conf.save()\n except Exception as e:\n _logger.error(\"Error in reading config file\")\n _logger.exception(e)\n sys.exit(1)\n\n\[email protected](name=\"feature-sets\")\ndef feature_set():\n \"\"\"\n Create and manage feature sets\n \"\"\"\n pass\n\n\n@feature_set.command(name=\"list\")\ndef feature_set_list():\n \"\"\"\n List all feature sets\n \"\"\"\n feast_client = Client() # type: Client\n\n table = []\n for fs in feast_client.list_feature_sets(project=\"*\", name=\"*\"):\n table.append([fs.name, repr(fs)])\n\n from tabulate import tabulate\n\n print(tabulate(table, headers=[\"NAME\", \"REFERENCE\"], tablefmt=\"plain\"))\n\n\n@feature_set.command(\"apply\")\n# TODO: add project option to overwrite project setting.\[email protected](\n \"--filename\",\n \"-f\",\n help=\"Path to a feature set configuration file that will be applied\",\n type=click.Path(exists=True),\n)\ndef feature_set_create(filename):\n \"\"\"\n Create or update a feature set\n \"\"\"\n\n feature_sets = [FeatureSet.from_dict(fs_dict) for fs_dict in yaml_loader(filename)]\n feast_client = Client() # type: Client\n feast_client.apply(feature_sets)\n\n\n@feature_set.command(\"describe\")\[email protected](\"name\", type=click.STRING)\ndef feature_set_describe(name: str):\n \"\"\"\n Describe a feature set\n \"\"\"\n feast_client = Client() # type: Client\n fs = feast_client.get_feature_set(name=name)\n if not fs:\n print(f'Feature set with name \"{name}\" could not be found')\n return\n\n print(yaml.dump(yaml.safe_load(str(fs)), default_flow_style=False, sort_keys=False))\n\n\[email protected](name=\"projects\")\ndef project():\n \"\"\"\n Create and manage projects\n \"\"\"\n pass\n\n\[email protected](name=\"create\")\[email protected](\"name\", type=click.STRING)\ndef project_create(name: str):\n \"\"\"\n Create a project\n \"\"\"\n feast_client = Client() # type: Client\n feast_client.create_project(name)\n\n\[email protected](name=\"archive\")\[email protected](\"name\", type=click.STRING)\ndef project_archive(name: str):\n \"\"\"\n Archive a project\n \"\"\"\n feast_client = Client() # type: Client\n feast_client.archive_project(name)\n\n\[email protected](name=\"list\")\ndef project_list():\n \"\"\"\n List all projects\n \"\"\"\n feast_client = Client() # type: Client\n\n table = []\n for project in feast_client.list_projects():\n table.append([project])\n\n from tabulate import tabulate\n\n print(tabulate(table, headers=[\"NAME\"], tablefmt=\"plain\"))\n\n\[email protected](name=\"ingest-jobs\")\ndef ingest_job():\n \"\"\"\n Manage ingestion jobs\n \"\"\"\n pass\n\n\n@ingest_job.command(\"list\")\[email protected](\"--job-id\", \"-i\", help=\"Show only ingestion jobs with the given job id\")\[email protected](\n \"--feature-set-ref\",\n \"-f\",\n help=\"Show only ingestion job 
targeting the feature set with the given reference\",\n)\[email protected](\n \"--store-name\",\n \"-s\",\n help=\"List only ingestion job that ingest into feast store with given name\",\n)\n# TODO: types\ndef ingest_job_list(job_id, feature_set_ref, store_name):\n \"\"\"\n List ingestion jobs\n \"\"\"\n # parse feature set reference\n if feature_set_ref is not None:\n feature_set_ref = FeatureSetRef.from_str(feature_set_ref)\n\n # pull & render ingestion jobs as a table\n feast_client = Client()\n table = []\n for ingest_job in feast_client.list_ingest_jobs(\n job_id=job_id, feature_set_ref=feature_set_ref, store_name=store_name\n ):\n table.append([ingest_job.id, IngestionJobStatus.Name(ingest_job.status)])\n\n from tabulate import tabulate\n\n print(tabulate(table, headers=[\"ID\", \"STATUS\"], tablefmt=\"plain\"))\n\n\n@ingest_job.command(\"describe\")\[email protected](\"job_id\")\ndef ingest_job_describe(job_id: str):\n \"\"\"\n Describe the ingestion job with the given id.\n \"\"\"\n # find ingestion job for id\n feast_client = Client()\n jobs = feast_client.list_ingest_jobs(job_id=job_id)\n if len(jobs) < 1:\n print(f\"Ingestion Job with id {job_id} could not be found\")\n sys.exit(1)\n job = jobs[0]\n\n # pretty render ingestion job as yaml\n print(\n yaml.dump(yaml.safe_load(str(job)), default_flow_style=False, sort_keys=False)\n )\n\n\n@ingest_job.command(\"stop\")\[email protected](\n \"--wait\", \"-w\", is_flag=True, help=\"Wait for the ingestion job to fully stop.\"\n)\[email protected](\n \"--timeout\",\n \"-t\",\n default=600,\n help=\"Timeout in seconds to wait for the job to stop.\",\n)\[email protected](\"job_id\")\ndef ingest_job_stop(wait: bool, timeout: int, job_id: str):\n \"\"\"\n Stop ingestion job for id.\n \"\"\"\n # find ingestion job for id\n feast_client = Client()\n jobs = feast_client.list_ingest_jobs(job_id=job_id)\n if len(jobs) < 1:\n print(f\"Ingestion Job with id {job_id} could not be found\")\n sys.exit(1)\n job = jobs[0]\n\n feast_client.stop_ingest_job(job)\n\n # wait for ingestion job to stop\n if wait:\n job.wait(IngestionJobStatus.ABORTED, timeout=timeout)\n\n\n@ingest_job.command(\"restart\")\[email protected](\"job_id\")\ndef ingest_job_restart(job_id: str):\n \"\"\"\n Restart job for id.\n Waits for the job to fully restart.\n \"\"\"\n # find ingestion job for id\n feast_client = Client()\n jobs = feast_client.list_ingest_jobs(job_id=job_id)\n if len(jobs) < 1:\n print(f\"Ingestion Job with id {job_id} could not be found\")\n sys.exit(1)\n job = jobs[0]\n\n feast_client.restart_ingest_job(job)\n\n\[email protected]()\[email protected](\n \"--name\", \"-n\", help=\"Feature set name to ingest data into\", required=True\n)\[email protected](\n \"--filename\",\n \"-f\",\n help=\"Path to file to be ingested\",\n type=click.Path(exists=True),\n required=True,\n)\[email protected](\n \"--file-type\",\n \"-t\",\n type=click.Choice([\"CSV\"], case_sensitive=False),\n help=\"Type of file to ingest. Defaults to CSV.\",\n)\ndef ingest(name, filename, file_type):\n \"\"\"\n Ingest feature data into a feature set\n \"\"\"\n\n feast_client = Client() # type: Client\n feature_set = feast_client.get_feature_set(name=name)\n feature_set.ingest_file(file_path=filename)\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "sdk/python/feast/cli.py"}]} | 3,823 | 214 |
gh_patches_debug_18234 | rasdani/github-patches | git_diff | Pyomo__pyomo-1009 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cython support is broken.
Running `python setup.py install --with-cython` yields an error about `visitor_expr.pyx` not existing.
It turns out that the list of files-to-Cythonize in `setup.py` is not up-to-date.
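A sketch of the kind of fix on the `setup.py` side (treat the exact file list as an assumption about the current tree): either correct the stale `visitor_expr.pyx` entry, or filter the list down to sources that actually exist before calling `cythonize`, so one renamed module cannot break the whole install.

```
import os
import shutil

from Cython.Build import cythonize

files = [
    "pyomo/core/expr/numvalue.pyx",
    "pyomo/core/expr/numeric_expr.pyx",
    "pyomo/core/expr/logical_expr.pyx",
    # "pyomo/core/expr/visitor_expr.pyx" no longer exists under that name,
    # so the entry has to be corrected or dropped here.
    "pyomo/core/util.pyx",
    "pyomo/repn/standard_repn.pyx",
    "pyomo/repn/plugins/cpxlp.pyx",
    "pyomo/repn/plugins/gams_writer.pyx",
    "pyomo/repn/plugins/baron_writer.pyx",
    "pyomo/repn/plugins/ampl/ampl_.pyx",
]

# Only cythonize the .py sources that are actually present in the checkout.
existing = [f for f in files if os.path.isfile(f[:-1])]
for f in existing:
    shutil.copyfile(f[:-1], f)  # copy foo.py -> foo.pyx, as setup.py already does
ext_modules = cythonize(existing)
```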
</issue>
<code>
[start of setup.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """
12 Script to generate the installer for pyomo.
13 """
14
15 import sys
16 import os
17
18
19 def _find_packages(path):
20 """
21 Generate a list of nested packages
22 """
23 pkg_list = []
24 if not os.path.exists(path):
25 return []
26 if not os.path.exists(path+os.sep+"__init__.py"):
27 return []
28 else:
29 pkg_list.append(path)
30 for root, dirs, files in os.walk(path, topdown=True):
31 if root in pkg_list and "__init__.py" in files:
32 for name in dirs:
33 if os.path.exists(root+os.sep+name+os.sep+"__init__.py"):
34 pkg_list.append(root+os.sep+name)
35 return [pkg for pkg in map(lambda x:x.replace(os.sep, "."), pkg_list)]
36
37
38 def read(*rnames):
39 return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
40
41 def get_version():
42 # Source pyomo/version/info.py to get the version number
43 _verInfo = dict(globals())
44 _verFile = os.path.join(os.path.dirname(__file__),
45 'pyomo','version','info.py')
46 with open(_verFile) as _FILE:
47 exec(_FILE.read(), _verInfo)
48 return _verInfo['__version__']
49
50 requires = [
51 'PyUtilib>=5.7.1.dev0',
52 'appdirs',
53 'ply',
54 'six>=1.4',
55 ]
56 if sys.version_info < (2, 7):
57 requires.append('argparse')
58 requires.append('unittest2')
59 requires.append('ordereddict')
60
61 from setuptools import setup
62 import sys
63
64 CYTHON_REQUIRED = "required"
65 if 'develop' in sys.argv:
66 using_cython = False
67 else:
68 using_cython = "automatic"
69 if '--with-cython' in sys.argv:
70 using_cython = CYTHON_REQUIRED
71 sys.argv.remove('--with-cython')
72 if '--without-cython' in sys.argv:
73 using_cython = False
74 sys.argv.remove('--without-cython')
75
76 ext_modules = []
77 if using_cython:
78 try:
79 import platform
80 if platform.python_implementation() != "CPython":
81 # break out of this try-except (disable Cython)
82 raise RuntimeError("Cython is only supported under CPython")
83 from Cython.Build import cythonize
84 #
85 # Note: The Cython developers recommend that you destribute C source
86 # files to users. But this is fine for evaluating the utility of Cython
87 #
88 import shutil
89 files = [
90 "pyomo/core/expr/numvalue.pyx",
91 "pyomo/core/expr/numeric_expr.pyx",
92 "pyomo/core/expr/logical_expr.pyx",
93 "pyomo/core/expr/visitor_expr.pyx",
94 "pyomo/core/util.pyx",
95 "pyomo/repn/standard_repn.pyx",
96 "pyomo/repn/plugins/cpxlp.pyx",
97 "pyomo/repn/plugins/gams_writer.pyx",
98 "pyomo/repn/plugins/baron_writer.pyx",
99 "pyomo/repn/plugins/ampl/ampl_.pyx",
100 ]
101 for f in files:
102 shutil.copyfile(f[:-1], f)
103 ext_modules = cythonize(files)
104 except:
105 if using_cython == CYTHON_REQUIRED:
106 print("""
107 ERROR: Cython was explicitly requested with --with-cython, but cythonization
108 of core Pyomo modules failed.
109 """)
110 raise
111 using_cython = False
112
113 packages = _find_packages('pyomo')
114
115 def run_setup():
116 setup(name='Pyomo',
117 #
118 # Note: the release number is set in pyomo/version/info.py
119 #
120 version=get_version(),
121 maintainer='William E. Hart',
122 maintainer_email='[email protected]',
123 url='http://pyomo.org',
124 license='BSD',
125 platforms=["any"],
126 description='Pyomo: Python Optimization Modeling Objects',
127 long_description=read('README.md'),
128 classifiers=[
129 'Development Status :: 5 - Production/Stable',
130 'Intended Audience :: End Users/Desktop',
131 'Intended Audience :: Science/Research',
132 'License :: OSI Approved :: BSD License',
133 'Natural Language :: English',
134 'Operating System :: MacOS',
135 'Operating System :: Microsoft :: Windows',
136 'Operating System :: Unix',
137 'Programming Language :: Python',
138 'Programming Language :: Python :: 2',
139 'Programming Language :: Python :: 2.7',
140 'Programming Language :: Python :: 3',
141 'Programming Language :: Python :: 3.4',
142 'Programming Language :: Python :: 3.5',
143 'Programming Language :: Python :: 3.6',
144 'Programming Language :: Python :: 3.7',
145 'Programming Language :: Python :: Implementation :: CPython',
146 'Programming Language :: Python :: Implementation :: Jython',
147 'Programming Language :: Python :: Implementation :: PyPy',
148 'Topic :: Scientific/Engineering :: Mathematics',
149 'Topic :: Software Development :: Libraries :: Python Modules' ],
150 packages=packages,
151 keywords=['optimization'],
152 install_requires=requires,
153 ext_modules = ext_modules,
154 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
155 entry_points="""
156 [console_scripts]
157 runbenders=pyomo.pysp.benders:Benders_main
158 evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main
159 runph=pyomo.pysp.phinit:PH_main
160 runef=pyomo.pysp.ef_writer_script:main
161 phsolverserver=pyomo.pysp.phsolverserver:main
162 scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main
163 computeconf=pyomo.pysp.computeconf:main
164
165 results_schema=pyomo.scripting.commands:results_schema
166 pyro_mip_server = pyomo.scripting.pyro_mip_server:main
167 test.pyomo = pyomo.scripting.runtests:runPyomoTests
168 pyomo = pyomo.scripting.pyomo_main:main_console_script
169 pyomo_ns = pyomo.scripting.commands:pyomo_ns
170 pyomo_nsc = pyomo.scripting.commands:pyomo_nsc
171 kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers
172 launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers
173 readsol = pyomo.scripting.commands:readsol
174 OSSolverService = pyomo.scripting.commands:OSSolverService
175 pyomo_python = pyomo.scripting.commands:pyomo_python
176 pyomo_old=pyomo.scripting.pyomo_command:main
177
178 [pyomo.command]
179 pyomo.runbenders=pyomo.pysp.benders
180 pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat
181 pyomo.runph=pyomo.pysp.phinit
182 pyomo.runef=pyomo.pysp.ef_writer_script
183 pyomo.phsolverserver=pyomo.pysp.phsolverserver
184 pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro
185 pyomo.computeconf=pyomo.pysp.computeconf
186
187 pyomo.help = pyomo.scripting.driver_help
188 pyomo.test.pyomo = pyomo.scripting.runtests
189 pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server
190 pyomo.results_schema=pyomo.scripting.commands
191 """
192 )
193
194 try:
195 run_setup()
196 except SystemExit as e_info:
197 # Cython can generate a SystemExit exception on Windows if the
198 # environment is missing / has an incorrect Microsoft compiler.
199 # Since Cython is not strictly required, we will disable Cython and
200 # try re-running setup(), but only for this very specific situation.
201 if 'Microsoft Visual C++' not in str(e_info):
202 raise
203 elif using_cython == CYTHON_REQUIRED:
204 print("""
205 ERROR: Cython was explicitly requested with --with-cython, but cythonization
206 of core Pyomo modules failed.
207 """)
208 raise
209 else:
210 print("""
211 ERROR: setup() failed:
212 %s
213 Re-running setup() without the Cython modules
214 """ % (e_info.message,))
215 ext_modules = []
216 run_setup()
217 print("""
218 WARNING: Installation completed successfully, but the attempt to cythonize
219 core Pyomo modules failed. Cython provides performance
220 optimizations and is not required for any Pyomo functionality.
221 Cython returned the following error:
222 "%s"
223 """ % (e_info.message,))
224
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -90,7 +90,7 @@
"pyomo/core/expr/numvalue.pyx",
"pyomo/core/expr/numeric_expr.pyx",
"pyomo/core/expr/logical_expr.pyx",
- "pyomo/core/expr/visitor_expr.pyx",
+ #"pyomo/core/expr/visitor.pyx",
"pyomo/core/util.pyx",
"pyomo/repn/standard_repn.pyx",
"pyomo/repn/plugins/cpxlp.pyx",
@@ -100,7 +100,8 @@
]
for f in files:
shutil.copyfile(f[:-1], f)
- ext_modules = cythonize(files)
+ ext_modules = cythonize(files, compiler_directives={
+ "language_level": 3 if sys.version_info >= (3, ) else 2})
except:
if using_cython == CYTHON_REQUIRED:
print("""
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -90,7 +90,7 @@\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n- \"pyomo/core/expr/visitor_expr.pyx\",\n+ #\"pyomo/core/expr/visitor.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n@@ -100,7 +100,8 @@\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n- ext_modules = cythonize(files)\n+ ext_modules = cythonize(files, compiler_directives={\n+ \"language_level\": 3 if sys.version_info >= (3, ) else 2})\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\n", "issue": "Cython support is broken.\nRunning `python setup.py install --with-cython` yield an error about `visitor_expr.pyx` not existing.\r\n\r\nIt turns out that the list of files-to-Cythonize in `setup.py` is not up-to-date.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport sys\nimport os\n\n\ndef _find_packages(path):\n \"\"\"\n Generate a list of nested packages\n \"\"\"\n pkg_list = []\n if not os.path.exists(path):\n return []\n if not os.path.exists(path+os.sep+\"__init__.py\"):\n return []\n else:\n pkg_list.append(path)\n for root, dirs, files in os.walk(path, topdown=True):\n if root in pkg_list and \"__init__.py\" in files:\n for name in dirs:\n if os.path.exists(root+os.sep+name+os.sep+\"__init__.py\"):\n pkg_list.append(root+os.sep+name)\n return [pkg for pkg in map(lambda x:x.replace(os.sep, \".\"), pkg_list)]\n\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\ndef get_version():\n # Source pyomo/version/info.py to get the version number\n _verInfo = dict(globals())\n _verFile = os.path.join(os.path.dirname(__file__),\n 'pyomo','version','info.py')\n with open(_verFile) as _FILE:\n exec(_FILE.read(), _verInfo)\n return _verInfo['__version__']\n\nrequires = [\n 'PyUtilib>=5.7.1.dev0',\n 'appdirs',\n 'ply',\n 'six>=1.4',\n ]\nif sys.version_info < (2, 7):\n requires.append('argparse')\n requires.append('unittest2')\n requires.append('ordereddict')\n\nfrom setuptools import setup\nimport sys\n\nCYTHON_REQUIRED = \"required\"\nif 'develop' in sys.argv:\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n import platform\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. 
But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/expr/numeric_expr.pyx\",\n \"pyomo/core/expr/logical_expr.pyx\",\n \"pyomo/core/expr/visitor_expr.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files)\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\npackages = _find_packages('pyomo')\n\ndef run_setup():\n setup(name='Pyomo',\n #\n # Note: the release number is set in pyomo/version/info.py\n #\n version=get_version(),\n maintainer='William E. Hart',\n maintainer_email='[email protected]',\n url='http://pyomo.org',\n license='BSD',\n platforms=[\"any\"],\n description='Pyomo: Python Optimization Modeling Objects',\n long_description=read('README.md'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n packages=packages,\n keywords=['optimization'],\n install_requires=requires,\n ext_modules = ext_modules,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points=\"\"\"\n [console_scripts]\n runbenders=pyomo.pysp.benders:Benders_main\n evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main\n runph=pyomo.pysp.phinit:PH_main\n runef=pyomo.pysp.ef_writer_script:main\n phsolverserver=pyomo.pysp.phsolverserver:main\n scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main\n computeconf=pyomo.pysp.computeconf:main\n\n results_schema=pyomo.scripting.commands:results_schema\n pyro_mip_server = pyomo.scripting.pyro_mip_server:main\n test.pyomo = pyomo.scripting.runtests:runPyomoTests\n pyomo = pyomo.scripting.pyomo_main:main_console_script\n pyomo_ns = pyomo.scripting.commands:pyomo_ns\n pyomo_nsc = pyomo.scripting.commands:pyomo_nsc\n kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers\n launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers\n readsol = pyomo.scripting.commands:readsol\n OSSolverService = pyomo.scripting.commands:OSSolverService\n pyomo_python = pyomo.scripting.commands:pyomo_python\n pyomo_old=pyomo.scripting.pyomo_command:main\n\n [pyomo.command]\n pyomo.runbenders=pyomo.pysp.benders\n pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat\n pyomo.runph=pyomo.pysp.phinit\n 
pyomo.runef=pyomo.pysp.ef_writer_script\n pyomo.phsolverserver=pyomo.pysp.phsolverserver\n pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro\n pyomo.computeconf=pyomo.pysp.computeconf\n\n pyomo.help = pyomo.scripting.driver_help\n pyomo.test.pyomo = pyomo.scripting.runtests\n pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server\n pyomo.results_schema=pyomo.scripting.commands\n \"\"\"\n )\n\ntry:\n run_setup()\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (e_info.message,))\n ext_modules = []\n run_setup()\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (e_info.message,))\n", "path": "setup.py"}]} | 3,154 | 228 |
gh_patches_debug_23975 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-449 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: let pipeline support returning `Series`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #380
- [x] whatsnew entry
The transformer is in charge of the specific transforming method.
The pipeline is in charge of connecting the results from each transformer.
I don't want to rewrite every transformer just to make each one support the DataFrame or Series data structure.
So the pipeline should be rebuilt instead: if the input a transformer receives from the previous transformer is a DataFrame or Series, its output should be a DataFrame or Series too.
</issue>
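The design sketched in the issue comes down to converting at the pipeline boundaries instead of inside every transformer: promote a Series to a one-column DataFrame before a step runs, then wrap the step's raw output back into whatever container went in. A minimal, standalone illustration of that idea (plain pandas/numpy, with names invented for this sketch rather than taken from dtoolkit):

```python
import numpy as np
import pandas as pd


def promote_to_frame(X):
    # A 1-D Series becomes a one-column DataFrame so 2-D transformers accept it.
    return X.to_frame() if isinstance(X, pd.Series) else X


def restore_like(result, original):
    # Re-wrap a transformer's raw output to match the container it was given.
    if isinstance(original, pd.Series):
        return pd.Series(np.asarray(result).ravel(), index=original.index, name=original.name)
    if isinstance(original, pd.DataFrame):
        return pd.DataFrame(result, index=original.index, columns=original.columns)
    return result
```

A pipeline step would then be connected as `restore_like(step.transform(promote_to_frame(X)), X)`, so whatever pandas type entered the step also leaves it.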
<code>
[start of dtoolkit/transformer/sklearn.py]
1 from __future__ import annotations
2
3 from textwrap import dedent
4 from typing import TYPE_CHECKING
5
6 import numpy as np
7 import pandas as pd
8 from pandas.util._decorators import doc
9 from sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler
10 from sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder
11
12 from dtoolkit.accessor.dataframe import cols # noqa
13 from dtoolkit.accessor.series import cols # noqa
14 from dtoolkit.transformer._util import transform_array_to_frame
15 from dtoolkit.transformer._util import transform_frame_to_series
16 from dtoolkit.transformer._util import transform_series_to_frame
17
18 if TYPE_CHECKING:
19 from scipy.sparse import csr_matrix
20
21 from dtoolkit._typing import SeriesOrFrame
22 from dtoolkit._typing import TwoDimArray
23
24
25 class MinMaxScaler(SKMinMaxScaler):
26 """
27 Transform features by scaling each feature to a given range.
28
29 The transformation is given by::
30
31 X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
32 X_scaled = X_std * (max - min) + min
33
34 where :exc:`min, max = feature_range`.
35
36 Examples
37 --------
38 >>> from dtoolkit.transformer import MinMaxScaler
39 >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
40 >>> scaler = MinMaxScaler()
41 >>> scaler.fit(data)
42 MinMaxScaler()
43 >>> scaler.data_max_
44 array([ 1., 18.])
45 >>> scaler.transform(data)
46 array([[0. , 0. ],
47 [0.25, 0.25],
48 [0.5 , 0.5 ],
49 [1. , 1. ]])
50 >>> scaler.transform([[2, 2]])
51 array([[1.5, 0. ]])
52
53 Notes
54 -----
55 Different to :obj:`sklearn.preprocessing.MinMaxScaler`.
56 This would let :obj:`~pandas.DataFrame` in and
57 :obj:`~pandas.DataFrame` out.
58 """
59
60 @doc(SKMinMaxScaler.fit)
61 def fit(self, X, y=None):
62 X = transform_series_to_frame(X)
63
64 return super().fit(X, y)
65
66 def transform(self, X: TwoDimArray) -> TwoDimArray:
67 """
68 Scale features of X according to feature_range.
69
70 Parameters
71 ----------
72 X : DataFrame or array-like of shape `(n_samples, n_features)`
73 Input data that will be transformed.
74
75 Returns
76 -------
77 DataFrame or ndarray of shape `(n_samples, n_features)`
78 Transformed data.
79
80 Notes
81 -----
82 This would let :obj:`~pandas.DataFrame` in and
83 :obj:`~pandas.DataFrame` out.
84 """
85
86 X = transform_series_to_frame(X)
87 Xt = super().transform(X)
88 Xt = transform_array_to_frame(Xt, X)
89
90 return transform_frame_to_series(Xt)
91
92 def inverse_transform(self, X: SeriesOrFrame | np.ndarray) -> TwoDimArray:
93 """
94 Undo the scaling of X according to feature_range.
95
96 Parameters
97 ----------
98 X : Series, DataFrame or array-like of shape `(n_samples, n_features)`
99 Input data that will be transformed. It cannot be sparse.
100
101 Returns
102 -------
103 DataFrame or ndarray of shape (n_samples, n_features)
104 Transformed data.
105
106 Notes
107 -----
108 This would let :obj:`~pandas.DataFrame` in and
109 :obj:`~pandas.DataFrame` out.
110 """
111
112 X = transform_series_to_frame(X)
113 Xt = super().inverse_transform(X)
114 Xt = transform_array_to_frame(Xt, X)
115
116 return transform_frame_to_series(Xt)
117
118
119 class OneHotEncoder(SKOneHotEncoder):
120 """
121 Encode categorical features as a one-hot numeric array.
122
123 Parameters
124 ----------
125 categories_with_parent : bool, default False
126 Returned column would hook parent labels if ``True`` else
127 would be ``categories``.
128
129 sparse : bool, default False
130 Will return sparse matrix if ``True`` else will return an array.
131
132 kwargs
133 See :obj:`sklearn.preprocessing.OneHotEncoder`.
134
135 Notes
136 -----
137 Different to :obj:`sklearn.preprocessing.OneHotEncoder`.
138 The result would return a :obj:`~pandas.DataFrame` which uses categories
139 as columns.
140
141 Examples
142 --------
143 Given a dataset with two features, we let the encoder find the unique
144 values per feature and transform the data to a binary one-hot encoding.
145
146 :obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories
147 as columns.
148
149 >>> from dtoolkit.transformer import OneHotEncoder
150 >>> import pandas as pd
151 >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
152 >>> df = pd.DataFrame(X, columns=['gender', 'number'])
153 >>> df
154 gender number
155 0 Male 1
156 1 Female 3
157 2 Female 2
158 >>> enc = OneHotEncoder()
159 >>> enc.fit_transform(df)
160 Female Male 1 2 3
161 0 0.0 1.0 1.0 0.0 0.0
162 1 1.0 0.0 0.0 0.0 1.0
163 2 1.0 0.0 0.0 1.0 0.0
164
165 The encoded data also could hook parent labels.
166
167 >>> enc = OneHotEncoder(categories_with_parent=True)
168 >>> enc.fit_transform(df)
169 gender_Female gender_Male number_1 number_2 number_3
170 0 0.0 1.0 1.0 0.0 0.0
171 1 1.0 0.0 0.0 0.0 1.0
172 2 1.0 0.0 0.0 1.0 0.0
173 """
174
175 @doc(SKOneHotEncoder.__init__)
176 def __init__(
177 self,
178 categories_with_parent: bool = False,
179 sparse: bool = False,
180 **kwargs,
181 ):
182 super().__init__(sparse=sparse, **kwargs)
183 self.categories_with_parent = categories_with_parent
184
185 @doc(
186 SKOneHotEncoder.transform,
187 dedent(
188 """
189 Notes
190 -----
191 This would let :obj:`~pandas.DataFrame` out.
192 """,
193 ),
194 )
195 def transform(self, X: TwoDimArray) -> TwoDimArray | csr_matrix:
196 from itertools import chain
197
198 Xt = super().transform(X)
199
200 if self.sparse is False:
201 categories = (
202 self.get_feature_names_out(X.cols())
203 if self.categories_with_parent
204 else chain.from_iterable(self.categories_)
205 )
206
207 return pd.DataFrame(Xt, columns=categories)
208
209 return Xt
210
[end of dtoolkit/transformer/sklearn.py]
</code>
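In terms of how the file above is meant to be used, the round trip through the `_util` helpers (imported at the top but not listed here) is what keeps pandas containers intact. A small, hypothetical usage example of that behaviour, following the import path shown in the file's own docstrings:

```python
import pandas as pd
from dtoolkit.transformer import MinMaxScaler

df = pd.DataFrame({"a": [-1.0, -0.5, 0.0, 1.0], "b": [2.0, 6.0, 10.0, 18.0]})
scaled = MinMaxScaler().fit_transform(df)
# Expectation under this design: `scaled` is still a DataFrame with columns "a" and "b",
# and passing a single Series through the same scaler would give a Series back.
```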
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dtoolkit/transformer/sklearn.py b/dtoolkit/transformer/sklearn.py
--- a/dtoolkit/transformer/sklearn.py
+++ b/dtoolkit/transformer/sklearn.py
@@ -26,6 +26,12 @@
"""
Transform features by scaling each feature to a given range.
+ .. warning::
+ Transformer :class:`dtoolkit.transformer.MinMaxScaler` is deprecated and
+ will be removed in 0.0.13.
+ Please use :class:`sklearn.preprocessing.MinMaxScaler` instead.
+ (Warning added DToolKit 0.0.12)
+
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
@@ -57,6 +63,24 @@
:obj:`~pandas.DataFrame` out.
"""
+ @doc(SKMinMaxScaler.__init__)
+ def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):
+ from warnings import warn
+
+ warn(
+ "Transformer 'dtoolkit.transformer.MinMaxScaler' is deprecated and "
+ "will be removed in 0.0.13. "
+ "Please use 'sklearn.preprocessing.MinMaxScaler' instead. "
+ "(Warning added DToolKit 0.0.12)",
+ DeprecationWarning,
+ )
+
+ super().__init__(
+ feature_range=feature_range,
+ copy=copy,
+ clip=clip,
+ )
+
@doc(SKMinMaxScaler.fit)
def fit(self, X, y=None):
X = transform_series_to_frame(X)
| {"golden_diff": "diff --git a/dtoolkit/transformer/sklearn.py b/dtoolkit/transformer/sklearn.py\n--- a/dtoolkit/transformer/sklearn.py\n+++ b/dtoolkit/transformer/sklearn.py\n@@ -26,6 +26,12 @@\n \"\"\"\n Transform features by scaling each feature to a given range.\n \n+ .. warning::\n+ Transformer :class:`dtoolkit.transformer.MinMaxScaler` is deprecated and\n+ will be removed in 0.0.13.\n+ Please use :class:`sklearn.preprocessing.MinMaxScaler` instead.\n+ (Warning added DToolKit 0.0.12)\n+\n The transformation is given by::\n \n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n@@ -57,6 +63,24 @@\n :obj:`~pandas.DataFrame` out.\n \"\"\"\n \n+ @doc(SKMinMaxScaler.__init__)\n+ def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):\n+ from warnings import warn\n+\n+ warn(\n+ \"Transformer 'dtoolkit.transformer.MinMaxScaler' is deprecated and \"\n+ \"will be removed in 0.0.13. \"\n+ \"Please use 'sklearn.preprocessing.MinMaxScaler' instead. \"\n+ \"(Warning added DToolKit 0.0.12)\",\n+ DeprecationWarning,\n+ )\n+\n+ super().__init__(\n+ feature_range=feature_range,\n+ copy=copy,\n+ clip=clip,\n+ )\n+\n @doc(SKMinMaxScaler.fit)\n def fit(self, X, y=None):\n X = transform_series_to_frame(X)\n", "issue": "EHN: let pipeline support return `Series`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #380\r\n- [x] whatsnew entry\r\n\r\nThe transformer is in charge of the specific transforming method.\r\nThe pipeline is in charge of connecting results from each transformer.\r\n\r\nI don't want to rewrite every transformer, let them support dataframe or series data structure. \r\nSo pipeline should be rebuilt. 
If the inputting of a transformer from the last transformer is dataframe or series, its outputting should be dataframe or series too.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._decorators import doc\nfrom sklearn.preprocessing import MinMaxScaler as SKMinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder as SKOneHotEncoder\n\nfrom dtoolkit.accessor.dataframe import cols # noqa\nfrom dtoolkit.accessor.series import cols # noqa\nfrom dtoolkit.transformer._util import transform_array_to_frame\nfrom dtoolkit.transformer._util import transform_frame_to_series\nfrom dtoolkit.transformer._util import transform_series_to_frame\n\nif TYPE_CHECKING:\n from scipy.sparse import csr_matrix\n\n from dtoolkit._typing import SeriesOrFrame\n from dtoolkit._typing import TwoDimArray\n\n\nclass MinMaxScaler(SKMinMaxScaler):\n \"\"\"\n Transform features by scaling each feature to a given range.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where :exc:`min, max = feature_range`.\n\n Examples\n --------\n >>> from dtoolkit.transformer import MinMaxScaler\n >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]\n >>> scaler = MinMaxScaler()\n >>> scaler.fit(data)\n MinMaxScaler()\n >>> scaler.data_max_\n array([ 1., 18.])\n >>> scaler.transform(data)\n array([[0. , 0. ],\n [0.25, 0.25],\n [0.5 , 0.5 ],\n [1. , 1. ]])\n >>> scaler.transform([[2, 2]])\n array([[1.5, 0. ]])\n\n Notes\n -----\n Different to :obj:`sklearn.preprocessing.MinMaxScaler`.\n This would let :obj:`~pandas.DataFrame` in and\n :obj:`~pandas.DataFrame` out.\n \"\"\"\n\n @doc(SKMinMaxScaler.fit)\n def fit(self, X, y=None):\n X = transform_series_to_frame(X)\n\n return super().fit(X, y)\n\n def transform(self, X: TwoDimArray) -> TwoDimArray:\n \"\"\"\n Scale features of X according to feature_range.\n\n Parameters\n ----------\n X : DataFrame or array-like of shape `(n_samples, n_features)`\n Input data that will be transformed.\n\n Returns\n -------\n DataFrame or ndarray of shape `(n_samples, n_features)`\n Transformed data.\n\n Notes\n -----\n This would let :obj:`~pandas.DataFrame` in and\n :obj:`~pandas.DataFrame` out.\n \"\"\"\n\n X = transform_series_to_frame(X)\n Xt = super().transform(X)\n Xt = transform_array_to_frame(Xt, X)\n\n return transform_frame_to_series(Xt)\n\n def inverse_transform(self, X: SeriesOrFrame | np.ndarray) -> TwoDimArray:\n \"\"\"\n Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : Series, DataFrame or array-like of shape `(n_samples, n_features)`\n Input data that will be transformed. 
It cannot be sparse.\n\n Returns\n -------\n DataFrame or ndarray of shape (n_samples, n_features)\n Transformed data.\n\n Notes\n -----\n This would let :obj:`~pandas.DataFrame` in and\n :obj:`~pandas.DataFrame` out.\n \"\"\"\n\n X = transform_series_to_frame(X)\n Xt = super().inverse_transform(X)\n Xt = transform_array_to_frame(Xt, X)\n\n return transform_frame_to_series(Xt)\n\n\nclass OneHotEncoder(SKOneHotEncoder):\n \"\"\"\n Encode categorical features as a one-hot numeric array.\n\n Parameters\n ----------\n categories_with_parent : bool, default False\n Returned column would hook parent labels if ``True`` else\n would be ``categories``.\n\n sparse : bool, default False\n Will return sparse matrix if ``True`` else will return an array.\n\n kwargs\n See :obj:`sklearn.preprocessing.OneHotEncoder`.\n\n Notes\n -----\n Different to :obj:`sklearn.preprocessing.OneHotEncoder`.\n The result would return a :obj:`~pandas.DataFrame` which uses categories\n as columns.\n\n Examples\n --------\n Given a dataset with two features, we let the encoder find the unique\n values per feature and transform the data to a binary one-hot encoding.\n\n :obj:`~pandas.DataFrame` in, :obj:`~pandas.DataFrame` out with categories\n as columns.\n\n >>> from dtoolkit.transformer import OneHotEncoder\n >>> import pandas as pd\n >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]\n >>> df = pd.DataFrame(X, columns=['gender', 'number'])\n >>> df\n gender number\n 0 Male 1\n 1 Female 3\n 2 Female 2\n >>> enc = OneHotEncoder()\n >>> enc.fit_transform(df)\n Female Male 1 2 3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n\n The encoded data also could hook parent labels.\n\n >>> enc = OneHotEncoder(categories_with_parent=True)\n >>> enc.fit_transform(df)\n gender_Female gender_Male number_1 number_2 number_3\n 0 0.0 1.0 1.0 0.0 0.0\n 1 1.0 0.0 0.0 0.0 1.0\n 2 1.0 0.0 0.0 1.0 0.0\n \"\"\"\n\n @doc(SKOneHotEncoder.__init__)\n def __init__(\n self,\n categories_with_parent: bool = False,\n sparse: bool = False,\n **kwargs,\n ):\n super().__init__(sparse=sparse, **kwargs)\n self.categories_with_parent = categories_with_parent\n\n @doc(\n SKOneHotEncoder.transform,\n dedent(\n \"\"\"\n Notes\n -----\n This would let :obj:`~pandas.DataFrame` out.\n \"\"\",\n ),\n )\n def transform(self, X: TwoDimArray) -> TwoDimArray | csr_matrix:\n from itertools import chain\n\n Xt = super().transform(X)\n\n if self.sparse is False:\n categories = (\n self.get_feature_names_out(X.cols())\n if self.categories_with_parent\n else chain.from_iterable(self.categories_)\n )\n\n return pd.DataFrame(Xt, columns=categories)\n\n return Xt\n", "path": "dtoolkit/transformer/sklearn.py"}]} | 2,917 | 392 |
gh_patches_debug_30528 | rasdani/github-patches | git_diff | pulp__pulpcore-1062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
File System Exporter should have an option to generate listing files
**Is your feature request related to a problem? Please describe.**
When Katello uses the File System exporter, it does not generate the listing files in the CDN directory structure. Katello does not have direct access to Pulp's content directories, so it is not able to easily generate `Listing` files itself. Right now it relies on the hammer export command being run from the same machine as Pulp.
**Describe the solution you'd like**
The Pulp exporter should ideally have an option or a flag to generate `listing` files when the FS exporter is run.
**Describe alternatives you've considered**
A couple of things to consider here:
- This is only relevant to YUM/KS repos.
- This is probably only useful for certain Yum repos, but a flag in the exporter would take care of that.
</issue>
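In the CDN-style tree Katello consumes, a `listing` file conventionally just names the immediate subdirectories of the directory it sits in. As a rough illustration of what generating them over an already exported tree could look like (a sketch only, not pulpcore code):

```python
import os


def write_listing_files(export_root: str) -> None:
    # One "listing" file per directory, naming its immediate subdirectories.
    for dirpath, dirnames, _ in os.walk(export_root):
        if dirnames:
            with open(os.path.join(dirpath, "listing"), "w") as f:
                f.write("\n".join(sorted(dirnames)) + "\n")
```

In the feature being requested, a flag on the exporter would decide whether a pass like this runs after the export finishes.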
<code>
[start of pulpcore/app/serializers/content.py]
1 import hashlib
2 from gettext import gettext as _
3
4 from django.db import transaction
5 from rest_framework import serializers
6 from rest_framework.validators import UniqueValidator
7
8 from pulpcore.app import models
9 from pulpcore.app.serializers import base, fields
10
11
12 class BaseContentSerializer(base.ModelSerializer):
13 pulp_href = base.DetailIdentityField(view_name_pattern=r"contents(-.*/.*)-detail")
14
15 class Meta:
16 model = models.Content
17 fields = base.ModelSerializer.Meta.fields
18
19
20 class NoArtifactContentSerializer(BaseContentSerializer):
21 class Meta:
22 model = models.Content
23 fields = BaseContentSerializer.Meta.fields
24
25
26 class SingleArtifactContentSerializer(BaseContentSerializer):
27 artifact = fields.SingleContentArtifactField(
28 help_text=_("Artifact file representing the physical content"),
29 )
30
31 relative_path = serializers.CharField(
32 help_text=_("Path where the artifact is located relative to distributions base_path"),
33 validators=[fields.relative_path_validator],
34 write_only=True,
35 )
36
37 def __init__(self, *args, **kwargs):
38 """
39 Initializer for SingleArtifactContentSerializer
40 """
41 super().__init__(*args, **kwargs)
42
43 # If the content model has its own database field 'relative_path',
44 # we should not mark the field write_only
45 if hasattr(self.Meta.model, "relative_path") and "relative_path" in self.fields:
46 self.fields["relative_path"].write_only = False
47
48 @transaction.atomic
49 def create(self, validated_data):
50 """
51 Create the content and associate it with its Artifact.
52
53 Args:
54 validated_data (dict): Data to save to the database
55 """
56 artifact = validated_data.pop("artifact")
57 if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
58 relative_path = validated_data.pop("relative_path")
59 else:
60 relative_path = validated_data.get("relative_path")
61 content = self.Meta.model.objects.create(**validated_data)
62 models.ContentArtifact.objects.create(
63 artifact=artifact, content=content, relative_path=relative_path
64 )
65 return content
66
67 class Meta:
68 model = models.Content
69 fields = BaseContentSerializer.Meta.fields + ("artifact", "relative_path")
70
71
72 class MultipleArtifactContentSerializer(BaseContentSerializer):
73 artifacts = fields.ContentArtifactsField(
74 help_text=_(
75 "A dict mapping relative paths inside the Content to the corresponding"
76 "Artifact URLs. E.g.: {'relative/path': "
77 "'/artifacts/1/'"
78 ),
79 )
80
81 @transaction.atomic
82 def create(self, validated_data):
83 """
84 Create the content and associate it with all its Artifacts.
85
86 Args:
87 validated_data (dict): Data to save to the database
88 """
89 artifacts = validated_data.pop("artifacts")
90 content = self.Meta.model.objects.create(**validated_data)
91 for relative_path, artifact in artifacts.items():
92 models.ContentArtifact.objects.create(
93 artifact=artifact, content=content, relative_path=relative_path
94 )
95 return content
96
97 class Meta:
98 model = models.Content
99 fields = BaseContentSerializer.Meta.fields + ("artifacts",)
100
101
102 class ContentChecksumSerializer(serializers.Serializer):
103 """
104 Provide a serializer with artifact checksum fields for single artifact content.
105
106 If you use this serializer, it's recommended that you prefetch artifacts:
107
108 Content.objects.prefetch_related("_artifacts").all()
109 """
110
111 md5 = fields.ContentArtifactChecksumField(
112 help_text=_("The MD5 checksum if available."),
113 checksum="md5",
114 )
115
116 sha1 = fields.ContentArtifactChecksumField(
117 help_text=_("The SHA-1 checksum if available."),
118 checksum="sha1",
119 )
120
121 sha224 = fields.ContentArtifactChecksumField(
122 help_text=_("The SHA-224 checksum if available."),
123 checksum="sha224",
124 )
125
126 sha256 = fields.ContentArtifactChecksumField(
127 help_text=_("The SHA-256 checksum if available."),
128 checksum="sha256",
129 )
130
131 sha384 = fields.ContentArtifactChecksumField(
132 help_text=_("The SHA-384 checksum if available."),
133 checksum="sha384",
134 )
135
136 sha512 = fields.ContentArtifactChecksumField(
137 help_text=_("The SHA-512 checksum if available."),
138 checksum="sha512",
139 )
140
141 class Meta:
142 model = models.Content
143 fields = base.ModelSerializer.Meta.fields + (
144 "md5",
145 "sha1",
146 "sha224",
147 "sha256",
148 "sha384",
149 "sha512",
150 )
151
152
153 class ArtifactSerializer(base.ModelSerializer):
154 pulp_href = base.IdentityField(view_name="artifacts-detail")
155
156 file = serializers.FileField(help_text=_("The stored file."), allow_empty_file=True)
157
158 size = serializers.IntegerField(help_text=_("The size of the file in bytes."), required=False)
159
160 md5 = serializers.CharField(
161 help_text=_("The MD5 checksum of the file if available."), required=False, allow_null=True
162 )
163
164 sha1 = serializers.CharField(
165 help_text=_("The SHA-1 checksum of the file if available."),
166 required=False,
167 allow_null=True,
168 )
169
170 sha224 = serializers.CharField(
171 help_text=_("The SHA-224 checksum of the file if available."),
172 required=False,
173 allow_null=True,
174 )
175
176 sha256 = serializers.CharField(
177 help_text=_("The SHA-256 checksum of the file if available."),
178 required=False,
179 allow_null=True,
180 )
181
182 sha384 = serializers.CharField(
183 help_text=_("The SHA-384 checksum of the file if available."),
184 required=False,
185 allow_null=True,
186 )
187
188 sha512 = serializers.CharField(
189 help_text=_("The SHA-512 checksum of the file if available."),
190 required=False,
191 allow_null=True,
192 )
193
194 def validate(self, data):
195 """
196 Validate file by size and by all checksums provided.
197
198 Args:
199 data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their
200 values
201
202 Raises:
203 :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any
204 of the checksums don't match their actual values.
205 """
206 super().validate(data)
207 if "size" in data:
208 if data["file"].size != int(data["size"]):
209 raise serializers.ValidationError(_("The size did not match actual size of file."))
210 else:
211 data["size"] = data["file"].size
212
213 bad_algs = []
214 for algorithm in models.Artifact.FORBIDDEN_DIGESTS:
215 if algorithm in data:
216 bad_algs.append(algorithm)
217 if bad_algs:
218 raise serializers.ValidationError(
219 _(f"Checksum algorithms {bad_algs} forbidden for this Pulp instance.")
220 )
221
222 for algorithm in hashlib.algorithms_guaranteed:
223 if algorithm in models.Artifact.DIGEST_FIELDS:
224 digest = data["file"].hashers[algorithm].hexdigest()
225
226 if algorithm in data and digest != data[algorithm]:
227 raise serializers.ValidationError(
228 _("The %s checksum did not match.") % algorithm
229 )
230 else:
231 data[algorithm] = digest
232 if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:
233 validator = UniqueValidator(
234 models.Artifact.objects.all(),
235 message=_("{0} checksum must be unique.").format(algorithm),
236 )
237 validator.instance = None
238
239 validator(digest, self.fields[algorithm])
240 return data
241
242 class Meta:
243 model = models.Artifact
244 fields = base.ModelSerializer.Meta.fields + (
245 "file",
246 "size",
247 "md5",
248 "sha1",
249 "sha224",
250 "sha256",
251 "sha384",
252 "sha512",
253 )
254
255
256 class SigningServiceSerializer(base.ModelSerializer):
257 """
258 A serializer for the model declaring a signing service.
259 """
260
261 pulp_href = base.IdentityField(view_name="signing-services-detail")
262 name = serializers.CharField(help_text=_("A unique name used to recognize a script."))
263 script = serializers.CharField(
264 help_text=_("An absolute path to a script which is going to be used for the signing.")
265 )
266
267 class Meta:
268 model = models.SigningService
269 fields = BaseContentSerializer.Meta.fields + ("name", "script")
270
[end of pulpcore/app/serializers/content.py]
</code>
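`ArtifactSerializer.validate` above checks the declared size and every supplied digest against the uploaded file, and fills in the digests that were not supplied. Stripped of the Django REST Framework machinery, that flow looks roughly like this (plain `hashlib` and a hypothetical dict of expected values):

```python
import hashlib


def check_file(payload: bytes, expected: dict) -> dict:
    digests = {}
    if "size" in expected and len(payload) != int(expected["size"]):
        raise ValueError("The size did not match actual size of file.")
    for algorithm in ("sha256", "sha512"):
        digest = hashlib.new(algorithm, payload).hexdigest()
        if algorithm in expected and expected[algorithm] != digest:
            raise ValueError(f"The {algorithm} checksum did not match.")
        digests[algorithm] = digest
    return digests
```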
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -1,4 +1,3 @@
-import hashlib
from gettext import gettext as _
from django.db import transaction
@@ -219,24 +218,24 @@
_(f"Checksum algorithms {bad_algs} forbidden for this Pulp instance.")
)
- for algorithm in hashlib.algorithms_guaranteed:
- if algorithm in models.Artifact.DIGEST_FIELDS:
- digest = data["file"].hashers[algorithm].hexdigest()
-
- if algorithm in data and digest != data[algorithm]:
- raise serializers.ValidationError(
- _("The %s checksum did not match.") % algorithm
- )
- else:
- data[algorithm] = digest
- if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:
- validator = UniqueValidator(
- models.Artifact.objects.all(),
- message=_("{0} checksum must be unique.").format(algorithm),
- )
- validator.instance = None
-
- validator(digest, self.fields[algorithm])
+ for algorithm in reversed(models.Artifact.DIGEST_FIELDS):
+ digest = data["file"].hashers[algorithm].hexdigest()
+
+ if algorithm in data and digest != data[algorithm]:
+ raise serializers.ValidationError(_("The %s checksum did not match.") % algorithm)
+ else:
+ data[algorithm] = digest
+
+ if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:
+ validator = UniqueValidator(
+ models.Artifact.objects.all(),
+ message=_("Artifact with {0} checksum of '{1}' already exists.").format(
+ algorithm, digest
+ ),
+ )
+ validator.instance = None
+ validator(digest, self.fields[algorithm])
+
return data
class Meta:
| {"golden_diff": "diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py\n--- a/pulpcore/app/serializers/content.py\n+++ b/pulpcore/app/serializers/content.py\n@@ -1,4 +1,3 @@\n-import hashlib\n from gettext import gettext as _\n \n from django.db import transaction\n@@ -219,24 +218,24 @@\n _(f\"Checksum algorithms {bad_algs} forbidden for this Pulp instance.\")\n )\n \n- for algorithm in hashlib.algorithms_guaranteed:\n- if algorithm in models.Artifact.DIGEST_FIELDS:\n- digest = data[\"file\"].hashers[algorithm].hexdigest()\n-\n- if algorithm in data and digest != data[algorithm]:\n- raise serializers.ValidationError(\n- _(\"The %s checksum did not match.\") % algorithm\n- )\n- else:\n- data[algorithm] = digest\n- if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:\n- validator = UniqueValidator(\n- models.Artifact.objects.all(),\n- message=_(\"{0} checksum must be unique.\").format(algorithm),\n- )\n- validator.instance = None\n-\n- validator(digest, self.fields[algorithm])\n+ for algorithm in reversed(models.Artifact.DIGEST_FIELDS):\n+ digest = data[\"file\"].hashers[algorithm].hexdigest()\n+\n+ if algorithm in data and digest != data[algorithm]:\n+ raise serializers.ValidationError(_(\"The %s checksum did not match.\") % algorithm)\n+ else:\n+ data[algorithm] = digest\n+\n+ if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:\n+ validator = UniqueValidator(\n+ models.Artifact.objects.all(),\n+ message=_(\"Artifact with {0} checksum of '{1}' already exists.\").format(\n+ algorithm, digest\n+ ),\n+ )\n+ validator.instance = None\n+ validator(digest, self.fields[algorithm])\n+\n return data\n \n class Meta:\n", "issue": "File System Exporter should have an option to generate listing files\n**Is your feature request related to a problem? Please describe.**\r\nWhen Katello uses the File System exporter it does not generate the listing files in CDN directory structure. Katello does not directly have access to the pulp's content directories. Due to this it is not able to easily generate `Listing` files. It relies right now on the hammer export command being run from the same machine as pulp.\r\n\r\n**Describe the solution you'd like**\r\nPulp exporter should ideally have an option or a flag to generate `listing` files when fs exporter is run.\r\n\r\n**Describe alternatives you've considered**\r\nCouple of things to consider here \r\n- This is only relevant to YUM/KS repos\r\n- This probably only useful for certain Yum repos. 
But a flag in the exporter would take care of that.\r\n\r\n\n", "before_files": [{"content": "import hashlib\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom pulpcore.app import models\nfrom pulpcore.app.serializers import base, fields\n\n\nclass BaseContentSerializer(base.ModelSerializer):\n pulp_href = base.DetailIdentityField(view_name_pattern=r\"contents(-.*/.*)-detail\")\n\n class Meta:\n model = models.Content\n fields = base.ModelSerializer.Meta.fields\n\n\nclass NoArtifactContentSerializer(BaseContentSerializer):\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields\n\n\nclass SingleArtifactContentSerializer(BaseContentSerializer):\n artifact = fields.SingleContentArtifactField(\n help_text=_(\"Artifact file representing the physical content\"),\n )\n\n relative_path = serializers.CharField(\n help_text=_(\"Path where the artifact is located relative to distributions base_path\"),\n validators=[fields.relative_path_validator],\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializer for SingleArtifactContentSerializer\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # If the content model has its own database field 'relative_path',\n # we should not mark the field write_only\n if hasattr(self.Meta.model, \"relative_path\") and \"relative_path\" in self.fields:\n self.fields[\"relative_path\"].write_only = False\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with its Artifact.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop(\"artifact\")\n if \"relative_path\" not in self.fields or self.fields[\"relative_path\"].write_only:\n relative_path = validated_data.pop(\"relative_path\")\n else:\n relative_path = validated_data.get(\"relative_path\")\n content = self.Meta.model.objects.create(**validated_data)\n models.ContentArtifact.objects.create(\n artifact=artifact, content=content, relative_path=relative_path\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + (\"artifact\", \"relative_path\")\n\n\nclass MultipleArtifactContentSerializer(BaseContentSerializer):\n artifacts = fields.ContentArtifactsField(\n help_text=_(\n \"A dict mapping relative paths inside the Content to the corresponding\"\n \"Artifact URLs. 
E.g.: {'relative/path': \"\n \"'/artifacts/1/'\"\n ),\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with all its Artifacts.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifacts = validated_data.pop(\"artifacts\")\n content = self.Meta.model.objects.create(**validated_data)\n for relative_path, artifact in artifacts.items():\n models.ContentArtifact.objects.create(\n artifact=artifact, content=content, relative_path=relative_path\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + (\"artifacts\",)\n\n\nclass ContentChecksumSerializer(serializers.Serializer):\n \"\"\"\n Provide a serializer with artifact checksum fields for single artifact content.\n\n If you use this serializer, it's recommended that you prefetch artifacts:\n\n Content.objects.prefetch_related(\"_artifacts\").all()\n \"\"\"\n\n md5 = fields.ContentArtifactChecksumField(\n help_text=_(\"The MD5 checksum if available.\"),\n checksum=\"md5\",\n )\n\n sha1 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-1 checksum if available.\"),\n checksum=\"sha1\",\n )\n\n sha224 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-224 checksum if available.\"),\n checksum=\"sha224\",\n )\n\n sha256 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-256 checksum if available.\"),\n checksum=\"sha256\",\n )\n\n sha384 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-384 checksum if available.\"),\n checksum=\"sha384\",\n )\n\n sha512 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-512 checksum if available.\"),\n checksum=\"sha512\",\n )\n\n class Meta:\n model = models.Content\n fields = base.ModelSerializer.Meta.fields + (\n \"md5\",\n \"sha1\",\n \"sha224\",\n \"sha256\",\n \"sha384\",\n \"sha512\",\n )\n\n\nclass ArtifactSerializer(base.ModelSerializer):\n pulp_href = base.IdentityField(view_name=\"artifacts-detail\")\n\n file = serializers.FileField(help_text=_(\"The stored file.\"), allow_empty_file=True)\n\n size = serializers.IntegerField(help_text=_(\"The size of the file in bytes.\"), required=False)\n\n md5 = serializers.CharField(\n help_text=_(\"The MD5 checksum of the file if available.\"), required=False, allow_null=True\n )\n\n sha1 = serializers.CharField(\n help_text=_(\"The SHA-1 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha224 = serializers.CharField(\n help_text=_(\"The SHA-224 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha256 = serializers.CharField(\n help_text=_(\"The SHA-256 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha384 = serializers.CharField(\n help_text=_(\"The SHA-384 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha512 = serializers.CharField(\n help_text=_(\"The SHA-512 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n def validate(self, data):\n \"\"\"\n Validate file by size and by all checksums provided.\n\n Args:\n data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their\n values\n\n Raises:\n :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any\n of the checksums don't match their actual values.\n \"\"\"\n super().validate(data)\n if \"size\" in data:\n if data[\"file\"].size != int(data[\"size\"]):\n raise 
serializers.ValidationError(_(\"The size did not match actual size of file.\"))\n else:\n data[\"size\"] = data[\"file\"].size\n\n bad_algs = []\n for algorithm in models.Artifact.FORBIDDEN_DIGESTS:\n if algorithm in data:\n bad_algs.append(algorithm)\n if bad_algs:\n raise serializers.ValidationError(\n _(f\"Checksum algorithms {bad_algs} forbidden for this Pulp instance.\")\n )\n\n for algorithm in hashlib.algorithms_guaranteed:\n if algorithm in models.Artifact.DIGEST_FIELDS:\n digest = data[\"file\"].hashers[algorithm].hexdigest()\n\n if algorithm in data and digest != data[algorithm]:\n raise serializers.ValidationError(\n _(\"The %s checksum did not match.\") % algorithm\n )\n else:\n data[algorithm] = digest\n if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:\n validator = UniqueValidator(\n models.Artifact.objects.all(),\n message=_(\"{0} checksum must be unique.\").format(algorithm),\n )\n validator.instance = None\n\n validator(digest, self.fields[algorithm])\n return data\n\n class Meta:\n model = models.Artifact\n fields = base.ModelSerializer.Meta.fields + (\n \"file\",\n \"size\",\n \"md5\",\n \"sha1\",\n \"sha224\",\n \"sha256\",\n \"sha384\",\n \"sha512\",\n )\n\n\nclass SigningServiceSerializer(base.ModelSerializer):\n \"\"\"\n A serializer for the model declaring a signing service.\n \"\"\"\n\n pulp_href = base.IdentityField(view_name=\"signing-services-detail\")\n name = serializers.CharField(help_text=_(\"A unique name used to recognize a script.\"))\n script = serializers.CharField(\n help_text=_(\"An absolute path to a script which is going to be used for the signing.\")\n )\n\n class Meta:\n model = models.SigningService\n fields = BaseContentSerializer.Meta.fields + (\"name\", \"script\")\n", "path": "pulpcore/app/serializers/content.py"}]} | 3,220 | 429 |
gh_patches_debug_30150 | rasdani/github-patches | git_diff | ocf__ocfweb-141 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs don't auto-reload in dev
Because we only read docs once, changes to the Markdown files require manually killing and restarting the server.
</issue>
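The usual way out of this is to make the "read once" step conditional on the deployment mode, for example re-reading the file on every request while `settings.DEBUG` is true. A generic sketch of that pattern (not the project's actual code):

```python
from django.conf import settings


def doc_text(path, cached_text):
    # In development, re-read the Markdown file on every request so edits show up
    # immediately; in production, keep using the text that was read once at import time.
    if settings.DEBUG:
        with open(path) as f:
            return f.read()
    return cached_text
```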
<code>
[start of ocfweb/docs/markdown_based.py]
1 """Documents backed by Markdown.
2
3 This is the most common type of doc. It reads a Markdown fil and renders it in
4 a standard template.
5
6 Markdown documents can specify the meta attributes:
7
8 - [[!meta title="Page title"]]
9 Changes the page title; all templates must specify this.
10
11 - [[!meta template="my_template.html"]]
12 Changes the page template; most templates should *not* specify this,
13 unless they want to customize something (e.g. the sidebar)
14 """
15 import os
16 from functools import partial
17 from pathlib import Path
18
19 from django.shortcuts import render
20
21 from ocfweb.component.markdown import markdown_and_toc
22 from ocfweb.component.markdown import text_and_meta
23 from ocfweb.docs.doc import Document
24
25
26 DOCS_DIR = Path(__file__).parent.joinpath('docs')
27
28
29 def render_markdown_doc(meta, text, doc, request):
30 html, toc = markdown_and_toc(text)
31 return render(
32 request,
33 meta.get('template', 'doc.html'),
34 {
35 'title': doc.title,
36 'doc': doc,
37 'html': html,
38 'toc': toc,
39 },
40 )
41
42
43 def get_markdown_docs():
44 for path in DOCS_DIR.glob('**/*.md'):
45 name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR)))
46
47 # sanity check that the file is under the directory we expect
48 assert DOCS_DIR in path.parents
49
50 with path.open() as f:
51 text, meta = text_and_meta(f)
52
53 if 'title' not in meta:
54 raise ValueError('Document {} lacks required title meta variable.'.format(name))
55
56 yield Document(
57 name='/' + name,
58 title=meta['title'],
59 render=partial(render_markdown_doc, meta, text)
60 )
61
[end of ocfweb/docs/markdown_based.py]
</code>
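The module above relies on `text_and_meta` (imported from `ocfweb.component.markdown`, not shown) to split `[[!meta key="value"]]` directives out of the Markdown source. A rough regex-based illustration of that kind of split, written as an assumption for this sketch rather than ocfweb's actual parser:

```python
import re

META_RE = re.compile(r'\[\[!meta\s+(\w+)="([^"]*)"\]\]\s*')


def split_meta(text: str):
    # Collect every meta directive into a dict and strip it from the body text.
    meta = dict(META_RE.findall(text))
    body = META_RE.sub("", text)
    return body, meta
```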
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py
--- a/ocfweb/docs/markdown_based.py
+++ b/ocfweb/docs/markdown_based.py
@@ -1,6 +1,6 @@
"""Documents backed by Markdown.
-This is the most common type of doc. It reads a Markdown fil and renders it in
+This is the most common type of doc. It reads a Markdown file and renders it in
a standard template.
Markdown documents can specify the meta attributes:
@@ -16,6 +16,7 @@
from functools import partial
from pathlib import Path
+from django.conf import settings
from django.shortcuts import render
from ocfweb.component.markdown import markdown_and_toc
@@ -26,13 +27,20 @@
DOCS_DIR = Path(__file__).parent.joinpath('docs')
-def render_markdown_doc(meta, text, doc, request):
+def render_markdown_doc(path, meta, text, doc, request):
+
+ # Reload markdown docs if in development
+ if settings.DEBUG:
+ with path.open() as f:
+ text, meta = text_and_meta(f)
+
html, toc = markdown_and_toc(text)
+
return render(
request,
meta.get('template', 'doc.html'),
{
- 'title': doc.title,
+ 'title': meta['title'],
'doc': doc,
'html': html,
'toc': toc,
@@ -56,5 +64,5 @@
yield Document(
name='/' + name,
title=meta['title'],
- render=partial(render_markdown_doc, meta, text)
+ render=partial(render_markdown_doc, path, meta, text),
)
| {"golden_diff": "diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py\n--- a/ocfweb/docs/markdown_based.py\n+++ b/ocfweb/docs/markdown_based.py\n@@ -1,6 +1,6 @@\n \"\"\"Documents backed by Markdown.\n \n-This is the most common type of doc. It reads a Markdown fil and renders it in\n+This is the most common type of doc. It reads a Markdown file and renders it in\n a standard template.\n \n Markdown documents can specify the meta attributes:\n@@ -16,6 +16,7 @@\n from functools import partial\n from pathlib import Path\n \n+from django.conf import settings\n from django.shortcuts import render\n \n from ocfweb.component.markdown import markdown_and_toc\n@@ -26,13 +27,20 @@\n DOCS_DIR = Path(__file__).parent.joinpath('docs')\n \n \n-def render_markdown_doc(meta, text, doc, request):\n+def render_markdown_doc(path, meta, text, doc, request):\n+\n+ # Reload markdown docs if in development\n+ if settings.DEBUG:\n+ with path.open() as f:\n+ text, meta = text_and_meta(f)\n+\n html, toc = markdown_and_toc(text)\n+\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n- 'title': doc.title,\n+ 'title': meta['title'],\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n@@ -56,5 +64,5 @@\n yield Document(\n name='/' + name,\n title=meta['title'],\n- render=partial(render_markdown_doc, meta, text)\n+ render=partial(render_markdown_doc, path, meta, text),\n )\n", "issue": "Docs don't auto-reload in dev\nBecause we only read docs once, changes to the Markdown files require manually killing and restarting the server.\n\n", "before_files": [{"content": "\"\"\"Documents backed by Markdown.\n\nThis is the most common type of doc. It reads a Markdown fil and renders it in\na standard template.\n\nMarkdown documents can specify the meta attributes:\n\n - [[!meta title=\"Page title\"]]\n Changes the page title; all templates must specify this.\n\n - [[!meta template=\"my_template.html\"]]\n Changes the page template; most templates should *not* specify this,\n unless they want to customize something (e.g. the sidebar)\n\"\"\"\nimport os\nfrom functools import partial\nfrom pathlib import Path\n\nfrom django.shortcuts import render\n\nfrom ocfweb.component.markdown import markdown_and_toc\nfrom ocfweb.component.markdown import text_and_meta\nfrom ocfweb.docs.doc import Document\n\n\nDOCS_DIR = Path(__file__).parent.joinpath('docs')\n\n\ndef render_markdown_doc(meta, text, doc, request):\n html, toc = markdown_and_toc(text)\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n 'title': doc.title,\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n },\n )\n\n\ndef get_markdown_docs():\n for path in DOCS_DIR.glob('**/*.md'):\n name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR)))\n\n # sanity check that the file is under the directory we expect\n assert DOCS_DIR in path.parents\n\n with path.open() as f:\n text, meta = text_and_meta(f)\n\n if 'title' not in meta:\n raise ValueError('Document {} lacks required title meta variable.'.format(name))\n\n yield Document(\n name='/' + name,\n title=meta['title'],\n render=partial(render_markdown_doc, meta, text)\n )\n", "path": "ocfweb/docs/markdown_based.py"}]} | 1,072 | 390 |
gh_patches_debug_23560 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "Civis" tab to the user profile view
We recently removed some features from the user Profile view and now want to reimplement them using only Django.
This task will be to add a "Civis" tab to the user profile view. The tab should link to a sub-path of the user profile so it is clear to the viewer that they are viewing Civis related to the user.
## Task
All of these tasks should be done in the `accounts` app.
- [ ] create a Django view called `UserCivis` (in `accounts/views.py`)
- [ ] create a URL definition `profile/<str:username>/civis` that renders the `UserCivis` view (in `accounts/urls.py`)
- [ ] ensure the `UserCivis` view context contains a list of Civis created by the relevant user (given via the `username` in the URL)
- [ ] create a template to render the user civis - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)
</issue>
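Putting the checklist together, the view and URL it describes could look roughly like the following. The `civis` reverse relation on the user model is an assumption of this sketch, and the template would simply extend `base.html` and loop over `civis`:

```python
# accounts/views.py (sketch)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.views import View

from accounts.models import Profile


class UserCivis(LoginRequiredMixin, View):
    """Show the civis created by the profile with the given username."""

    def get(self, request, username=None):
        profile = get_object_or_404(Profile, user__username=username)
        civis = profile.user.civis.all()
        return TemplateResponse(
            request,
            "accounts/user_civis.html",
            {"profile": profile, "civis": civis},
        )


# accounts/urls.py (sketch)
# path("profile/<str:username>/civis/", UserCivis.as_view(), name="user-civis"),
```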
<code>
[start of project/accounts/views.py]
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from accounts.authentication import account_activation_token, send_activation_email
8 from accounts.forms import ProfileEditForm, UserRegistrationForm
9 from accounts.models import Profile
10 from django.conf import settings
11 from django.contrib.auth import get_user_model, login
12 from django.contrib.auth import views as auth_views
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.auth.mixins import LoginRequiredMixin
15 from django.contrib.sites.shortcuts import get_current_site
16 from django.http import HttpResponseRedirect
17 from django.shortcuts import get_object_or_404, redirect
18 from django.template.response import TemplateResponse
19 from django.urls import reverse, reverse_lazy
20 from django.utils.encoding import force_str
21 from django.utils.http import urlsafe_base64_decode
22 from django.views import View
23 from django.views.generic.edit import FormView, UpdateView
24
25
26 class ProfileFollow(LoginRequiredMixin, View):
27 def get(self, request, *args, **kwargs):
28 # Prevent users from following themselves.
29 if request.user.username == kwargs["username"]:
30 pass
31 else:
32 following_profile = Profile.objects.get(user__username=kwargs["username"])
33
34 self.request.user.profile.following.add(following_profile)
35
36 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
37
38 return HttpResponseRedirect(redirect_to)
39
40
41 class ProfileUnfollow(LoginRequiredMixin, View):
42 def get(self, request, *args, **kwargs):
43 # Prevent users from following themselves.
44 if request.user.username == kwargs["username"]:
45 pass
46 else:
47 following_profile = Profile.objects.get(user__username=kwargs["username"])
48
49 self.request.user.profile.following.remove(following_profile)
50
51 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
52
53 return HttpResponseRedirect(redirect_to)
54
55
56 class RegisterView(FormView):
57 """
58 A form view that handles user registration.
59 """
60
61 template_name = "accounts/register/register.html"
62 form_class = UserRegistrationForm
63 success_url = "/"
64
65 def _create_user(self, form):
66 username = form.cleaned_data["username"]
67 password = form.cleaned_data["password"]
68 email = form.cleaned_data["email"]
69 user = get_user_model().objects.create_user(username, email, password)
70 return user
71
72 def _send_email(self, user):
73 domain = get_current_site(self.request).domain
74 send_activation_email(user, domain)
75
76 def _login(self, user):
77 login(self.request, user)
78
79 def form_valid(self, form):
80 user = self._create_user(form)
81
82 self._send_email(user)
83 self._login(user)
84
85 return super(RegisterView, self).form_valid(form)
86
87
88 class ProfileActivationView(View):
89 """
90 This shows different views to the user when they are verifying
91 their account based on whether they are already verified or not.
92 """
93
94 def get(self, request, uidb64, token):
95
96 try:
97 uid = force_str(urlsafe_base64_decode(uidb64))
98 user = get_user_model().objects.get(pk=uid)
99
100 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
101 user = None
102
103 redirect_link = {"href": "/", "label": "Back to Main"}
104
105 template_var = {
106 "link": redirect_link,
107 }
108
109 if user is not None and account_activation_token.check_token(user, token):
110 profile = user.profile
111
112 if profile.is_verified:
113 template_var["title"] = "Email Already Verified"
114 template_var["content"] = "You have already verified your email."
115 else:
116 profile.is_verified = True
117 profile.save()
118
119 template_var["title"] = "Email Verification Successful"
120 template_var["content"] = "Thank you for verifying your email."
121 else:
122 # invalid link
123 template_var["title"] = "Email Verification Error"
124 template_var["content"] = "Email could not be verified"
125
126 return TemplateResponse(request, "general_message.html", template_var)
127
128
129 class PasswordResetView(auth_views.PasswordResetView):
130 template_name = "accounts/users/password_reset.html"
131 email_template_name = "accounts/users/password_reset_email.html"
132 subject_template_name = "accounts/users/password_reset_subject.txt"
133 from_email = settings.EMAIL_HOST_USER
134 success_url = reverse_lazy("accounts_password_reset_done")
135
136
137 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
138 template_name = "accounts/users/password_reset_done.html"
139
140
141 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
142 template_name = "accounts/users/password_reset_confirm.html"
143 success_url = reverse_lazy("accounts_password_reset_complete")
144
145
146 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
147 template_name = "accounts/users/password_reset_complete.html"
148
149
150 class SettingsView(LoginRequiredMixin, UpdateView):
151 """A form view to edit Profile"""
152
153 login_url = "accounts_login"
154 form_class = ProfileEditForm
155 success_url = reverse_lazy("accounts_settings")
156 template_name = "accounts/settings.html"
157
158 def get_object(self, queryset=None):
159 return Profile.objects.get(user=self.request.user)
160
161 def get_initial(self):
162 profile = Profile.objects.get(user=self.request.user)
163 self.initial.update(
164 {
165 "username": profile.user.username,
166 "email": profile.user.email,
167 "first_name": profile.first_name or None,
168 "last_name": profile.last_name or None,
169 "about_me": profile.about_me or None,
170 "profile_image": profile.profile_image or None,
171 }
172 )
173 return super(SettingsView, self).get_initial()
174
175
176 class UserProfileView(LoginRequiredMixin, View):
177 """A view that shows profile for authorized users"""
178
179 def get(self, request, username=None):
180 profile = get_object_or_404(Profile, user__username=username)
181
182 return TemplateResponse(
183 request,
184 "account.html",
185 {
186 "profile": profile,
187 },
188 )
189
190
191 class ProfileFollowing(LoginRequiredMixin, View):
192 """
193 A view that shows list of profiles
194 that profile with given username is following
195 """
196
197 def get(self, request, username=None):
198 profile = get_object_or_404(Profile, user__username=username)
199
200 return TemplateResponse(
201 request,
202 "profile_following.html",
203 {
204 "profile": profile,
205 },
206 )
207
208
209 @login_required
210 def expunge_user(request):
211 """
212 Delete User Information
213 """
214
215 user_model = get_user_model()
216 user = get_object_or_404(user_model, username=request.user.username)
217
218 profile = get_object_or_404(Profile, user=user)
219
220 # Expunge personally identifiable data in user
221 expunged_user_data = {
222 "is_active": False,
223 "email": "",
224 "first_name": "",
225 "last_name": "",
226 "username": f"expunged-{ user.id }",
227 }
228 user.__dict__.update(expunged_user_data)
229 user.save()
230
231 # Expunge personally identifiable data in profile
232 expunged_profile_data = {
233 "first_name": "",
234 "last_name": "",
235 "about_me": "",
236 }
237 profile.__dict__.update(expunged_profile_data)
238 profile.save()
239
240 return redirect("/")
241
[end of project/accounts/views.py]
[start of project/accounts/urls.py]
1 from accounts.views import (
2 PasswordResetCompleteView,
3 PasswordResetConfirmView,
4 PasswordResetDoneView,
5 PasswordResetView,
6 ProfileActivationView,
7 ProfileFollow,
8 ProfileUnfollow,
9 RegisterView,
10 SettingsView,
11 ProfileFollowing,
12 UserProfileView,
13 expunge_user,
14 )
15 from django.contrib.auth import views as auth_views
16 from django.urls import path
17
18 urlpatterns = [
19 path(
20 "login/",
21 auth_views.LoginView.as_view(template_name="accounts/register/login.html"),
22 name="accounts_login",
23 ),
24 path("logout/", auth_views.LogoutView.as_view(), name="accounts_logout"),
25 path("register/", RegisterView.as_view(), name="accounts_register"),
26 path("settings/", SettingsView.as_view(), name="accounts_settings"),
27 path(
28 "activate_account/<uidb64>/<token>/",
29 ProfileActivationView.as_view(),
30 name="accounts_activate",
31 ),
32 path("profile/<str:username>/", UserProfileView.as_view(), name="profile"),
33 path(
34 "profile/<str:username>/follow", ProfileFollow.as_view(), name="profile-follow"
35 ),
36 path(
37 "profile/<str:username>/unfollow",
38 ProfileUnfollow.as_view(),
39 name="profile-unfollow",
40 ),
41 path(
42 "profile/<str:username>/following",
43 ProfileFollowing.as_view(),
44 name="profile-following",
45 ),
46 path(
47 "accounts/password_reset/",
48 PasswordResetView.as_view(),
49 name="accounts_password_reset",
50 ),
51 path(
52 "accounts/password_reset_done/",
53 PasswordResetDoneView.as_view(),
54 name="accounts_password_reset_done",
55 ),
56 path(
57 "accounts/password_reset_confirm/<uidb64>/<token>/",
58 PasswordResetConfirmView.as_view(),
59 name="accounts_password_reset_confirm",
60 ),
61 path(
62 "accounts/password_reset_complete/",
63 PasswordResetCompleteView.as_view(),
64 name="accounts_password_reset_complete",
65 ),
66 path("accounts/expunge/", expunge_user, name="expunge_user"),
67 ]
68
[end of project/accounts/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/project/accounts/urls.py b/project/accounts/urls.py
--- a/project/accounts/urls.py
+++ b/project/accounts/urls.py
@@ -10,6 +10,7 @@
SettingsView,
ProfileFollowing,
UserProfileView,
+ UserCivis,
expunge_user,
)
from django.contrib.auth import views as auth_views
@@ -43,6 +44,11 @@
ProfileFollowing.as_view(),
name="profile-following",
),
+ path(
+ "profile/<str:username>/civis/",
+ UserCivis.as_view(),
+ name="user-civis",
+ ),
path(
"accounts/password_reset/",
PasswordResetView.as_view(),
diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -206,6 +206,24 @@
)
+class UserCivis(LoginRequiredMixin, View):
+ """
+ A view that shows list of civis
+ that profile with given username created
+ """
+
+ def get(self, request, username=None):
+ profile = get_object_or_404(Profile, user__username=username)
+ user = profile.user
+ civis = user.civis.all()
+
+ return TemplateResponse(
+ request,
+ "user_civis.html",
+ {"profile": profile, "civis": civis},
+ )
+
+
@login_required
def expunge_user(request):
"""
| {"golden_diff": "diff --git a/project/accounts/urls.py b/project/accounts/urls.py\n--- a/project/accounts/urls.py\n+++ b/project/accounts/urls.py\n@@ -10,6 +10,7 @@\n SettingsView,\n ProfileFollowing,\n UserProfileView,\n+ UserCivis,\n expunge_user,\n )\n from django.contrib.auth import views as auth_views\n@@ -43,6 +44,11 @@\n ProfileFollowing.as_view(),\n name=\"profile-following\",\n ),\n+ path(\n+ \"profile/<str:username>/civis/\",\n+ UserCivis.as_view(),\n+ name=\"user-civis\",\n+ ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\ndiff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -206,6 +206,24 @@\n )\n \n \n+class UserCivis(LoginRequiredMixin, View):\n+ \"\"\"\n+ A view that shows list of civis\n+ that profile with given username created\n+ \"\"\"\n+\n+ def get(self, request, username=None):\n+ profile = get_object_or_404(Profile, user__username=username)\n+ user = profile.user\n+ civis = user.civis.all()\n+\n+ return TemplateResponse(\n+ request,\n+ \"user_civis.html\",\n+ {\"profile\": profile, \"civis\": civis},\n+ )\n+\n+\n @login_required\n def expunge_user(request):\n \"\"\"\n", "issue": "Add \"Civis\" tab to the user profile view\nWe recently removed some features from the user Profile view and now want to reimplement them using only Django.\r\n\r\nThis task will be to add a \"Civis\" tab to the user profile view. The tab should link to a sub-path of the user profile so it is clear to the viewer that they are viewing Civis related to the user.\r\n\r\n## Task\r\nAll of these tasks should be done in the `accounts` app.\r\n\r\n- [ ] create a Django view called `UserCivis` (in `accounts/views.py`)\r\n- [ ] create a URL definition `profile/<str:username>/civis` that renders the `UserCivis` view ( in `accounts/urls.py`)\r\n- [ ] ensure the `UserCivis` view context contains a list of Civis created by the relevant user (given via the `username` in the URL)\r\n- [ ] create a template to render the user civis - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)\r\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n 
self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass 
SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"profile_following.html\",\n {\n \"profile\": profile,\n },\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n ProfileFollowing,\n UserProfileView,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"profile/<str:username>/following\",\n ProfileFollowing.as_view(),\n name=\"profile-following\",\n 
),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}]} | 3,521 | 344 |
gh_patches_debug_8027 | rasdani/github-patches | git_diff | pytorch__examples-1098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
minGPT-ddp: AttributeError: 'Trainer' object has no attribute 'dataloader'
When executing examples/distributed/minGPT-ddp/mingpt/main.py, this error is raised while trying to train minGPT.
Python version: main branch
## Possible Solution
113 def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):
114 #self.dataloader.sampler.set_epoch(epoch)
115 dataloader.sampler.set_epoch(epoch)
## Steps to Reproduce
Just run main.py
## Failure Logs [if any]
Traceback (most recent call last):
File "/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/main.py", line 41, in <module>
main()
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/main.py", line 90, in decorated_main
_run_hydra(
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py", line 389, in _run_hydra
_run_app(
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py", line 452, in _run_app
run_and_report(
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py", line 216, in run_and_report
raise ex
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py", line 213, in run_and_report
return func()
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py", line 453, in <lambda>
lambda: hydra.run(
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/hydra.py", line 132, in run
_ = ret.return_value
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/core/utils.py", line 260, in return_value
raise self._return_value
File "/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/core/utils.py", line 186, in run_job
ret.return_value = task_function(task_cfg)
File "/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/main.py", line 35, in main
trainer.train()
File "/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/trainer.py", line 144, in train
self._run_epoch(epoch, self.train_loader, train=True)
File "/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/trainer.py", line 114, in _run_epoch
self.dataloader.sampler.set_epoch(epoch)
AttributeError: 'Trainer' object has no attribute 'dataloader'. Did you mean: 'test_loader'?
</issue>
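A minimal sketch of the fix proposed above — using the `dataloader` argument instead of the non-existent `self.dataloader` attribute — is shown below. The surrounding class skeleton is an illustrative assumption, not the repository's actual `Trainer`:

```python
from torch.utils.data import DataLoader


class TrainerSketch:
    """Illustrative stand-in for the real Trainer; only _run_epoch matters here."""

    def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True) -> None:
        # Use the dataloader passed in by train(); the Trainer only stores
        # train_loader / test_loader, never a single `dataloader` attribute.
        dataloader.sampler.set_epoch(epoch)  # valid when sampler is a DistributedSampler
        for step, (source, targets) in enumerate(dataloader):
            pass  # forward/backward/logging would go here
```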
<code>
[start of distributed/minGPT-ddp/mingpt/trainer.py]
1 """
2 Simple training loop; Boilerplate that could apply to any arbitrary neural network,
3 so nothing in this file really has anything to do with GPT specifically.
4 """
5
6 from dataclasses import dataclass, asdict
7 from collections import OrderedDict
8 from typing import Optional, Any, Dict
9 import os
10
11 import torch
12 from torch.utils.data import Dataset, DataLoader
13 from torch.nn.parallel import DistributedDataParallel as DDP
14 from torch.utils.data.distributed import DistributedSampler
15
16 import boto3
17 from urllib.parse import urlparse
18 import fsspec
19 import io
20
21 @dataclass
22 class TrainerConfig:
23 max_epochs: int = None
24 batch_size: int = None
25 data_loader_workers: int = None
26 grad_norm_clip: float = None
27 snapshot_path: Optional[str] = None
28 save_every: int = None
29 use_amp: bool = None
30
31 @dataclass
32 class Snapshot:
33 model_state: 'OrderedDict[str, torch.Tensor]'
34 optimizer_state: Dict[str, Any]
35 finished_epoch: int
36
37 def upload_to_s3(obj, dst):
38 buffer = io.BytesIO()
39 torch.save(obj, buffer)
40 buffer.seek(0)
41 dst = urlparse(dst, allow_fragments=False)
42 boto3.client('s3').upload_fileobj(buffer, dst.netloc, dst.path.lstrip('/'))
43
44 class Trainer:
45
46 def __init__(self, trainer_config: TrainerConfig, model, optimizer, train_dataset, test_dataset=None):
47 self.config = trainer_config
48 # set torchrun variables
49 self.local_rank = int(os.environ["LOCAL_RANK"])
50 self.global_rank = int(os.environ["RANK"])
51 # data stuff
52 self.train_dataset = train_dataset
53 self.train_loader = self._prepare_dataloader(train_dataset)
54 self.test_loader = self._prepare_dataloader(test_dataset) if test_dataset else None
55 # initialize train states
56 self.epochs_run = 0
57 self.model = model.to(self.local_rank)
58 self.optimizer = optimizer
59 self.save_every = self.config.save_every
60 if self.config.use_amp:
61 self.scaler = torch.cuda.amp.GradScaler()
62 # load snapshot if available. only necessary on the first node.
63 if self.config.snapshot_path is None:
64 self.config.snapshot_path = "snapshot.pt"
65 self._load_snapshot()
66 # wrap with DDP. this step will synch model across all the processes.
67 self.model = DDP(self.model, device_ids=[self.local_rank])
68
69 def _prepare_dataloader(self, dataset: Dataset):
70 return DataLoader(
71 dataset,
72 batch_size=self.config.batch_size,
73 pin_memory=True,
74 shuffle=False,
75 num_workers=self.config.data_loader_workers,
76 sampler=DistributedSampler(dataset)
77 )
78
79 def _load_snapshot(self):
80 try:
81 snapshot = fsspec.open(self.config.snapshot_path)
82 with snapshot as f:
83 snapshot_data = torch.load(f, map_location="cpu")
84 except FileNotFoundError:
85 print("Snapshot not found. Training model from scratch")
86 return
87
88 snapshot = Snapshot(**snapshot_data)
89 self.model.load_state_dict(snapshot.model_state)
90 self.optimizer.load_state_dict(snapshot.optimizer_state)
91 self.epochs_run = snapshot.finished_epoch
92 print(f"Resuming training from snapshot at Epoch {self.epochs_run}")
93
94
95 def _run_batch(self, source, targets, train: bool = True) -> float:
96 with torch.set_grad_enabled(train), torch.amp.autocast(device_type="cuda", dtype=torch.float16, enabled=(self.config.use_amp)):
97 _, loss = self.model(source, targets)
98
99 if train:
100 self.optimizer.zero_grad(set_to_none=True)
101 if self.config.use_amp:
102 self.scaler.scale(loss).backward()
103 torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)
104 self.scaler.step(self.optimizer)
105 self.scaler.update()
106 else:
107 loss.backward()
108 torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)
109 self.optimizer.step()
110
111 return loss.item()
112
113 def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):
114 self.dataloader.sampler.set_epoch(epoch)
115 for iter, (source, targets) in enumerate(dataloader):
116 step_type = "Train" if train else "Eval"
117 source = source.to(self.local_rank)
118 targets = targets.to(self.local_rank)
119 batch_loss = self._run_batch(source, targets, train)
120 if iter % 100 == 0:
121 print(f"[GPU{self.global_rank}] Epoch {epoch} | Iter {iter} | {step_type} Loss {batch_loss:.5f}")
122
123 def _save_snapshot(self, epoch):
124 # capture snapshot
125 model = self.model
126 raw_model = model.module if hasattr(model, "module") else model
127 snapshot = Snapshot(
128 model_state=raw_model.state_dict(),
129 optimizer_state=self.optimizer.state_dict(),
130 finished_epoch=epoch
131 )
132 # save snapshot
133 snapshot = asdict(snapshot)
134 if self.config.snapshot_path.startswith("s3://"):
135 upload_to_s3(snapshot, self.config.snapshot_path)
136 else:
137 torch.save(snapshot, self.config.snapshot_path)
138
139 print(f"Snapshot saved at epoch {epoch}")
140
141 def train(self):
142 for epoch in range(self.epochs_run, self.config.max_epochs):
143 epoch += 1
144 self._run_epoch(epoch, self.train_loader, train=True)
145 if self.local_rank == 0 and epoch % self.save_every == 0:
146 self._save_snapshot(epoch)
147 # eval run
148 if self.test_loader:
149 self._run_epoch(epoch, self.test_loader, train=False)
150
[end of distributed/minGPT-ddp/mingpt/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/minGPT-ddp/mingpt/trainer.py b/distributed/minGPT-ddp/mingpt/trainer.py
--- a/distributed/minGPT-ddp/mingpt/trainer.py
+++ b/distributed/minGPT-ddp/mingpt/trainer.py
@@ -111,7 +111,7 @@
return loss.item()
def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):
- self.dataloader.sampler.set_epoch(epoch)
+ dataloader.sampler.set_epoch(epoch)
for iter, (source, targets) in enumerate(dataloader):
step_type = "Train" if train else "Eval"
source = source.to(self.local_rank)
| {"golden_diff": "diff --git a/distributed/minGPT-ddp/mingpt/trainer.py b/distributed/minGPT-ddp/mingpt/trainer.py\n--- a/distributed/minGPT-ddp/mingpt/trainer.py\n+++ b/distributed/minGPT-ddp/mingpt/trainer.py\n@@ -111,7 +111,7 @@\n return loss.item()\n \n def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):\n- self.dataloader.sampler.set_epoch(epoch)\n+ dataloader.sampler.set_epoch(epoch)\n for iter, (source, targets) in enumerate(dataloader):\n step_type = \"Train\" if train else \"Eval\"\n source = source.to(self.local_rank)\n", "issue": "minGPT-ddp: AttributeError: 'Trainer' object has no attribute 'dataloader'\nWhen executing examples/distributed/minGPT-ddp/mingpt/main.py\r\n\r\nThis error is raised when trying to train minGPT.\r\n\r\nPython version: main branch\r\n\r\n## Possible Solution\r\n113 def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):\r\n114 #self.dataloader.sampler.set_epoch(epoch)\r\n115 dataloader.sampler.set_epoch(epoch)\r\n\r\n\r\n## Steps to Reproduce\r\nJust run main.py\r\n\r\n## Failure Logs [if any]\r\n\r\nTraceback (most recent call last):\r\n File \"/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/main.py\", line 41, in <module>\r\n main()\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/main.py\", line 90, in decorated_main\r\n _run_hydra(\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py\", line 389, in _run_hydra\r\n _run_app(\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py\", line 452, in _run_app\r\n run_and_report(\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py\", line 216, in run_and_report\r\n raise ex\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py\", line 213, in run_and_report\r\n return func()\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/utils.py\", line 453, in <lambda>\r\n lambda: hydra.run(\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/_internal/hydra.py\", line 132, in run\r\n _ = ret.return_value\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/core/utils.py\", line 260, in return_value\r\n raise self._return_value\r\n File \"/home/users/fmansouri/.local/lib/python3.10/site-packages/hydra/core/utils.py\", line 186, in run_job\r\n ret.return_value = task_function(task_cfg)\r\n File \"/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/main.py\", line 35, in main\r\n trainer.train()\r\n File \"/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/trainer.py\", line 144, in train\r\n self._run_epoch(epoch, self.train_loader, train=True)\r\n File \"/mnt/tier1/project/lxp/fmansouri/pytorch/examples/distributed/minGPT-ddp/mingpt/trainer.py\", line 114, in _run_epoch\r\n self.dataloader.sampler.set_epoch(epoch)\r\nAttributeError: 'Trainer' object has no attribute 'dataloader'. 
Did you mean: 'test_loader'?\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nSimple training loop; Boilerplate that could apply to any arbitrary neural network,\nso nothing in this file really has anything to do with GPT specifically.\n\"\"\"\n\nfrom dataclasses import dataclass, asdict\nfrom collections import OrderedDict\nfrom typing import Optional, Any, Dict\nimport os\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport boto3\nfrom urllib.parse import urlparse\nimport fsspec\nimport io\n\n@dataclass\nclass TrainerConfig:\n max_epochs: int = None\n batch_size: int = None\n data_loader_workers: int = None\n grad_norm_clip: float = None\n snapshot_path: Optional[str] = None\n save_every: int = None\n use_amp: bool = None\n\n@dataclass\nclass Snapshot:\n model_state: 'OrderedDict[str, torch.Tensor]'\n optimizer_state: Dict[str, Any]\n finished_epoch: int\n\ndef upload_to_s3(obj, dst):\n buffer = io.BytesIO()\n torch.save(obj, buffer)\n buffer.seek(0)\n dst = urlparse(dst, allow_fragments=False)\n boto3.client('s3').upload_fileobj(buffer, dst.netloc, dst.path.lstrip('/'))\n\nclass Trainer:\n\n def __init__(self, trainer_config: TrainerConfig, model, optimizer, train_dataset, test_dataset=None):\n self.config = trainer_config\n # set torchrun variables\n self.local_rank = int(os.environ[\"LOCAL_RANK\"])\n self.global_rank = int(os.environ[\"RANK\"]) \n # data stuff\n self.train_dataset = train_dataset\n self.train_loader = self._prepare_dataloader(train_dataset)\n self.test_loader = self._prepare_dataloader(test_dataset) if test_dataset else None\n # initialize train states\n self.epochs_run = 0\n self.model = model.to(self.local_rank)\n self.optimizer = optimizer \n self.save_every = self.config.save_every\n if self.config.use_amp:\n self.scaler = torch.cuda.amp.GradScaler()\n # load snapshot if available. only necessary on the first node.\n if self.config.snapshot_path is None:\n self.config.snapshot_path = \"snapshot.pt\"\n self._load_snapshot()\n # wrap with DDP. this step will synch model across all the processes.\n self.model = DDP(self.model, device_ids=[self.local_rank])\n \n def _prepare_dataloader(self, dataset: Dataset):\n return DataLoader(\n dataset,\n batch_size=self.config.batch_size,\n pin_memory=True,\n shuffle=False,\n num_workers=self.config.data_loader_workers,\n sampler=DistributedSampler(dataset)\n )\n\n def _load_snapshot(self):\n try:\n snapshot = fsspec.open(self.config.snapshot_path)\n with snapshot as f:\n snapshot_data = torch.load(f, map_location=\"cpu\")\n except FileNotFoundError:\n print(\"Snapshot not found. 
Training model from scratch\")\n return \n\n snapshot = Snapshot(**snapshot_data)\n self.model.load_state_dict(snapshot.model_state)\n self.optimizer.load_state_dict(snapshot.optimizer_state)\n self.epochs_run = snapshot.finished_epoch\n print(f\"Resuming training from snapshot at Epoch {self.epochs_run}\")\n\n\n def _run_batch(self, source, targets, train: bool = True) -> float:\n with torch.set_grad_enabled(train), torch.amp.autocast(device_type=\"cuda\", dtype=torch.float16, enabled=(self.config.use_amp)):\n _, loss = self.model(source, targets)\n \n if train:\n self.optimizer.zero_grad(set_to_none=True)\n if self.config.use_amp: \n self.scaler.scale(loss).backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)\n self.optimizer.step()\n \n return loss.item()\n\n def _run_epoch(self, epoch: int, dataloader: DataLoader, train: bool = True):\n self.dataloader.sampler.set_epoch(epoch)\n for iter, (source, targets) in enumerate(dataloader):\n step_type = \"Train\" if train else \"Eval\"\n source = source.to(self.local_rank)\n targets = targets.to(self.local_rank)\n batch_loss = self._run_batch(source, targets, train)\n if iter % 100 == 0:\n print(f\"[GPU{self.global_rank}] Epoch {epoch} | Iter {iter} | {step_type} Loss {batch_loss:.5f}\")\n\n def _save_snapshot(self, epoch):\n # capture snapshot\n model = self.model\n raw_model = model.module if hasattr(model, \"module\") else model\n snapshot = Snapshot(\n model_state=raw_model.state_dict(),\n optimizer_state=self.optimizer.state_dict(),\n finished_epoch=epoch\n )\n # save snapshot\n snapshot = asdict(snapshot)\n if self.config.snapshot_path.startswith(\"s3://\"):\n upload_to_s3(snapshot, self.config.snapshot_path)\n else:\n torch.save(snapshot, self.config.snapshot_path)\n \n print(f\"Snapshot saved at epoch {epoch}\")\n\n def train(self):\n for epoch in range(self.epochs_run, self.config.max_epochs):\n epoch += 1\n self._run_epoch(epoch, self.train_loader, train=True)\n if self.local_rank == 0 and epoch % self.save_every == 0:\n self._save_snapshot(epoch)\n # eval run\n if self.test_loader:\n self._run_epoch(epoch, self.test_loader, train=False)\n", "path": "distributed/minGPT-ddp/mingpt/trainer.py"}]} | 2,843 | 165 |
gh_patches_debug_12226 | rasdani/github-patches | git_diff | googleapis__python-bigquery-643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deps: expand extras to support pyarrow v4
We're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py`
</issue>
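As a rough sketch (not the full `setup.py`), supporting pyarrow 4.x is mostly a matter of raising the upper bound on the relevant pins; the exact `< 5.0dev` ceiling below is an assumption chosen to admit the 4.x series:

```python
# Illustrative extras snippet only; version ceilings are assumptions.
extras = {
    "bqstorage": [
        "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
        "grpcio >= 1.32.0, < 2.0dev",
        "pyarrow >= 1.0.0, < 5.0dev",
    ],
    "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"],
    "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"],
}
```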
<code>
[start of setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "packaging >= 14.3",
37 "protobuf >= 3.12.0",
38 "requests >= 2.18.0, < 3.0.0dev",
39 ]
40 extras = {
41 "bqstorage": [
42 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
44 # installed, even though `google-cloud-bigquery-storage` specifies it
45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
46 # See: https://github.com/googleapis/python-bigquery/issues/83 The
47 # grpc.Channel.close() method isn't added until 1.32.0.
48 # https://github.com/grpc/grpc/pull/15254
49 "grpcio >= 1.32.0, < 2.0dev",
50 "pyarrow >= 1.0.0, < 4.0dev",
51 ],
52 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
53 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
54 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
55 "opentelemetry": [
56 "opentelemetry-api >= 0.11b0",
57 "opentelemetry-sdk >= 0.11b0",
58 "opentelemetry-instrumentation >= 0.11b0",
59 ],
60 }
61
62 all_extras = []
63
64 for extra in extras:
65 # Exclude this extra from all to avoid overly strict dependencies on core
66 # libraries such as pyarrow.
67 # https://github.com/googleapis/python-bigquery/issues/563
68 if extra in {"bignumeric_type"}:
69 continue
70 all_extras.extend(extras[extra])
71
72 extras["all"] = all_extras
73
74 # Setup boilerplate below this line.
75
76 package_root = os.path.abspath(os.path.dirname(__file__))
77
78 readme_filename = os.path.join(package_root, "README.rst")
79 with io.open(readme_filename, encoding="utf-8") as readme_file:
80 readme = readme_file.read()
81
82 version = {}
83 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
84 exec(fp.read(), version)
85 version = version["__version__"]
86
87 # Only include packages under the 'google' namespace. Do not include tests,
88 # benchmarks, etc.
89 packages = [
90 package
91 for package in setuptools.PEP420PackageFinder.find()
92 if package.startswith("google")
93 ]
94
95 # Determine which namespaces are needed.
96 namespaces = ["google"]
97 if "google.cloud" in packages:
98 namespaces.append("google.cloud")
99
100
101 setuptools.setup(
102 name=name,
103 version=version,
104 description=description,
105 long_description=readme,
106 author="Google LLC",
107 author_email="[email protected]",
108 license="Apache 2.0",
109 url="https://github.com/googleapis/python-bigquery",
110 classifiers=[
111 release_status,
112 "Intended Audience :: Developers",
113 "License :: OSI Approved :: Apache Software License",
114 "Programming Language :: Python",
115 "Programming Language :: Python :: 3",
116 "Programming Language :: Python :: 3.6",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Operating System :: OS Independent",
121 "Topic :: Internet",
122 ],
123 platforms="Posix; MacOS X; Windows",
124 packages=packages,
125 namespace_packages=namespaces,
126 install_requires=dependencies,
127 extras_require=extras,
128 python_requires=">=3.6, <3.10",
129 include_package_data=True,
130 zip_safe=False,
131 )
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -47,10 +47,10 @@
# grpc.Channel.close() method isn't added until 1.32.0.
# https://github.com/grpc/grpc/pull/15254
"grpcio >= 1.32.0, < 2.0dev",
- "pyarrow >= 1.0.0, < 4.0dev",
+ "pyarrow >= 1.0.0, < 5.0dev",
],
- "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
- "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
+ "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"],
+ "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"],
"tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
"opentelemetry": [
"opentelemetry-api >= 0.11b0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -47,10 +47,10 @@\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n- \"pyarrow >= 1.0.0, < 4.0dev\",\n+ \"pyarrow >= 1.0.0, < 5.0dev\",\n ],\n- \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n- \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n+ \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n+ \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n", "issue": "deps: expand extras to support pyarrow v4\nWe're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py`\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,029 | 302 |
gh_patches_debug_15741 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom output function in console exporter
What do folks think of making the output function used by `ConsoleSpanExporter.export` customizable? Currently it uses `print(span)` but there are situations where a user may want to perform a transform on `span` before outputting it, or to output another data structure entirely.
To be more concrete, we currently have downstream infrastructure that requires our telemetry data to be in a specific dictionary format when written to stdout. We'd like to still use the `ConsoleSpanExporter` to write to stdout, but need to convert the `span` to our `dict` object before doing so.
Currently we are addressing this by writing a custom exporter class that inherits from `SpanExporter` and writes our custom data to stdout in its `export` method. Another solution though would be to allow `ConsoleSpanExporter` to take a keyword argument to its `__init__` method that would default to `print` but would also allow custom output functions. It would then use the passed function in `ConsoleSpanExporter.export` to output to stdout.
Do folks see any value in adding this functionality here, rather than requiring users who want to do custom console output to write their own exporter classes? Also happy to write up the proposed solution as a PR if folks think that would be helpful.
</issue>
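A minimal, dependency-free sketch of the customization being proposed — a console exporter that accepts an output stream and a formatter callable — might look like the following; the names and defaults are illustrative assumptions, not the SDK's actual API:

```python
import sys
import typing


class ConsoleSpanExporterSketch:
    """Illustrative console exporter with a pluggable output stream and formatter."""

    def __init__(
        self,
        out: typing.IO = sys.stdout,
        formatter: typing.Callable[[typing.Any], str] = str,
    ) -> None:
        self.out = out
        self.formatter = formatter

    def export(self, spans: typing.Sequence[typing.Any]) -> None:
        for span in spans:
            self.out.write(self.formatter(span) + "\n")
```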
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py]
1 # Copyright 2019, OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import collections
16 import logging
17 import threading
18 import typing
19 from enum import Enum
20
21 from opentelemetry.context import Context
22 from opentelemetry.trace import DefaultSpan
23 from opentelemetry.util import time_ns
24
25 from .. import Span, SpanProcessor
26
27 logger = logging.getLogger(__name__)
28
29
30 class SpanExportResult(Enum):
31 SUCCESS = 0
32 FAILED_RETRYABLE = 1
33 FAILED_NOT_RETRYABLE = 2
34
35
36 class SpanExporter:
37 """Interface for exporting spans.
38
39 Interface to be implemented by services that want to export recorded in
40 its own format.
41
42 To export data this MUST be registered to the :class`opentelemetry.sdk.trace.Tracer` using a
43 `SimpleExportSpanProcessor` or a `BatchExportSpanProcessor`.
44 """
45
46 def export(self, spans: typing.Sequence[Span]) -> "SpanExportResult":
47 """Exports a batch of telemetry data.
48
49 Args:
50 spans: The list of `opentelemetry.trace.Span` objects to be exported
51
52 Returns:
53 The result of the export
54 """
55
56 def shutdown(self) -> None:
57 """Shuts down the exporter.
58
59 Called when the SDK is shut down.
60 """
61
62
63 class SimpleExportSpanProcessor(SpanProcessor):
64 """Simple SpanProcessor implementation.
65
66 SimpleExportSpanProcessor is an implementation of `SpanProcessor` that
67 passes ended spans directly to the configured `SpanExporter`.
68 """
69
70 def __init__(self, span_exporter: SpanExporter):
71 self.span_exporter = span_exporter
72
73 def on_start(self, span: Span) -> None:
74 pass
75
76 def on_end(self, span: Span) -> None:
77 with Context.use(suppress_instrumentation=True):
78 try:
79 self.span_exporter.export((span,))
80 # pylint: disable=broad-except
81 except Exception:
82 logger.exception("Exception while exporting Span.")
83
84 def shutdown(self) -> None:
85 self.span_exporter.shutdown()
86
87 def force_flush(self, timeout_millis: int = 30000) -> bool:
88 # pylint: disable=unused-argument
89 return True
90
91
92 class BatchExportSpanProcessor(SpanProcessor):
93 """Batch span processor implementation.
94
95 BatchExportSpanProcessor is an implementation of `SpanProcessor` that
96 batches ended spans and pushes them to the configured `SpanExporter`.
97 """
98
99 _FLUSH_TOKEN_SPAN = DefaultSpan(context=None)
100
101 def __init__(
102 self,
103 span_exporter: SpanExporter,
104 max_queue_size: int = 2048,
105 schedule_delay_millis: float = 5000,
106 max_export_batch_size: int = 512,
107 ):
108 if max_queue_size <= 0:
109 raise ValueError("max_queue_size must be a positive integer.")
110
111 if schedule_delay_millis <= 0:
112 raise ValueError("schedule_delay_millis must be positive.")
113
114 if max_export_batch_size <= 0:
115 raise ValueError(
116 "max_export_batch_size must be a positive integer."
117 )
118
119 if max_export_batch_size > max_queue_size:
120 raise ValueError(
121 "max_export_batch_size must be less than and equal to max_export_batch_size."
122 )
123
124 self.span_exporter = span_exporter
125 self.queue = collections.deque(
126 [], max_queue_size
127 ) # type: typing.Deque[Span]
128 self.worker_thread = threading.Thread(target=self.worker, daemon=True)
129 self.condition = threading.Condition(threading.Lock())
130 self.flush_condition = threading.Condition(threading.Lock())
131 # flag to indicate that there is a flush operation on progress
132 self._flushing = False
133 self.schedule_delay_millis = schedule_delay_millis
134 self.max_export_batch_size = max_export_batch_size
135 self.max_queue_size = max_queue_size
136 self.done = False
137 # flag that indicates that spans are being dropped
138 self._spans_dropped = False
139 # precallocated list to send spans to exporter
140 self.spans_list = [
141 None
142 ] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]]
143 self.worker_thread.start()
144
145 def on_start(self, span: Span) -> None:
146 pass
147
148 def on_end(self, span: Span) -> None:
149 if self.done:
150 logger.warning("Already shutdown, dropping span.")
151 return
152 if len(self.queue) == self.max_queue_size:
153 if not self._spans_dropped:
154 logger.warning("Queue is full, likely spans will be dropped.")
155 self._spans_dropped = True
156
157 self.queue.appendleft(span)
158
159 if len(self.queue) >= self.max_queue_size // 2:
160 with self.condition:
161 self.condition.notify()
162
163 def worker(self):
164 timeout = self.schedule_delay_millis / 1e3
165 while not self.done:
166 if (
167 len(self.queue) < self.max_export_batch_size
168 and not self._flushing
169 ):
170 with self.condition:
171 self.condition.wait(timeout)
172 if not self.queue:
173 # spurious notification, let's wait again
174 continue
175 if self.done:
176 # missing spans will be sent when calling flush
177 break
178
179 # substract the duration of this export call to the next timeout
180 start = time_ns()
181 self.export()
182 end = time_ns()
183 duration = (end - start) / 1e9
184 timeout = self.schedule_delay_millis / 1e3 - duration
185
186 # be sure that all spans are sent
187 self._drain_queue()
188
189 def export(self) -> None:
190 """Exports at most max_export_batch_size spans."""
191 idx = 0
192 notify_flush = False
193 # currently only a single thread acts as consumer, so queue.pop() will
194 # not raise an exception
195 while idx < self.max_export_batch_size and self.queue:
196 span = self.queue.pop()
197 if span is self._FLUSH_TOKEN_SPAN:
198 notify_flush = True
199 else:
200 self.spans_list[idx] = span
201 idx += 1
202 with Context.use(suppress_instrumentation=True):
203 try:
204 # Ignore type b/c the Optional[None]+slicing is too "clever"
205 # for mypy
206 self.span_exporter.export(
207 self.spans_list[:idx]
208 ) # type: ignore
209 # pylint: disable=broad-except
210 except Exception:
211 logger.exception("Exception while exporting Span batch.")
212
213 if notify_flush:
214 with self.flush_condition:
215 self.flush_condition.notify()
216
217 # clean up list
218 for index in range(idx):
219 self.spans_list[index] = None
220
221 def _drain_queue(self):
222 """"Export all elements until queue is empty.
223
224 Can only be called from the worker thread context because it invokes
225 `export` that is not thread safe.
226 """
227 while self.queue:
228 self.export()
229
230 def force_flush(self, timeout_millis: int = 30000) -> bool:
231 if self.done:
232 logger.warning("Already shutdown, ignoring call to force_flush().")
233 return True
234
235 self._flushing = True
236 self.queue.appendleft(self._FLUSH_TOKEN_SPAN)
237
238 # wake up worker thread
239 with self.condition:
240 self.condition.notify_all()
241
242 # wait for token to be processed
243 with self.flush_condition:
244 ret = self.flush_condition.wait(timeout_millis / 1e3)
245
246 self._flushing = False
247
248 if not ret:
249 logger.warning("Timeout was exceeded in force_flush().")
250 return ret
251
252 def shutdown(self) -> None:
253 # signal the worker thread to finish and then wait for it
254 self.done = True
255 with self.condition:
256 self.condition.notify_all()
257 self.worker_thread.join()
258 self.span_exporter.shutdown()
259
260
261 class ConsoleSpanExporter(SpanExporter):
262 """Implementation of :class:`SpanExporter` that prints spans to the
263 console.
264
265 This class can be used for diagnostic purposes. It prints the exported
266 spans to the console STDOUT.
267 """
268
269 def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
270 for span in spans:
271 print(span)
272 return SpanExportResult.SUCCESS
273
[end of opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py
@@ -14,6 +14,7 @@
import collections
import logging
+import sys
import threading
import typing
from enum import Enum
@@ -266,7 +267,15 @@
spans to the console STDOUT.
"""
+ def __init__(
+ self,
+ out: typing.IO = sys.stdout,
+ formatter: typing.Callable[[Span], str] = str,
+ ):
+ self.out = out
+ self.formatter = formatter
+
def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
for span in spans:
- print(span)
+ self.out.write(self.formatter(span))
return SpanExportResult.SUCCESS
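For the downstream use case described in the issue (writing spans as dictionaries), a caller could then pass a custom formatter to the constructor added by this patch; the span attributes referenced below are hypothetical and only meant to show the shape of such a formatter:

```python
import json


def dict_formatter(span) -> str:
    # Hypothetical mapping from a span to the downstream dict format.
    return json.dumps({"name": span.name, "context": str(span.get_context())}) + "\n"

# exporter = ConsoleSpanExporter(formatter=dict_formatter)
```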
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py\n@@ -14,6 +14,7 @@\n \n import collections\n import logging\n+import sys\n import threading\n import typing\n from enum import Enum\n@@ -266,7 +267,15 @@\n spans to the console STDOUT.\n \"\"\"\n \n+ def __init__(\n+ self,\n+ out: typing.IO = sys.stdout,\n+ formatter: typing.Callable[[Span], str] = str,\n+ ):\n+ self.out = out\n+ self.formatter = formatter\n+\n def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:\n for span in spans:\n- print(span)\n+ self.out.write(self.formatter(span))\n return SpanExportResult.SUCCESS\n", "issue": "Custom output function in console exporter\nWhat do folks think of making the output function used by `ConsoleSpanExporter.export` customizable? Currently it uses `print(span)` but there are situations where a user may want to perform a transform on `span` before outputting it, or to output another data structure entirely.\r\n\r\nTo be more concrete, we currently have downstream infrastructure that requires our telemetry data to be in a specific dictionary format when written to stdout. We'd like to still use the `ConsoleSpanExporter` to write to stdout, but need to convert the `span` to our `dict` object before doing so.\r\n\r\nCurrently we are addressing this by writing a custom exporter class that inherits from `SpanExporter` and writes our custom data to stdout in its `export` method. Another solution though would be to allow `ConsoleSpanExporter` to take a keyword argument to its `__init__` method that would default to `print` but would also allow custom output functions. It would then use the passed function in `ConsoleSpanExporter.export` to output to stdout.\r\n\r\nDo folks see any value in adding this functionality here, rather than requiring users who want to do custom console output to write their own exporter classes? Also happy to write up the proposed solution as a PR if folks think that would be helpful.\r\n\n", "before_files": [{"content": "# Copyright 2019, OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport logging\nimport threading\nimport typing\nfrom enum import Enum\n\nfrom opentelemetry.context import Context\nfrom opentelemetry.trace import DefaultSpan\nfrom opentelemetry.util import time_ns\n\nfrom .. 
import Span, SpanProcessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass SpanExportResult(Enum):\n SUCCESS = 0\n FAILED_RETRYABLE = 1\n FAILED_NOT_RETRYABLE = 2\n\n\nclass SpanExporter:\n \"\"\"Interface for exporting spans.\n\n Interface to be implemented by services that want to export recorded in\n its own format.\n\n To export data this MUST be registered to the :class`opentelemetry.sdk.trace.Tracer` using a\n `SimpleExportSpanProcessor` or a `BatchExportSpanProcessor`.\n \"\"\"\n\n def export(self, spans: typing.Sequence[Span]) -> \"SpanExportResult\":\n \"\"\"Exports a batch of telemetry data.\n\n Args:\n spans: The list of `opentelemetry.trace.Span` objects to be exported\n\n Returns:\n The result of the export\n \"\"\"\n\n def shutdown(self) -> None:\n \"\"\"Shuts down the exporter.\n\n Called when the SDK is shut down.\n \"\"\"\n\n\nclass SimpleExportSpanProcessor(SpanProcessor):\n \"\"\"Simple SpanProcessor implementation.\n\n SimpleExportSpanProcessor is an implementation of `SpanProcessor` that\n passes ended spans directly to the configured `SpanExporter`.\n \"\"\"\n\n def __init__(self, span_exporter: SpanExporter):\n self.span_exporter = span_exporter\n\n def on_start(self, span: Span) -> None:\n pass\n\n def on_end(self, span: Span) -> None:\n with Context.use(suppress_instrumentation=True):\n try:\n self.span_exporter.export((span,))\n # pylint: disable=broad-except\n except Exception:\n logger.exception(\"Exception while exporting Span.\")\n\n def shutdown(self) -> None:\n self.span_exporter.shutdown()\n\n def force_flush(self, timeout_millis: int = 30000) -> bool:\n # pylint: disable=unused-argument\n return True\n\n\nclass BatchExportSpanProcessor(SpanProcessor):\n \"\"\"Batch span processor implementation.\n\n BatchExportSpanProcessor is an implementation of `SpanProcessor` that\n batches ended spans and pushes them to the configured `SpanExporter`.\n \"\"\"\n\n _FLUSH_TOKEN_SPAN = DefaultSpan(context=None)\n\n def __init__(\n self,\n span_exporter: SpanExporter,\n max_queue_size: int = 2048,\n schedule_delay_millis: float = 5000,\n max_export_batch_size: int = 512,\n ):\n if max_queue_size <= 0:\n raise ValueError(\"max_queue_size must be a positive integer.\")\n\n if schedule_delay_millis <= 0:\n raise ValueError(\"schedule_delay_millis must be positive.\")\n\n if max_export_batch_size <= 0:\n raise ValueError(\n \"max_export_batch_size must be a positive integer.\"\n )\n\n if max_export_batch_size > max_queue_size:\n raise ValueError(\n \"max_export_batch_size must be less than and equal to max_export_batch_size.\"\n )\n\n self.span_exporter = span_exporter\n self.queue = collections.deque(\n [], max_queue_size\n ) # type: typing.Deque[Span]\n self.worker_thread = threading.Thread(target=self.worker, daemon=True)\n self.condition = threading.Condition(threading.Lock())\n self.flush_condition = threading.Condition(threading.Lock())\n # flag to indicate that there is a flush operation on progress\n self._flushing = False\n self.schedule_delay_millis = schedule_delay_millis\n self.max_export_batch_size = max_export_batch_size\n self.max_queue_size = max_queue_size\n self.done = False\n # flag that indicates that spans are being dropped\n self._spans_dropped = False\n # precallocated list to send spans to exporter\n self.spans_list = [\n None\n ] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]]\n self.worker_thread.start()\n\n def on_start(self, span: Span) -> None:\n pass\n\n def on_end(self, span: Span) -> None:\n if self.done:\n 
logger.warning(\"Already shutdown, dropping span.\")\n return\n if len(self.queue) == self.max_queue_size:\n if not self._spans_dropped:\n logger.warning(\"Queue is full, likely spans will be dropped.\")\n self._spans_dropped = True\n\n self.queue.appendleft(span)\n\n if len(self.queue) >= self.max_queue_size // 2:\n with self.condition:\n self.condition.notify()\n\n def worker(self):\n timeout = self.schedule_delay_millis / 1e3\n while not self.done:\n if (\n len(self.queue) < self.max_export_batch_size\n and not self._flushing\n ):\n with self.condition:\n self.condition.wait(timeout)\n if not self.queue:\n # spurious notification, let's wait again\n continue\n if self.done:\n # missing spans will be sent when calling flush\n break\n\n # substract the duration of this export call to the next timeout\n start = time_ns()\n self.export()\n end = time_ns()\n duration = (end - start) / 1e9\n timeout = self.schedule_delay_millis / 1e3 - duration\n\n # be sure that all spans are sent\n self._drain_queue()\n\n def export(self) -> None:\n \"\"\"Exports at most max_export_batch_size spans.\"\"\"\n idx = 0\n notify_flush = False\n # currently only a single thread acts as consumer, so queue.pop() will\n # not raise an exception\n while idx < self.max_export_batch_size and self.queue:\n span = self.queue.pop()\n if span is self._FLUSH_TOKEN_SPAN:\n notify_flush = True\n else:\n self.spans_list[idx] = span\n idx += 1\n with Context.use(suppress_instrumentation=True):\n try:\n # Ignore type b/c the Optional[None]+slicing is too \"clever\"\n # for mypy\n self.span_exporter.export(\n self.spans_list[:idx]\n ) # type: ignore\n # pylint: disable=broad-except\n except Exception:\n logger.exception(\"Exception while exporting Span batch.\")\n\n if notify_flush:\n with self.flush_condition:\n self.flush_condition.notify()\n\n # clean up list\n for index in range(idx):\n self.spans_list[index] = None\n\n def _drain_queue(self):\n \"\"\"\"Export all elements until queue is empty.\n\n Can only be called from the worker thread context because it invokes\n `export` that is not thread safe.\n \"\"\"\n while self.queue:\n self.export()\n\n def force_flush(self, timeout_millis: int = 30000) -> bool:\n if self.done:\n logger.warning(\"Already shutdown, ignoring call to force_flush().\")\n return True\n\n self._flushing = True\n self.queue.appendleft(self._FLUSH_TOKEN_SPAN)\n\n # wake up worker thread\n with self.condition:\n self.condition.notify_all()\n\n # wait for token to be processed\n with self.flush_condition:\n ret = self.flush_condition.wait(timeout_millis / 1e3)\n\n self._flushing = False\n\n if not ret:\n logger.warning(\"Timeout was exceeded in force_flush().\")\n return ret\n\n def shutdown(self) -> None:\n # signal the worker thread to finish and then wait for it\n self.done = True\n with self.condition:\n self.condition.notify_all()\n self.worker_thread.join()\n self.span_exporter.shutdown()\n\n\nclass ConsoleSpanExporter(SpanExporter):\n \"\"\"Implementation of :class:`SpanExporter` that prints spans to the\n console.\n\n This class can be used for diagnostic purposes. It prints the exported\n spans to the console STDOUT.\n \"\"\"\n\n def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:\n for span in spans:\n print(span)\n return SpanExportResult.SUCCESS\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py"}]} | 3,466 | 234 |
gh_patches_debug_22027 | rasdani/github-patches | git_diff | sopel-irc__sopel-1385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
admin: .set command can't handle spaces
I think I once discovered this a long, long time ago and forgot to document it. I vaguely remember hitting this problem way back when I first started using Sopel (at the time, Willie)… But here we are. I hit it again, and this time it's getting an issue so it can be fixed!
When PMing Sopel `.set section.key spam eggs sausage and spam`, the value in `section.key` of the config will become simply `spam`. Because `admin.py` uses `trigger.group(4)` as the value, it ignores anything after the first whitespace.
The `.set` command probably isn't used a whole lot, but it should still work as expected. As-is, it's useless for updating things like `core.default_time_format`, which almost _always_ contain spaces.
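For illustration only, here is a tiny self-contained sketch of the problem and of one way around it. The `raw_args` string below is just a stand-in for the text typed after the command, not Sopel's actual trigger API:

```python
# Stand-in for what the user typed after ".set".
raw_args = "core.default_time_format %Y-%m-%d - %H:%M"

# Roughly what the current handler sees: group(3) is the first
# whitespace-delimited token and group(4) the second, so the value is
# truncated at the first space.
tokens = raw_args.split()
option, truncated_value = tokens[0], tokens[1]
print(truncated_value)   # prints "%Y-%m-%d", the rest of the value is lost

# Keeping everything after the first token, the way msg() and me() in the
# same module already handle trigger.group(2):
option, _sep, full_value = raw_args.partition(' ')
print(full_value)        # prints "%Y-%m-%d - %H:%M"
```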
</issue>
<code>
[start of sopel/modules/admin.py]
1 # coding=utf-8
2 """
3 admin.py - Sopel Admin Module
4 Copyright 2010-2011, Sean B. Palmer (inamidst.com) and Michael Yanovich
5 (yanovich.net)
6 Copyright © 2012, Elad Alfassa, <[email protected]>
7 Copyright 2013, Ari Koivula <[email protected]>
8
9 Licensed under the Eiffel Forum License 2.
10
11 https://sopel.chat
12 """
13 from __future__ import unicode_literals, absolute_import, print_function, division
14
15 from sopel.config.types import (
16 StaticSection, ValidatedAttribute, FilenameAttribute
17 )
18 import sopel.module
19
20
21 class AdminSection(StaticSection):
22 hold_ground = ValidatedAttribute('hold_ground', bool, default=False)
23 """Auto re-join on kick"""
24 auto_accept_invite = ValidatedAttribute('auto_accept_invite', bool,
25 default=True)
26
27
28 def configure(config):
29 config.define_section('admin', AdminSection)
30 config.admin.configure_setting('hold_ground',
31 "Automatically re-join after being kicked?")
32 config.admin.configure_setting('auto_accept_invite',
33 'Automatically join channels when invited?')
34
35
36 def setup(bot):
37 bot.config.define_section('admin', AdminSection)
38
39
40 @sopel.module.require_privmsg
41 @sopel.module.require_admin
42 @sopel.module.commands('join')
43 @sopel.module.priority('low')
44 @sopel.module.example('.join #example or .join #example key')
45 def join(bot, trigger):
46 """Join the specified channel. This is an admin-only command."""
47 channel, key = trigger.group(3), trigger.group(4)
48 if not channel:
49 return
50 elif not key:
51 bot.join(channel)
52 else:
53 bot.join(channel, key)
54
55
56 @sopel.module.require_privmsg
57 @sopel.module.require_admin
58 @sopel.module.commands('part')
59 @sopel.module.priority('low')
60 @sopel.module.example('.part #example')
61 def part(bot, trigger):
62 """Part the specified channel. This is an admin-only command."""
63 channel, _sep, part_msg = trigger.group(2).partition(' ')
64 if part_msg:
65 bot.part(channel, part_msg)
66 else:
67 bot.part(channel)
68
69
70 @sopel.module.require_privmsg
71 @sopel.module.require_owner
72 @sopel.module.commands('quit')
73 @sopel.module.priority('low')
74 def quit(bot, trigger):
75 """Quit from the server. This is an owner-only command."""
76 quit_message = trigger.group(2)
77 if not quit_message:
78 quit_message = 'Quitting on command from %s' % trigger.nick
79
80 bot.quit(quit_message)
81
82
83 @sopel.module.require_privmsg
84 @sopel.module.require_admin
85 @sopel.module.commands('msg')
86 @sopel.module.priority('low')
87 @sopel.module.example('.msg #YourPants Does anyone else smell neurotoxin?')
88 def msg(bot, trigger):
89 """
90 Send a message to a given channel or nick. Can only be done in privmsg by an
91 admin.
92 """
93 if trigger.group(2) is None:
94 return
95
96 channel, _sep, message = trigger.group(2).partition(' ')
97 message = message.strip()
98 if not channel or not message:
99 return
100
101 bot.msg(channel, message)
102
103
104 @sopel.module.require_privmsg
105 @sopel.module.require_admin
106 @sopel.module.commands('me')
107 @sopel.module.priority('low')
108 def me(bot, trigger):
109 """
110 Send an ACTION (/me) to a given channel or nick. Can only be done in privmsg
111 by an admin.
112 """
113 if trigger.group(2) is None:
114 return
115
116 channel, _sep, action = trigger.group(2).partition(' ')
117 action = action.strip()
118 if not channel or not action:
119 return
120
121 msg = '\x01ACTION %s\x01' % action
122 bot.msg(channel, msg)
123
124
125 @sopel.module.event('INVITE')
126 @sopel.module.rule('.*')
127 @sopel.module.priority('low')
128 def invite_join(bot, trigger):
129 """
130 Join a channel sopel is invited to, if the inviter is an admin.
131 """
132 if trigger.admin or bot.config.admin.auto_accept_invite:
133 bot.join(trigger.args[1])
134 return
135
136
137 @sopel.module.event('KICK')
138 @sopel.module.rule(r'.*')
139 @sopel.module.priority('low')
140 def hold_ground(bot, trigger):
141 """
142 This function monitors all kicks across all channels sopel is in. If it
143 detects that it is the one kicked it'll automatically join that channel.
144
145 WARNING: This may not be needed and could cause problems if sopel becomes
146 annoying. Please use this with caution.
147 """
148 if bot.config.admin.hold_ground:
149 channel = trigger.sender
150 if trigger.args[1] == bot.nick:
151 bot.join(channel)
152
153
154 @sopel.module.require_privmsg
155 @sopel.module.require_admin
156 @sopel.module.commands('mode')
157 @sopel.module.priority('low')
158 def mode(bot, trigger):
159 """Set a user mode on Sopel. Can only be done in privmsg by an admin."""
160 mode = trigger.group(3)
161 bot.write(('MODE ', bot.nick + ' ' + mode))
162
163
164 @sopel.module.require_privmsg("This command only works as a private message.")
165 @sopel.module.require_admin("This command requires admin privileges.")
166 @sopel.module.commands('set')
167 @sopel.module.example('.set core.owner Me')
168 def set_config(bot, trigger):
169 """See and modify values of sopels config object.
170
171 Trigger args:
172 arg1 - section and option, in the form "section.option"
173 arg2 - value
174
175 If there is no section, section will default to "core".
176 If value is None, the option will be deleted.
177 """
178 # Get section and option from first argument.
179 arg1 = trigger.group(3).split('.')
180 if len(arg1) == 1:
181 section_name, option = "core", arg1[0]
182 elif len(arg1) == 2:
183 section_name, option = arg1
184 else:
185 bot.reply("Usage: .set section.option value")
186 return
187 section = getattr(bot.config, section_name)
188 static_sec = isinstance(section, StaticSection)
189
190 if static_sec and not hasattr(section, option):
191 bot.say('[{}] section has no option {}.'.format(section_name, option))
192 return
193
194 # Display current value if no value is given.
195 value = trigger.group(4)
196 if not value:
197 if not static_sec and bot.config.parser.has_option(section, option):
198 bot.reply("Option %s.%s does not exist." % (section_name, option))
199 return
200 # Except if the option looks like a password. Censor those to stop them
201 # from being put on log files.
202 if option.endswith("password") or option.endswith("pass"):
203 value = "(password censored)"
204 else:
205 value = getattr(section, option)
206 bot.reply("%s.%s = %s" % (section_name, option, value))
207 return
208
209 # Otherwise, set the value to one given as argument 2.
210 if static_sec:
211 descriptor = getattr(section.__class__, option)
212 try:
213 if isinstance(descriptor, FilenameAttribute):
214 value = descriptor.parse(bot.config, descriptor, value)
215 else:
216 value = descriptor.parse(value)
217 except ValueError as exc:
218 bot.say("Can't set attribute: " + str(exc))
219 return
220 setattr(section, option, value)
221
222
223 @sopel.module.require_privmsg
224 @sopel.module.require_admin
225 @sopel.module.commands('save')
226 @sopel.module.example('.save')
227 def save_config(bot, trigger):
228 """Save state of sopels config object to the configuration file."""
229 bot.config.save()
230
[end of sopel/modules/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/admin.py b/sopel/modules/admin.py
--- a/sopel/modules/admin.py
+++ b/sopel/modules/admin.py
@@ -191,9 +191,13 @@
bot.say('[{}] section has no option {}.'.format(section_name, option))
return
+ delim = trigger.group(2).find(' ')
+ # Skip preceeding whitespaces, if any.
+ while delim > 0 and delim < len(trigger.group(2)) and trigger.group(2)[delim] == ' ':
+ delim = delim + 1
+
# Display current value if no value is given.
- value = trigger.group(4)
- if not value:
+ if delim == -1 or delim == len(trigger.group(2)):
if not static_sec and bot.config.parser.has_option(section, option):
bot.reply("Option %s.%s does not exist." % (section_name, option))
return
@@ -207,6 +211,7 @@
return
# Otherwise, set the value to one given as argument 2.
+ value = trigger.group(2)[delim:]
if static_sec:
descriptor = getattr(section.__class__, option)
try:
| {"golden_diff": "diff --git a/sopel/modules/admin.py b/sopel/modules/admin.py\n--- a/sopel/modules/admin.py\n+++ b/sopel/modules/admin.py\n@@ -191,9 +191,13 @@\n bot.say('[{}] section has no option {}.'.format(section_name, option))\n return\n \n+ delim = trigger.group(2).find(' ')\n+ # Skip preceeding whitespaces, if any.\n+ while delim > 0 and delim < len(trigger.group(2)) and trigger.group(2)[delim] == ' ':\n+ delim = delim + 1\n+\n # Display current value if no value is given.\n- value = trigger.group(4)\n- if not value:\n+ if delim == -1 or delim == len(trigger.group(2)):\n if not static_sec and bot.config.parser.has_option(section, option):\n bot.reply(\"Option %s.%s does not exist.\" % (section_name, option))\n return\n@@ -207,6 +211,7 @@\n return\n \n # Otherwise, set the value to one given as argument 2.\n+ value = trigger.group(2)[delim:]\n if static_sec:\n descriptor = getattr(section.__class__, option)\n try:\n", "issue": "admin: .set command can't handle spaces\nI think I once discovered this a long, long time ago and forgot to document it. I vaguely remember hitting this problem way back when I first started using Sopel (at the time, Willie)\u2026 But here we are. I hit it again, and this time it's getting an issue so it can be fixed!\r\n\r\nWhen PMing Sopel `.set section.key spam eggs sausage and spam`, the value in `section.key` of the config will become simply `spam`. Because `admin.py` uses `trigger.group(4)` as the value, it ignores anything after the first whitespace.\r\n\r\nThe `.set` command probably isn't used a whole lot, but it should still work as expected. As-is, it's useless for updating things like `core.default_time_format`, which almost _always_ contain spaces.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nadmin.py - Sopel Admin Module\nCopyright 2010-2011, Sean B. Palmer (inamidst.com) and Michael Yanovich\n(yanovich.net)\nCopyright \u00a9 2012, Elad Alfassa, <[email protected]>\nCopyright 2013, Ari Koivula <[email protected]>\n\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, FilenameAttribute\n)\nimport sopel.module\n\n\nclass AdminSection(StaticSection):\n hold_ground = ValidatedAttribute('hold_ground', bool, default=False)\n \"\"\"Auto re-join on kick\"\"\"\n auto_accept_invite = ValidatedAttribute('auto_accept_invite', bool,\n default=True)\n\n\ndef configure(config):\n config.define_section('admin', AdminSection)\n config.admin.configure_setting('hold_ground',\n \"Automatically re-join after being kicked?\")\n config.admin.configure_setting('auto_accept_invite',\n 'Automatically join channels when invited?')\n\n\ndef setup(bot):\n bot.config.define_section('admin', AdminSection)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('join')\[email protected]('low')\[email protected]('.join #example or .join #example key')\ndef join(bot, trigger):\n \"\"\"Join the specified channel. This is an admin-only command.\"\"\"\n channel, key = trigger.group(3), trigger.group(4)\n if not channel:\n return\n elif not key:\n bot.join(channel)\n else:\n bot.join(channel, key)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('part')\[email protected]('low')\[email protected]('.part #example')\ndef part(bot, trigger):\n \"\"\"Part the specified channel. 
This is an admin-only command.\"\"\"\n channel, _sep, part_msg = trigger.group(2).partition(' ')\n if part_msg:\n bot.part(channel, part_msg)\n else:\n bot.part(channel)\n\n\[email protected]_privmsg\[email protected]_owner\[email protected]('quit')\[email protected]('low')\ndef quit(bot, trigger):\n \"\"\"Quit from the server. This is an owner-only command.\"\"\"\n quit_message = trigger.group(2)\n if not quit_message:\n quit_message = 'Quitting on command from %s' % trigger.nick\n\n bot.quit(quit_message)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('msg')\[email protected]('low')\[email protected]('.msg #YourPants Does anyone else smell neurotoxin?')\ndef msg(bot, trigger):\n \"\"\"\n Send a message to a given channel or nick. Can only be done in privmsg by an\n admin.\n \"\"\"\n if trigger.group(2) is None:\n return\n\n channel, _sep, message = trigger.group(2).partition(' ')\n message = message.strip()\n if not channel or not message:\n return\n\n bot.msg(channel, message)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('me')\[email protected]('low')\ndef me(bot, trigger):\n \"\"\"\n Send an ACTION (/me) to a given channel or nick. Can only be done in privmsg\n by an admin.\n \"\"\"\n if trigger.group(2) is None:\n return\n\n channel, _sep, action = trigger.group(2).partition(' ')\n action = action.strip()\n if not channel or not action:\n return\n\n msg = '\\x01ACTION %s\\x01' % action\n bot.msg(channel, msg)\n\n\[email protected]('INVITE')\[email protected]('.*')\[email protected]('low')\ndef invite_join(bot, trigger):\n \"\"\"\n Join a channel sopel is invited to, if the inviter is an admin.\n \"\"\"\n if trigger.admin or bot.config.admin.auto_accept_invite:\n bot.join(trigger.args[1])\n return\n\n\[email protected]('KICK')\[email protected](r'.*')\[email protected]('low')\ndef hold_ground(bot, trigger):\n \"\"\"\n This function monitors all kicks across all channels sopel is in. If it\n detects that it is the one kicked it'll automatically join that channel.\n\n WARNING: This may not be needed and could cause problems if sopel becomes\n annoying. Please use this with caution.\n \"\"\"\n if bot.config.admin.hold_ground:\n channel = trigger.sender\n if trigger.args[1] == bot.nick:\n bot.join(channel)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('mode')\[email protected]('low')\ndef mode(bot, trigger):\n \"\"\"Set a user mode on Sopel. 
Can only be done in privmsg by an admin.\"\"\"\n mode = trigger.group(3)\n bot.write(('MODE ', bot.nick + ' ' + mode))\n\n\[email protected]_privmsg(\"This command only works as a private message.\")\[email protected]_admin(\"This command requires admin privileges.\")\[email protected]('set')\[email protected]('.set core.owner Me')\ndef set_config(bot, trigger):\n \"\"\"See and modify values of sopels config object.\n\n Trigger args:\n arg1 - section and option, in the form \"section.option\"\n arg2 - value\n\n If there is no section, section will default to \"core\".\n If value is None, the option will be deleted.\n \"\"\"\n # Get section and option from first argument.\n arg1 = trigger.group(3).split('.')\n if len(arg1) == 1:\n section_name, option = \"core\", arg1[0]\n elif len(arg1) == 2:\n section_name, option = arg1\n else:\n bot.reply(\"Usage: .set section.option value\")\n return\n section = getattr(bot.config, section_name)\n static_sec = isinstance(section, StaticSection)\n\n if static_sec and not hasattr(section, option):\n bot.say('[{}] section has no option {}.'.format(section_name, option))\n return\n\n # Display current value if no value is given.\n value = trigger.group(4)\n if not value:\n if not static_sec and bot.config.parser.has_option(section, option):\n bot.reply(\"Option %s.%s does not exist.\" % (section_name, option))\n return\n # Except if the option looks like a password. Censor those to stop them\n # from being put on log files.\n if option.endswith(\"password\") or option.endswith(\"pass\"):\n value = \"(password censored)\"\n else:\n value = getattr(section, option)\n bot.reply(\"%s.%s = %s\" % (section_name, option, value))\n return\n\n # Otherwise, set the value to one given as argument 2.\n if static_sec:\n descriptor = getattr(section.__class__, option)\n try:\n if isinstance(descriptor, FilenameAttribute):\n value = descriptor.parse(bot.config, descriptor, value)\n else:\n value = descriptor.parse(value)\n except ValueError as exc:\n bot.say(\"Can't set attribute: \" + str(exc))\n return\n setattr(section, option, value)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('save')\[email protected]('.save')\ndef save_config(bot, trigger):\n \"\"\"Save state of sopels config object to the configuration file.\"\"\"\n bot.config.save()\n", "path": "sopel/modules/admin.py"}]} | 3,055 | 277 |
gh_patches_debug_15693 | rasdani/github-patches | git_diff | pypa__pip-2396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip.utils.filesystem.check_path_owner should treat group membership properly
I have two users who share the same `.pip/cache` directory: `a` and `b`. Both users are members of the group `pip`. There is a third user, `pip`, who is also a member of group `pip`, and that user handles all downloading and caching for users `a` and `b`. The `.pip/cache` directory is configured with group-writeable permission and the sticky bit, so that all newly created files and folders are owned by group `pip`.
The function [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks whether the owner is the same user, which is somewhat wrong. You should check group membership as well.
Another option is to create a new command-line option that allows ignoring this check.
Either solution will work.
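As a rough sketch of the first option (the function name below is mine, not pip's), the uid comparison could give way to simply asking the operating system whether the current user may write to the path, which covers group membership as well:

```python
import os


def path_is_writable(path):
    # os.access() consults the user, group and "other" permission bits, so a
    # shared, group-writeable cache directory passes even when the file owner
    # is a different user. Note that it checks the real uid/gid by default.
    return os.access(path, os.W_OK)


print(path_is_writable(os.path.expanduser("~/.pip")))
```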
</issue>
<code>
[start of pip/utils/filesystem.py]
1 import os
2 import os.path
3
4 from pip.compat import get_path_uid
5
6
7 def check_path_owner(path):
8 # If we don't have a way to check the effective uid of this process, then
9 # we'll just assume that we own the directory.
10 if not hasattr(os, "geteuid"):
11 return True
12
13 previous = None
14 while path != previous:
15 if os.path.lexists(path):
16 # Actually do the ownership check
17 try:
18 if get_path_uid(path) != os.geteuid():
19 return False
20 except OSError:
21 return False
22 return True
23 else:
24 previous, path = path, os.path.dirname(path)
25
[end of pip/utils/filesystem.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py
--- a/pip/utils/filesystem.py
+++ b/pip/utils/filesystem.py
@@ -13,12 +13,16 @@
previous = None
while path != previous:
if os.path.lexists(path):
- # Actually do the ownership check
- try:
- if get_path_uid(path) != os.geteuid():
+ # Check if path is writable by current user.
+ if os.geteuid() == 0:
+ # Special handling for root user in order to handle properly
+ # cases where users use sudo without -H flag.
+ try:
+ path_uid = get_path_uid(path)
+ except OSError:
return False
- except OSError:
- return False
- return True
+ return path_uid == 0
+ else:
+ return os.access(path, os.W_OK)
else:
previous, path = path, os.path.dirname(path)
| {"golden_diff": "diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py\n--- a/pip/utils/filesystem.py\n+++ b/pip/utils/filesystem.py\n@@ -13,12 +13,16 @@\n previous = None\n while path != previous:\n if os.path.lexists(path):\n- # Actually do the ownership check\n- try:\n- if get_path_uid(path) != os.geteuid():\n+ # Check if path is writable by current user.\n+ if os.geteuid() == 0:\n+ # Special handling for root user in order to handle properly\n+ # cases where users use sudo without -H flag.\n+ try:\n+ path_uid = get_path_uid(path)\n+ except OSError:\n return False\n- except OSError:\n- return False\n- return True\n+ return path_uid == 0\n+ else:\n+ return os.access(path, os.W_OK)\n else:\n previous, path = path, os.path.dirname(path)\n", "issue": "pip.utils.filesystem.check_path_owner should treat group membership properly\nI have two users who share same .pip/cache directory: `a` and `b`. Both users are members of the group `pip`. There is third user, `pip` who is also member of group `pip` and that user handles all download and caching for users a and b. `.pip/cache` directory configured to have group writeable permission and sticky bit, to make sure all newly created files and folders are owned by group `pip`. \n\nFunction [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks if owner is same user, but it is somewhat wrong. You should check group membership as well. \n\nAnother option, is to create new command-line option, which will allow to ignore this check. \n\nEither solution will work.\n\n", "before_files": [{"content": "import os\nimport os.path\n\nfrom pip.compat import get_path_uid\n\n\ndef check_path_owner(path):\n # If we don't have a way to check the effective uid of this process, then\n # we'll just assume that we own the directory.\n if not hasattr(os, \"geteuid\"):\n return True\n\n previous = None\n while path != previous:\n if os.path.lexists(path):\n # Actually do the ownership check\n try:\n if get_path_uid(path) != os.geteuid():\n return False\n except OSError:\n return False\n return True\n else:\n previous, path = path, os.path.dirname(path)\n", "path": "pip/utils/filesystem.py"}]} | 908 | 224 |
gh_patches_debug_5522 | rasdani/github-patches | git_diff | conan-io__conan-5102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Conan Tools does not export detected_os
To help us debug your issue please explain:
When I was updating cmake_installer, I found that `detected_os` is not exported by `conans.tools`.
Conan Version 1.15.0
```python
from conans import tools
if __name__ == "__main__":
    tools.detected_os()
```
**ERROR**:
```
Traceback (most recent call last):
File "conanfile.py", line 4, in <module>
tools.detected_os()
AttributeError: module 'conans.tools' has no attribute 'detected_os'
```
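As a possible interim workaround (assuming the internal module layout stays where it is today), `detected_os` can be imported from the module that defines it, the same place `conans/tools.py` already re-exports `detected_architecture` from:

```python
# Workaround sketch, not an official API: import from the implementing
# module instead of the missing re-export in conans.tools.
from conans.client.tools.oss import detected_os

if __name__ == "__main__":
    print(detected_os())
```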
- [X] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
- [X] I've specified the Conan version, operating system version and any tool that can be relevant.
- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
</issue>
<code>
[start of conans/tools.py]
1 """
2 Conan tools: classes and function in this module are intended to be used out of the box
3 with the Conan configuration already currified into them. This configuration refers
4 mainly to two items:
5 - requester: on network calls, this will include proxy definition.
6 - output: the output configuration
7
8 Here in this module there should be no logic, all functions and classes must be implemented
9 elsewhere (mainly in conans.util or conans.client.tools) and ready to be used without
10 the currification.
11 """
12
13 import requests
14
15 from conans.client.output import ConanOutput
16 # Tools from conans.client.tools
17 from conans.client.tools import files as tools_files, net as tools_net, oss as tools_oss, \
18 system_pm as tools_system_pm, win as tools_win
19 from conans.client.tools.env import * # pylint: disable=unused-import
20 from conans.client.tools.pkg_config import * # pylint: disable=unused-import
21 from conans.client.tools.scm import * # pylint: disable=unused-import
22 from conans.client.tools.apple import *
23 # Tools form conans.util
24 from conans.util.env_reader import get_env
25 from conans.util.files import _generic_algorithm_sum, load, md5, md5sum, mkdir, relative_dirs, \
26 rmdir, save as files_save, save_append, sha1sum, sha256sum, touch, sha1sum, sha256sum, \
27 to_file_bytes, touch
28 from conans.util.log import logger
29 from conans.client.tools.version import Version
30
31
32 # This global variables are intended to store the configuration of the running Conan application
33 _global_output = None
34 _global_requester = None
35
36
37 def set_global_instances(the_output, the_requester):
38 global _global_output
39 global _global_requester
40
41 old_output, old_requester = _global_output, _global_requester
42
43 # TODO: pass here the configuration file, and make the work here (explicit!)
44 _global_output = the_output
45 _global_requester = the_requester
46
47 return old_output, old_requester
48
49
50 def get_global_instances():
51 return _global_output, _global_requester
52
53
54 # Assign a default, will be overwritten in the factory of the ConanAPI
55 set_global_instances(the_output=ConanOutput(sys.stdout, True), the_requester=requests)
56
57
58 """
59 From here onwards only currification is expected, no logic
60 """
61
62
63 def save(path, content, append=False):
64 # TODO: All this three functions: save, save_append and this one should be merged into one.
65 if append:
66 save_append(path=path, content=content)
67 else:
68 files_save(path=path, content=content, only_if_modified=False)
69
70
71 # From conans.client.tools.net
72 ftp_download = tools_net.ftp_download
73
74
75 def download(*args, **kwargs):
76 return tools_net.download(out=_global_output, requester=_global_requester, *args, **kwargs)
77
78
79 def get(*args, **kwargs):
80 return tools_net.get(output=_global_output, requester=_global_requester, *args, **kwargs)
81
82
83 # from conans.client.tools.files
84 chdir = tools_files.chdir
85 human_size = tools_files.human_size
86 untargz = tools_files.untargz
87 check_with_algorithm_sum = tools_files.check_with_algorithm_sum
88 check_sha1 = tools_files.check_sha1
89 check_md5 = tools_files.check_md5
90 check_sha256 = tools_files.check_sha256
91 patch = tools_files.patch
92 replace_prefix_in_pc_file = tools_files.replace_prefix_in_pc_file
93 collect_libs = tools_files.collect_libs
94 which = tools_files.which
95 unix2dos = tools_files.unix2dos
96 dos2unix = tools_files.dos2unix
97
98
99 def unzip(*args, **kwargs):
100 return tools_files.unzip(output=_global_output, *args, **kwargs)
101
102
103 def replace_in_file(*args, **kwargs):
104 return tools_files.replace_in_file(output=_global_output, *args, **kwargs)
105
106
107 def replace_path_in_file(*args, **kwargs):
108 return tools_files.replace_path_in_file(output=_global_output, *args, **kwargs)
109
110
111 # from conans.client.tools.oss
112 args_to_string = tools_oss.args_to_string
113 detected_architecture = tools_oss.detected_architecture
114 OSInfo = tools_oss.OSInfo
115 cross_building = tools_oss.cross_building
116 get_cross_building_settings = tools_oss.get_cross_building_settings
117 get_gnu_triplet = tools_oss.get_gnu_triplet
118
119
120 def cpu_count(*args, **kwargs):
121 return tools_oss.cpu_count(output=_global_output, *args, **kwargs)
122
123
124 # from conans.client.tools.system_pm
125 class SystemPackageTool(tools_system_pm.SystemPackageTool):
126 def __init__(self, *args, **kwargs):
127 super(SystemPackageTool, self).__init__(output=_global_output, *args, **kwargs)
128
129
130 class NullTool(tools_system_pm.NullTool):
131 def __init__(self, *args, **kwargs):
132 super(NullTool, self).__init__(output=_global_output, *args, **kwargs)
133
134
135 class AptTool(tools_system_pm.AptTool):
136 def __init__(self, *args, **kwargs):
137 super(AptTool, self).__init__(output=_global_output, *args, **kwargs)
138
139
140 class YumTool(tools_system_pm.YumTool):
141 def __init__(self, *args, **kwargs):
142 super(YumTool, self).__init__(output=_global_output, *args, **kwargs)
143
144
145 class BrewTool(tools_system_pm.BrewTool):
146 def __init__(self, *args, **kwargs):
147 super(BrewTool, self).__init__(output=_global_output, *args, **kwargs)
148
149
150 class PkgTool(tools_system_pm.PkgTool):
151 def __init__(self, *args, **kwargs):
152 super(PkgTool, self).__init__(output=_global_output, *args, **kwargs)
153
154
155 class ChocolateyTool(tools_system_pm.ChocolateyTool):
156 def __init__(self, *args, **kwargs):
157 super(ChocolateyTool, self).__init__(output=_global_output, *args, **kwargs)
158
159
160 class PkgUtilTool(tools_system_pm.PkgUtilTool):
161 def __init__(self, *args, **kwargs):
162 super(PkgUtilTool, self).__init__(output=_global_output, *args, **kwargs)
163
164
165 class PacManTool(tools_system_pm.PacManTool):
166 def __init__(self, *args, **kwargs):
167 super(PacManTool, self).__init__(output=_global_output, *args, **kwargs)
168
169
170 class ZypperTool(tools_system_pm.ZypperTool):
171 def __init__(self, *args, **kwargs):
172 super(ZypperTool, self).__init__(output=_global_output, *args, **kwargs)
173
174
175 # from conans.client.tools.win
176 vs_installation_path = tools_win.vs_installation_path
177 vswhere = tools_win.vswhere
178 vs_comntools = tools_win.vs_comntools
179 find_windows_10_sdk = tools_win.find_windows_10_sdk
180 escape_windows_cmd = tools_win.escape_windows_cmd
181 get_cased_path = tools_win.get_cased_path
182 MSYS2 = tools_win.MSYS2
183 MSYS = tools_win.MSYS
184 CYGWIN = tools_win.CYGWIN
185 WSL = tools_win.WSL
186 SFU = tools_win.SFU
187 unix_path = tools_win.unix_path
188 run_in_windows_bash = tools_win.run_in_windows_bash
189
190
191 @contextmanager
192 def vcvars(*args, **kwargs):
193 with tools_win.vcvars(output=_global_output, *args, **kwargs):
194 yield
195
196
197 def msvc_build_command(*args, **kwargs):
198 return tools_win.msvc_build_command(output=_global_output, *args, **kwargs)
199
200
201 def build_sln_command(*args, **kwargs):
202 return tools_win.build_sln_command(output=_global_output, *args, **kwargs)
203
204
205 def vcvars_command(*args, **kwargs):
206 return tools_win.vcvars_command(output=_global_output, *args, **kwargs)
207
208
209 def vcvars_dict(*args, **kwargs):
210 return tools_win.vcvars_dict(output=_global_output, *args, **kwargs)
211
212
213 def latest_vs_version_installed(*args, **kwargs):
214 return tools_win.latest_vs_version_installed(output=_global_output, *args, **kwargs)
215
216
217 # Ready to use objects.
218 try:
219 os_info = OSInfo()
220 except Exception as exc:
221 logger.error(exc)
222 _global_output.error("Error detecting os_info")
223
[end of conans/tools.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/tools.py b/conans/tools.py
--- a/conans/tools.py
+++ b/conans/tools.py
@@ -111,6 +111,7 @@
# from conans.client.tools.oss
args_to_string = tools_oss.args_to_string
detected_architecture = tools_oss.detected_architecture
+detected_os = tools_oss.detected_os
OSInfo = tools_oss.OSInfo
cross_building = tools_oss.cross_building
get_cross_building_settings = tools_oss.get_cross_building_settings
| {"golden_diff": "diff --git a/conans/tools.py b/conans/tools.py\n--- a/conans/tools.py\n+++ b/conans/tools.py\n@@ -111,6 +111,7 @@\n # from conans.client.tools.oss\n args_to_string = tools_oss.args_to_string\n detected_architecture = tools_oss.detected_architecture\n+detected_os = tools_oss.detected_os\n OSInfo = tools_oss.OSInfo\n cross_building = tools_oss.cross_building\n get_cross_building_settings = tools_oss.get_cross_building_settings\n", "issue": "[BUG] Conan Tools does not export detected_os\nTo help us debug your issue please explain:\r\n\r\nWhen I was updating cmake_installer, I just found that `detected_os` is not exported by conans.tools\r\n\r\nConan Version 1.15.0\r\n\r\n```python\r\nfrom conans import tools\r\n\r\nif __name__ == \"__main__\":\r\n\ttools.detected_os()\r\n```\r\n\r\n**ERROR**: \r\n```\r\nTraceback (most recent call last):\r\n File \"conanfile.py\", line 4, in <module>\r\n tools.detected_os()\r\nAttributeError: module 'conans.tools' has no attribute 'detected_os'\r\n```\r\n\r\n- [X] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n- [X] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Conan tools: classes and function in this module are intended to be used out of the box\n with the Conan configuration already currified into them. This configuration refers\n mainly to two items:\n - requester: on network calls, this will include proxy definition.\n - output: the output configuration\n\n Here in this module there should be no logic, all functions and classes must be implemented\n elsewhere (mainly in conans.util or conans.client.tools) and ready to be used without\n the currification.\n\"\"\"\n\nimport requests\n\nfrom conans.client.output import ConanOutput\n# Tools from conans.client.tools\nfrom conans.client.tools import files as tools_files, net as tools_net, oss as tools_oss, \\\n system_pm as tools_system_pm, win as tools_win\nfrom conans.client.tools.env import * # pylint: disable=unused-import\nfrom conans.client.tools.pkg_config import * # pylint: disable=unused-import\nfrom conans.client.tools.scm import * # pylint: disable=unused-import\nfrom conans.client.tools.apple import *\n# Tools form conans.util\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import _generic_algorithm_sum, load, md5, md5sum, mkdir, relative_dirs, \\\n rmdir, save as files_save, save_append, sha1sum, sha256sum, touch, sha1sum, sha256sum, \\\n to_file_bytes, touch\nfrom conans.util.log import logger\nfrom conans.client.tools.version import Version\n\n\n# This global variables are intended to store the configuration of the running Conan application\n_global_output = None\n_global_requester = None\n\n\ndef set_global_instances(the_output, the_requester):\n global _global_output\n global _global_requester\n\n old_output, old_requester = _global_output, _global_requester\n\n # TODO: pass here the configuration file, and make the work here (explicit!)\n _global_output = the_output\n _global_requester = the_requester\n\n return old_output, old_requester\n\n\ndef get_global_instances():\n return _global_output, _global_requester\n\n\n# Assign a default, will be overwritten in the factory of the ConanAPI\nset_global_instances(the_output=ConanOutput(sys.stdout, True), 
the_requester=requests)\n\n\n\"\"\"\nFrom here onwards only currification is expected, no logic\n\"\"\"\n\n\ndef save(path, content, append=False):\n # TODO: All this three functions: save, save_append and this one should be merged into one.\n if append:\n save_append(path=path, content=content)\n else:\n files_save(path=path, content=content, only_if_modified=False)\n\n\n# From conans.client.tools.net\nftp_download = tools_net.ftp_download\n\n\ndef download(*args, **kwargs):\n return tools_net.download(out=_global_output, requester=_global_requester, *args, **kwargs)\n\n\ndef get(*args, **kwargs):\n return tools_net.get(output=_global_output, requester=_global_requester, *args, **kwargs)\n\n\n# from conans.client.tools.files\nchdir = tools_files.chdir\nhuman_size = tools_files.human_size\nuntargz = tools_files.untargz\ncheck_with_algorithm_sum = tools_files.check_with_algorithm_sum\ncheck_sha1 = tools_files.check_sha1\ncheck_md5 = tools_files.check_md5\ncheck_sha256 = tools_files.check_sha256\npatch = tools_files.patch\nreplace_prefix_in_pc_file = tools_files.replace_prefix_in_pc_file\ncollect_libs = tools_files.collect_libs\nwhich = tools_files.which\nunix2dos = tools_files.unix2dos\ndos2unix = tools_files.dos2unix\n\n\ndef unzip(*args, **kwargs):\n return tools_files.unzip(output=_global_output, *args, **kwargs)\n\n\ndef replace_in_file(*args, **kwargs):\n return tools_files.replace_in_file(output=_global_output, *args, **kwargs)\n\n\ndef replace_path_in_file(*args, **kwargs):\n return tools_files.replace_path_in_file(output=_global_output, *args, **kwargs)\n\n\n# from conans.client.tools.oss\nargs_to_string = tools_oss.args_to_string\ndetected_architecture = tools_oss.detected_architecture\nOSInfo = tools_oss.OSInfo\ncross_building = tools_oss.cross_building\nget_cross_building_settings = tools_oss.get_cross_building_settings\nget_gnu_triplet = tools_oss.get_gnu_triplet\n\n\ndef cpu_count(*args, **kwargs):\n return tools_oss.cpu_count(output=_global_output, *args, **kwargs)\n\n\n# from conans.client.tools.system_pm\nclass SystemPackageTool(tools_system_pm.SystemPackageTool):\n def __init__(self, *args, **kwargs):\n super(SystemPackageTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass NullTool(tools_system_pm.NullTool):\n def __init__(self, *args, **kwargs):\n super(NullTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass AptTool(tools_system_pm.AptTool):\n def __init__(self, *args, **kwargs):\n super(AptTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass YumTool(tools_system_pm.YumTool):\n def __init__(self, *args, **kwargs):\n super(YumTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass BrewTool(tools_system_pm.BrewTool):\n def __init__(self, *args, **kwargs):\n super(BrewTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass PkgTool(tools_system_pm.PkgTool):\n def __init__(self, *args, **kwargs):\n super(PkgTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass ChocolateyTool(tools_system_pm.ChocolateyTool):\n def __init__(self, *args, **kwargs):\n super(ChocolateyTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass PkgUtilTool(tools_system_pm.PkgUtilTool):\n def __init__(self, *args, **kwargs):\n super(PkgUtilTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass PacManTool(tools_system_pm.PacManTool):\n def __init__(self, *args, **kwargs):\n super(PacManTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\nclass 
ZypperTool(tools_system_pm.ZypperTool):\n def __init__(self, *args, **kwargs):\n super(ZypperTool, self).__init__(output=_global_output, *args, **kwargs)\n\n\n# from conans.client.tools.win\nvs_installation_path = tools_win.vs_installation_path\nvswhere = tools_win.vswhere\nvs_comntools = tools_win.vs_comntools\nfind_windows_10_sdk = tools_win.find_windows_10_sdk\nescape_windows_cmd = tools_win.escape_windows_cmd\nget_cased_path = tools_win.get_cased_path\nMSYS2 = tools_win.MSYS2\nMSYS = tools_win.MSYS\nCYGWIN = tools_win.CYGWIN\nWSL = tools_win.WSL\nSFU = tools_win.SFU\nunix_path = tools_win.unix_path\nrun_in_windows_bash = tools_win.run_in_windows_bash\n\n\n@contextmanager\ndef vcvars(*args, **kwargs):\n with tools_win.vcvars(output=_global_output, *args, **kwargs):\n yield\n\n\ndef msvc_build_command(*args, **kwargs):\n return tools_win.msvc_build_command(output=_global_output, *args, **kwargs)\n\n\ndef build_sln_command(*args, **kwargs):\n return tools_win.build_sln_command(output=_global_output, *args, **kwargs)\n\n\ndef vcvars_command(*args, **kwargs):\n return tools_win.vcvars_command(output=_global_output, *args, **kwargs)\n\n\ndef vcvars_dict(*args, **kwargs):\n return tools_win.vcvars_dict(output=_global_output, *args, **kwargs)\n\n\ndef latest_vs_version_installed(*args, **kwargs):\n return tools_win.latest_vs_version_installed(output=_global_output, *args, **kwargs)\n\n\n# Ready to use objects.\ntry:\n os_info = OSInfo()\nexcept Exception as exc:\n logger.error(exc)\n _global_output.error(\"Error detecting os_info\")\n", "path": "conans/tools.py"}]} | 3,202 | 117 |
gh_patches_debug_1046 | rasdani/github-patches | git_diff | enthought__chaco-424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demo quiver.py not working
**Problem Description**
Zooming in eventually ends with the traceback below and a blank plot.
**Reproduction Steps:**
Run the file and zoom in until the plot breaks.
**Expected behavior:**
The plot disappears if you keep zooming in, and it ends with the following traceback.
```
Traceback (most recent call last):
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py", line 202, in paintEvent
self.handler.paintEvent(event)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py", line 54, in paintEvent
self._enable_window._paint(event)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/abstract_window.py", line 468, in _paint
self.component.draw(gc, view_bounds=(0, 0, size[0], size[1]))
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 427, in draw
self._draw(gc, view_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 779, in _draw
self._dispatch_draw(layer, gc, view_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py", line 272, in _dispatch_draw
component._dispatch_draw(layer, gc, new_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py", line 272, in _dispatch_draw
component._dispatch_draw(layer, gc, new_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 799, in _dispatch_draw
handler(gc, view_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py", line 466, in _draw_plot
self._draw_component(gc, view_bounds, mode)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py", line 474, in _draw_component
self._render(gc, pts)
File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/quiverplot.py", line 80, in _render
ends = points + self._cached_vector_data
ValueError: operands could not be broadcast together with shapes (0,) (0,2)
```
**OS, Python version:**
OSX, Python 2.7
splits from #385
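For reference, the failing broadcast can be reproduced in isolation. The shapes come straight from the traceback; the variable names below are only stand-ins for the plot's internal state once zooming has pushed every point out of view:

```python
import numpy as np

points = np.empty((0,))                # no screen points left to draw
cached_vector_data = np.empty((0, 2))  # cached (dx, dy) vectors, also empty

# Raises: ValueError: operands could not be broadcast together with
# shapes (0,) (0,2)
ends = points + cached_vector_data
```

So it looks like an empty `points` array needs to be handled before that addition in `_render`.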
</issue>
<code>
[start of chaco/quiverplot.py]
1
2 from __future__ import with_statement
3
4 from numpy import array, compress, matrix, newaxis, sqrt, zeros
5
6 # Enthought library imports
7 from enable.api import ColorTrait
8 from traits.api import Array, Enum, Float, Instance, Int
9
10 # Chaco relative imports
11 from .abstract_data_source import AbstractDataSource
12 from .scatterplot import ScatterPlot
13
14 class QuiverPlot(ScatterPlot):
15
16 # Determines how to interpret the data in the **vectors** data source.
17 # "vector": each tuple is a (dx, dy)
18 # "radial": each tuple is an (r, theta)
19 data_type = Enum("vector", "radial") # TODO: implement "radial"
20
21 # A datasource that returns an Nx2 array array indicating directions
22 # of the vectors. The interpretation of this array is dependent on
23 # the setting of the **data_type** attribute.
24 #
25 # Usually this will be a MultiArrayDataSource.
26 vectors = Instance(AbstractDataSource)
27
28 #------------------------------------------------------------------------
29 # Visual attributes of the vector
30 #------------------------------------------------------------------------
31
32 # The color of the lines
33 line_color = ColorTrait("black")
34
35 # The width of the lines
36 line_width = Float(1.0)
37
38 # The length, in pixels, of the arrowhead
39 arrow_size = Int(5)
40
41 #------------------------------------------------------------------------
42 # Private traits
43 #------------------------------------------------------------------------
44
45 _cached_vector_data = Array
46 _selected_vector_data = Array
47
48 def _gather_points_old(self):
49 # In addition to the standard scatterplot _gather_points, we need
50 # to also grab the vectors that fall inside the view range
51 super(QuiverPlot, self)._gather_points_old()
52
53 if not self.index or not self.value:
54 return
55
56 if len(self._cached_point_mask) == 0:
57 self._cached_vector_data = []
58 return
59
60 vectors = self.vectors.get_data()
61 self._cached_vector_data = compress(self._cached_point_mask, vectors, axis=0)
62
63 if self._cached_selected_pts is not None:
64 indices = self._cached_selection_point_mask
65 self._selected_vector_data = compress(indices, vectors, axis=0)
66 else:
67 self._selected_vector_data = None
68 return
69
70
71 def _render(self, gc, points, icon_mode=False):
72 with gc:
73 gc.clip_to_rect(self.x, self.y, self.width, self.height)
74
75 gc.set_stroke_color(self.line_color_)
76 gc.set_line_width(self.line_width)
77
78 # Draw the body of the arrow
79 starts = points
80 ends = points + self._cached_vector_data
81 gc.begin_path()
82 gc.line_set(starts, ends)
83 gc.stroke_path()
84
85 if self.arrow_size > 0:
86 vec = self._cached_vector_data
87 unit_vec = vec / sqrt(vec[:,0] ** 2 + vec[:,1] ** 2)[:, newaxis]
88 a = 0.707106781 # sqrt(2)/2
89
90 # Draw the left arrowhead (for an arrow pointing straight up)
91 arrow_ends = ends - array(unit_vec * matrix([[a, a], [-a, a]])) * self.arrow_size
92 gc.begin_path()
93 gc.line_set(ends, arrow_ends)
94 gc.stroke_path()
95
96 # Draw the left arrowhead (for an arrow pointing straight up)
97 arrow_ends = ends - array(unit_vec * matrix([[a, -a], [a, a]])) * self.arrow_size
98 gc.begin_path()
99 gc.line_set(ends, arrow_ends)
100 gc.stroke_path()
101
[end of chaco/quiverplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chaco/quiverplot.py b/chaco/quiverplot.py
--- a/chaco/quiverplot.py
+++ b/chaco/quiverplot.py
@@ -69,6 +69,9 @@
def _render(self, gc, points, icon_mode=False):
+ if len(points) < 1:
+ return
+
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
| {"golden_diff": "diff --git a/chaco/quiverplot.py b/chaco/quiverplot.py\n--- a/chaco/quiverplot.py\n+++ b/chaco/quiverplot.py\n@@ -69,6 +69,9 @@\n \n \n def _render(self, gc, points, icon_mode=False):\n+ if len(points) < 1:\n+ return\n+\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n", "issue": "Demo quiver.py not working\n**Problem Description**\r\nZooming in will ends with the following and blank plot.\r\n\r\n**Reproduction Steps:**\r\n\r\nRun the file and zoom in until the plot breaks.\r\n\r\n**Expected behavior:**\r\n\r\nPlot disappear if keep zooming in and ends with following trace.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py\", line 202, in paintEvent\r\n self.handler.paintEvent(event)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py\", line 54, in paintEvent\r\n self._enable_window._paint(event)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/abstract_window.py\", line 468, in _paint\r\n self.component.draw(gc, view_bounds=(0, 0, size[0], size[1]))\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 427, in draw\r\n self._draw(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 779, in _draw\r\n self._dispatch_draw(layer, gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py\", line 272, in _dispatch_draw\r\n component._dispatch_draw(layer, gc, new_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py\", line 272, in _dispatch_draw\r\n component._dispatch_draw(layer, gc, new_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 799, in _dispatch_draw\r\n handler(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py\", line 466, in _draw_plot\r\n self._draw_component(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py\", line 474, in _draw_component\r\n self._render(gc, pts)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/quiverplot.py\", line 80, in _render\r\n ends = points + self._cached_vector_data\r\nValueError: operands could not be broadcast together with shapes (0,) (0,2)\r\n```\r\n\r\n**OS, Python version:** \r\nOSX, Python 2.7\r\n\r\nsplits from #385 \n", "before_files": [{"content": "\nfrom __future__ import with_statement\n\nfrom numpy import array, compress, matrix, newaxis, sqrt, zeros\n\n# Enthought library imports\nfrom enable.api import ColorTrait\nfrom traits.api import Array, Enum, Float, Instance, Int\n\n# Chaco relative imports\nfrom .abstract_data_source import AbstractDataSource\nfrom .scatterplot import ScatterPlot\n\nclass QuiverPlot(ScatterPlot):\n\n # Determines how to interpret the data in the **vectors** data source.\n # \"vector\": each tuple is a (dx, dy)\n # \"radial\": each tuple is an (r, theta)\n data_type = Enum(\"vector\", \"radial\") # TODO: implement \"radial\"\n\n # A datasource that returns an Nx2 array array indicating directions\n # of the vectors. 
The interpretation of this array is dependent on\n # the setting of the **data_type** attribute.\n #\n # Usually this will be a MultiArrayDataSource.\n vectors = Instance(AbstractDataSource)\n\n #------------------------------------------------------------------------\n # Visual attributes of the vector\n #------------------------------------------------------------------------\n\n # The color of the lines\n line_color = ColorTrait(\"black\")\n\n # The width of the lines\n line_width = Float(1.0)\n\n # The length, in pixels, of the arrowhead\n arrow_size = Int(5)\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n _cached_vector_data = Array\n _selected_vector_data = Array\n\n def _gather_points_old(self):\n # In addition to the standard scatterplot _gather_points, we need\n # to also grab the vectors that fall inside the view range\n super(QuiverPlot, self)._gather_points_old()\n\n if not self.index or not self.value:\n return\n\n if len(self._cached_point_mask) == 0:\n self._cached_vector_data = []\n return\n\n vectors = self.vectors.get_data()\n self._cached_vector_data = compress(self._cached_point_mask, vectors, axis=0)\n\n if self._cached_selected_pts is not None:\n indices = self._cached_selection_point_mask\n self._selected_vector_data = compress(indices, vectors, axis=0)\n else:\n self._selected_vector_data = None\n return\n\n\n def _render(self, gc, points, icon_mode=False):\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n\n gc.set_stroke_color(self.line_color_)\n gc.set_line_width(self.line_width)\n\n # Draw the body of the arrow\n starts = points\n ends = points + self._cached_vector_data\n gc.begin_path()\n gc.line_set(starts, ends)\n gc.stroke_path()\n\n if self.arrow_size > 0:\n vec = self._cached_vector_data\n unit_vec = vec / sqrt(vec[:,0] ** 2 + vec[:,1] ** 2)[:, newaxis]\n a = 0.707106781 # sqrt(2)/2\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, a], [-a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, -a], [a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n", "path": "chaco/quiverplot.py"}]} | 2,198 | 99 |
gh_patches_debug_21949 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Capture Celery Queue Name
</issue>
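For context: Celery exposes a running task's routing metadata on `task.request.delivery_info`, which is what the patch below reads. A hedged sketch of that lookup (the helper name is illustrative and not part of the agent's API):

```python
def queue_tags(task):
    # delivery_info is a dict Celery attaches to the task request; mirror the
    # defaults used in the patch below when a key is missing.
    delivery_info = task.request.delivery_info
    return {
        "is_eager": delivery_info.get("is_eager", False),
        "exchange": delivery_info.get("exchange", "unknown"),
        "routing_key": delivery_info.get("routing_key", "unknown"),
        "queue": delivery_info.get("queue", "unknown"),
    }
```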
<code>
[start of src/scout_apm/celery.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from celery.signals import task_postrun, task_prerun
5
6 import scout_apm.core
7 from scout_apm.core.tracked_request import TrackedRequest
8
9
10 # TODO: Capture queue.
11 # https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
12 def prerun_callback(sender=None, headers=None, body=None, **kwargs):
13 name = kwargs["task"].name
14
15 tr = TrackedRequest.instance()
16 tr.mark_real_request()
17 span = tr.start_span(operation=("Job/" + name))
18 span.tag("queue", "default")
19
20
21 def postrun_callback(sender=None, headers=None, body=None, **kwargs):
22 tr = TrackedRequest.instance()
23 tr.stop_span()
24
25
26 def install():
27 installed = scout_apm.core.install()
28 if not installed:
29 return
30
31 task_prerun.connect(prerun_callback)
32 task_postrun.connect(postrun_callback)
33
34
35 def uninstall():
36 task_prerun.disconnect(prerun_callback)
37 task_postrun.disconnect(postrun_callback)
38
[end of src/scout_apm/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py
--- a/src/scout_apm/celery.py
+++ b/src/scout_apm/celery.py
@@ -7,20 +7,22 @@
from scout_apm.core.tracked_request import TrackedRequest
-# TODO: Capture queue.
-# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
-def prerun_callback(sender=None, headers=None, body=None, **kwargs):
- name = kwargs["task"].name
+def prerun_callback(task=None, **kwargs):
+ tracked_request = TrackedRequest.instance()
+ tracked_request.mark_real_request()
- tr = TrackedRequest.instance()
- tr.mark_real_request()
- span = tr.start_span(operation=("Job/" + name))
- span.tag("queue", "default")
+ delivery_info = task.request.delivery_info
+ tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
+ tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
+ tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
+ tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
+ tracked_request.start_span(operation=("Job/" + task.name))
-def postrun_callback(sender=None, headers=None, body=None, **kwargs):
- tr = TrackedRequest.instance()
- tr.stop_span()
+
+def postrun_callback(task=None, **kwargs):
+ tracked_request = TrackedRequest.instance()
+ tracked_request.stop_span()
def install():
| {"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -7,20 +7,22 @@\n from scout_apm.core.tracked_request import TrackedRequest\n \n \n-# TODO: Capture queue.\n-# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\n-def prerun_callback(sender=None, headers=None, body=None, **kwargs):\n- name = kwargs[\"task\"].name\n+def prerun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.mark_real_request()\n \n- tr = TrackedRequest.instance()\n- tr.mark_real_request()\n- span = tr.start_span(operation=(\"Job/\" + name))\n- span.tag(\"queue\", \"default\")\n+ delivery_info = task.request.delivery_info\n+ tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n+ tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n+ tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n+ tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n \n+ tracked_request.start_span(operation=(\"Job/\" + task.name))\n \n-def postrun_callback(sender=None, headers=None, body=None, **kwargs):\n- tr = TrackedRequest.instance()\n- tr.stop_span()\n+\n+def postrun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.stop_span()\n \n \n def install():\n", "issue": "Capture Celery Queue Name\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom celery.signals import task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\n# TODO: Capture queue.\n# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\ndef prerun_callback(sender=None, headers=None, body=None, **kwargs):\n name = kwargs[\"task\"].name\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=(\"Job/\" + name))\n span.tag(\"queue\", \"default\")\n\n\ndef postrun_callback(sender=None, headers=None, body=None, **kwargs):\n tr = TrackedRequest.instance()\n tr.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]} | 901 | 397 |
gh_patches_debug_24589 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-2024 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update seed script to add broker url in challenge
## Current Behaviour:
Currently, when the challenge is created using the seed script, the `queue` field is not populated.
## Expected Behaviour:
When the seed script is run, the `queue` field must be populated with a random string of 75 characters.
</issue>
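A minimal sketch of how the expected behaviour can be produced with the standard library (the helper name is illustrative; the patch below inlines the same expression directly in `create_challenge`):

```python
import random
import string

def generate_queue_name(length=75):
    # Random ASCII-letter string to use as the challenge's broker queue name.
    return "".join(random.choice(string.ascii_letters) for _ in range(length))
```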
<code>
[start of scripts/seed.py]
1 # Command to run : python manage.py shell < scripts/seed.py
2 import os
3
4 from datetime import timedelta
5
6 from django.conf import settings
7 from django.contrib.auth.models import User
8 from django.core.files.uploadedfile import SimpleUploadedFile
9 from django.utils import timezone
10
11 from allauth.account.models import EmailAddress
12 from faker import Factory
13
14 from challenges.models import Challenge, ChallengePhase, DatasetSplit, Leaderboard, ChallengePhaseSplit
15 from hosts.models import ChallengeHostTeam, ChallengeHost
16 from participants.models import Participant, ParticipantTeam
17
18 fake = Factory.create()
19
20 NUMBER_OF_CHALLENGES = 1
21 NUMBER_OF_PHASES = 2
22 NUMBER_OF_DATASET_SPLITS = 2
23 DATASET_SPLIT_ITERATOR = 0
24
25 try:
26 xrange # Python 2
27 except NameError:
28 xrange = range # Python 3
29
30
31 def check_database():
32 if len(EmailAddress.objects.all()) > 0:
33 print("Are you sure you want to wipe the existing development database and reseed it? (Y/N)")
34 if settings.TEST or input().lower() == "y":
35 destroy_database()
36 return True
37 else:
38 return False
39 else:
40 return True
41
42
43 def destroy_database():
44 print("Destroying existing database...")
45 print("Destroying Participant objects...")
46 Participant.objects.all().delete()
47 print("Destroying ParticipantTeam objects...")
48 ParticipantTeam.objects.all().delete()
49 print("Destroying ChallengePhaseSplit objects...")
50 ChallengePhaseSplit.objects.all().delete()
51 print("Destroying DatasetSplit objects...")
52 DatasetSplit.objects.all().delete()
53 print("Destroying ChallengePhase objects...")
54 ChallengePhase.objects.all().delete()
55 print("Destroying Leaderboard objects...")
56 Leaderboard.objects.all().delete()
57 print("Destroying Challenge objects...")
58 Challenge.objects.all().delete()
59 print("Destroying ChallengeHostTeam objects...")
60 ChallengeHostTeam.objects.all().delete()
61 print("Destroying ChallengeHost objects...")
62 ChallengeHost.objects.all().delete()
63 print("Destroying User objects...")
64 User.objects.all().delete()
65 print("Destroying EmailAddress objects...")
66 EmailAddress.objects.all().delete()
67 return True
68
69
70 def create_user(is_admin, username=""):
71 """
72 Creates superuser, participant user, host user and returns it.
73 """
74 if is_admin:
75 username = "admin"
76 email = "[email protected]"
77 else:
78 email = "%[email protected]" % (username)
79 user = User.objects.create_user(
80 email=email,
81 username=username,
82 password="password",
83 is_staff=is_admin,
84 is_superuser=is_admin,
85 )
86 EmailAddress.objects.create(user=user, email=email, verified=True, primary=True)
87 print("{} was created with username: {} password: password".format("Super user" if is_admin else "User", username))
88 return user
89
90
91 def create_challenge_host_team(user):
92 """
93 Creates challenge host team and returns it.
94 """
95 team_name = "{} Host Team".format(fake.city())
96 team = ChallengeHostTeam.objects.create(
97 team_name=team_name,
98 created_by=user,
99 )
100 print("Challenge Host Team created with team_name: {} created_by: {}".format(team_name, user.username))
101 ChallengeHost.objects.create(user=user, team_name=team, status=ChallengeHost.SELF, permissions=ChallengeHost.ADMIN)
102 print("Challenge Host created with user: {} team_name: {}".format(user.username, team_name))
103 return team
104
105
106 def create_challenges(number_of_challenges, host_team=None):
107 """
108 Creates past challenge, on-going challenge and upcoming challenge.
109 """
110 for i in xrange(number_of_challenges):
111 if (i % 3 == 0):
112 create_challenge("{} Challenge".format(fake.first_name()),
113 timezone.now() - timedelta(days=100),
114 timezone.now() + timedelta(days=500),
115 host_team
116 )
117 elif (i % 3 == 1):
118 create_challenge("{} Challenge".format(fake.first_name()),
119 timezone.now() - timedelta(days=500),
120 timezone.now() - timedelta(days=100),
121 host_team
122 )
123 elif (i % 3 == 2):
124 create_challenge("{} Challenge".format(fake.first_name()),
125 timezone.now() + timedelta(days=100),
126 timezone.now() + timedelta(days=500),
127 host_team
128 )
129
130
131 def create_challenge(title, start_date, end_date, host_team):
132 """
133 Creates a challenge.
134 """
135 evaluation_script = open(
136 os.path.join(settings.BASE_DIR, 'examples', 'example1', 'sample_evaluation_script.zip'), 'rb')
137 Challenge.objects.create(
138 title=title,
139 short_description=fake.paragraph(),
140 description=fake.paragraph(),
141 terms_and_conditions=fake.paragraph(),
142 submission_guidelines=fake.paragraph(),
143 evaluation_details=fake.paragraph(),
144 evaluation_script=SimpleUploadedFile(evaluation_script.name, evaluation_script.read()),
145 approved_by_admin=True,
146 creator=host_team,
147 published=True,
148 enable_forum=True,
149 anonymous_leaderboard=False,
150 start_date=start_date,
151 end_date=end_date,
152 )
153 print("Challenge created with title: {} creator: {} start_date: {} end_date: {}".format(title,
154 host_team.team_name,
155 start_date, end_date))
156
157
158 def create_challenge_phases(challenge, number_of_phases=1):
159 """
160 Creates challenge phases for the created challenges and returns it.
161 """
162 challenge_phases = []
163 for i in range(number_of_phases):
164 name = "{} Phase".format(fake.first_name())
165 with open(os.path.join(settings.BASE_DIR, 'examples', 'example1', 'test_annotation.txt'), 'rb') as data_file:
166 data = data_file.read()
167 data = data or None
168 challenge_phase = ChallengePhase.objects.create(
169 name=name,
170 description=fake.paragraph(),
171 leaderboard_public=True,
172 is_public=True,
173 start_date=challenge.start_date,
174 end_date=challenge.end_date,
175 challenge=challenge,
176 test_annotation=SimpleUploadedFile(fake.file_name(extension="txt"), data, content_type="text/plain"),
177 codename="{}{}".format("phase", i + 1),
178 )
179 challenge_phases.append(challenge_phase)
180 print("Challenge Phase created with name: {} challenge: {}".format(name, challenge.title))
181 return challenge_phases
182
183
184 def create_leaderboard():
185 """
186 Creates Leaderboard schema and returns it.
187 """
188 schema = {
189 'labels': ['score', ],
190 'default_order_by': 'score',
191 }
192 leaderboard = Leaderboard.objects.create(
193 schema=schema
194 )
195 print("Leaderboard created")
196 return leaderboard
197
198
199 def create_dataset_splits(number_of_splits):
200 """
201 Creates dataset splits and returns it.
202 """
203 dataset_splits = []
204 for i in range(number_of_splits):
205 global DATASET_SPLIT_ITERATOR
206 name = "Split {}".format(DATASET_SPLIT_ITERATOR + 1)
207 codename = "{}{}".format('split', DATASET_SPLIT_ITERATOR + 1)
208 dataset_split = DatasetSplit.objects.create(
209 name=name,
210 codename=codename,
211 )
212 dataset_splits.append(dataset_split)
213 DATASET_SPLIT_ITERATOR += 1
214 print("Dataset Split created with name: {} codename: {}".format(name, codename))
215 return dataset_splits
216
217
218 def create_challenge_phase_splits(challenge_phase, leaderboard, dataset_split):
219 """
220 Creates a challenge phase split.
221 """
222 ChallengePhaseSplit.objects.create(
223 challenge_phase=challenge_phase,
224 leaderboard=leaderboard,
225 dataset_split=dataset_split,
226 visibility=ChallengePhaseSplit.PUBLIC
227 )
228 print("Challenge Phase Split created with challenge_phase: {} dataset_split: {}".format(challenge_phase.name,
229 dataset_split.name))
230
231
232 def create_participant_team(user):
233 """
234 Creates participant team and returns it.
235 """
236 team_name = "{} Participant Team".format(fake.city())
237 team = ParticipantTeam.objects.create(
238 team_name=team_name,
239 created_by=user,
240 )
241 print("Participant Team created with team_name: {} created_by: {}".format(team_name, user.username))
242 Participant.objects.create(user=user, team=team, status="Self")
243 print("Participant created with user: {} team_name: {}".format(user.username, team_name))
244 return team
245
246
247 def run(*args):
248 NUMBER_OF_CHALLENGES = int(args[0])
249 status = check_database()
250 if status is False:
251 print("Seeding aborted.")
252 return 0
253 print("Seeding...")
254 # Create superuser
255 create_user(is_admin=True)
256 # Create host user
257 host_user = create_user(is_admin=False, username="host")
258 # Create challenge host team with challenge host
259 challenge_host_team = create_challenge_host_team(user=host_user)
260 # Create challenge
261 create_challenges(number_of_challenges=NUMBER_OF_CHALLENGES, host_team=challenge_host_team)
262
263 # Fetch all the created challenges
264 challenges = Challenge.objects.all()
265 for challenge in challenges:
266 # Create a leaderboard object for each challenge
267 leaderboard = create_leaderboard()
268 # Create Phases for a challenge
269 challenge_phases = create_challenge_phases(challenge, number_of_phases=NUMBER_OF_PHASES)
270 # Create Dataset Split for each Challenge
271 dataset_splits = create_dataset_splits(number_of_splits=NUMBER_OF_DATASET_SPLITS)
272 # Create Challenge Phase Split for each Phase and Dataset Split
273 for challenge_phase in challenge_phases:
274 for dataset_split in dataset_splits:
275 create_challenge_phase_splits(challenge_phase, leaderboard, dataset_split)
276 participant_user = create_user(is_admin=False, username="participant")
277 create_participant_team(user=participant_user)
278 print('Database successfully seeded.')
279
[end of scripts/seed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/seed.py b/scripts/seed.py
--- a/scripts/seed.py
+++ b/scripts/seed.py
@@ -1,5 +1,7 @@
# Command to run : python manage.py shell < scripts/seed.py
import os
+import random
+import string
from datetime import timedelta
@@ -134,6 +136,7 @@
"""
evaluation_script = open(
os.path.join(settings.BASE_DIR, 'examples', 'example1', 'sample_evaluation_script.zip'), 'rb')
+ queue = ''.join(random.choice(string.ascii_letters) for _ in range(75))
Challenge.objects.create(
title=title,
short_description=fake.paragraph(),
@@ -149,6 +152,7 @@
anonymous_leaderboard=False,
start_date=start_date,
end_date=end_date,
+ queue=queue,
)
print("Challenge created with title: {} creator: {} start_date: {} end_date: {}".format(title,
host_team.team_name,
| {"golden_diff": "diff --git a/scripts/seed.py b/scripts/seed.py\n--- a/scripts/seed.py\n+++ b/scripts/seed.py\n@@ -1,5 +1,7 @@\n # Command to run : python manage.py shell < scripts/seed.py\n import os\n+import random\n+import string\n \n from datetime import timedelta\n \n@@ -134,6 +136,7 @@\n \"\"\"\n evaluation_script = open(\n os.path.join(settings.BASE_DIR, 'examples', 'example1', 'sample_evaluation_script.zip'), 'rb')\n+ queue = ''.join(random.choice(string.ascii_letters) for _ in range(75))\n Challenge.objects.create(\n title=title,\n short_description=fake.paragraph(),\n@@ -149,6 +152,7 @@\n anonymous_leaderboard=False,\n start_date=start_date,\n end_date=end_date,\n+ queue=queue,\n )\n print(\"Challenge created with title: {} creator: {} start_date: {} end_date: {}\".format(title,\n host_team.team_name,\n", "issue": "Update seed script to add broker url in challenge\n## Current Behaviour:\r\n\r\nCurrently, when the challenge is created using the seed script, the `queue` field is not populated.\r\n\r\n## Expected Behaviour:\r\nWhen the seed script is run, the `queue` field must be populated with the random string of 75 characters in length.\n", "before_files": [{"content": "# Command to run : python manage.py shell < scripts/seed.py\nimport os\n\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.utils import timezone\n\nfrom allauth.account.models import EmailAddress\nfrom faker import Factory\n\nfrom challenges.models import Challenge, ChallengePhase, DatasetSplit, Leaderboard, ChallengePhaseSplit\nfrom hosts.models import ChallengeHostTeam, ChallengeHost\nfrom participants.models import Participant, ParticipantTeam\n\nfake = Factory.create()\n\nNUMBER_OF_CHALLENGES = 1\nNUMBER_OF_PHASES = 2\nNUMBER_OF_DATASET_SPLITS = 2\nDATASET_SPLIT_ITERATOR = 0\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef check_database():\n if len(EmailAddress.objects.all()) > 0:\n print(\"Are you sure you want to wipe the existing development database and reseed it? 
(Y/N)\")\n if settings.TEST or input().lower() == \"y\":\n destroy_database()\n return True\n else:\n return False\n else:\n return True\n\n\ndef destroy_database():\n print(\"Destroying existing database...\")\n print(\"Destroying Participant objects...\")\n Participant.objects.all().delete()\n print(\"Destroying ParticipantTeam objects...\")\n ParticipantTeam.objects.all().delete()\n print(\"Destroying ChallengePhaseSplit objects...\")\n ChallengePhaseSplit.objects.all().delete()\n print(\"Destroying DatasetSplit objects...\")\n DatasetSplit.objects.all().delete()\n print(\"Destroying ChallengePhase objects...\")\n ChallengePhase.objects.all().delete()\n print(\"Destroying Leaderboard objects...\")\n Leaderboard.objects.all().delete()\n print(\"Destroying Challenge objects...\")\n Challenge.objects.all().delete()\n print(\"Destroying ChallengeHostTeam objects...\")\n ChallengeHostTeam.objects.all().delete()\n print(\"Destroying ChallengeHost objects...\")\n ChallengeHost.objects.all().delete()\n print(\"Destroying User objects...\")\n User.objects.all().delete()\n print(\"Destroying EmailAddress objects...\")\n EmailAddress.objects.all().delete()\n return True\n\n\ndef create_user(is_admin, username=\"\"):\n \"\"\"\n Creates superuser, participant user, host user and returns it.\n \"\"\"\n if is_admin:\n username = \"admin\"\n email = \"[email protected]\"\n else:\n email = \"%[email protected]\" % (username)\n user = User.objects.create_user(\n email=email,\n username=username,\n password=\"password\",\n is_staff=is_admin,\n is_superuser=is_admin,\n )\n EmailAddress.objects.create(user=user, email=email, verified=True, primary=True)\n print(\"{} was created with username: {} password: password\".format(\"Super user\" if is_admin else \"User\", username))\n return user\n\n\ndef create_challenge_host_team(user):\n \"\"\"\n Creates challenge host team and returns it.\n \"\"\"\n team_name = \"{} Host Team\".format(fake.city())\n team = ChallengeHostTeam.objects.create(\n team_name=team_name,\n created_by=user,\n )\n print(\"Challenge Host Team created with team_name: {} created_by: {}\".format(team_name, user.username))\n ChallengeHost.objects.create(user=user, team_name=team, status=ChallengeHost.SELF, permissions=ChallengeHost.ADMIN)\n print(\"Challenge Host created with user: {} team_name: {}\".format(user.username, team_name))\n return team\n\n\ndef create_challenges(number_of_challenges, host_team=None):\n \"\"\"\n Creates past challenge, on-going challenge and upcoming challenge.\n \"\"\"\n for i in xrange(number_of_challenges):\n if (i % 3 == 0):\n create_challenge(\"{} Challenge\".format(fake.first_name()),\n timezone.now() - timedelta(days=100),\n timezone.now() + timedelta(days=500),\n host_team\n )\n elif (i % 3 == 1):\n create_challenge(\"{} Challenge\".format(fake.first_name()),\n timezone.now() - timedelta(days=500),\n timezone.now() - timedelta(days=100),\n host_team\n )\n elif (i % 3 == 2):\n create_challenge(\"{} Challenge\".format(fake.first_name()),\n timezone.now() + timedelta(days=100),\n timezone.now() + timedelta(days=500),\n host_team\n )\n\n\ndef create_challenge(title, start_date, end_date, host_team):\n \"\"\"\n Creates a challenge.\n \"\"\"\n evaluation_script = open(\n os.path.join(settings.BASE_DIR, 'examples', 'example1', 'sample_evaluation_script.zip'), 'rb')\n Challenge.objects.create(\n title=title,\n short_description=fake.paragraph(),\n description=fake.paragraph(),\n terms_and_conditions=fake.paragraph(),\n submission_guidelines=fake.paragraph(),\n 
evaluation_details=fake.paragraph(),\n evaluation_script=SimpleUploadedFile(evaluation_script.name, evaluation_script.read()),\n approved_by_admin=True,\n creator=host_team,\n published=True,\n enable_forum=True,\n anonymous_leaderboard=False,\n start_date=start_date,\n end_date=end_date,\n )\n print(\"Challenge created with title: {} creator: {} start_date: {} end_date: {}\".format(title,\n host_team.team_name,\n start_date, end_date))\n\n\ndef create_challenge_phases(challenge, number_of_phases=1):\n \"\"\"\n Creates challenge phases for the created challenges and returns it.\n \"\"\"\n challenge_phases = []\n for i in range(number_of_phases):\n name = \"{} Phase\".format(fake.first_name())\n with open(os.path.join(settings.BASE_DIR, 'examples', 'example1', 'test_annotation.txt'), 'rb') as data_file:\n data = data_file.read()\n data = data or None\n challenge_phase = ChallengePhase.objects.create(\n name=name,\n description=fake.paragraph(),\n leaderboard_public=True,\n is_public=True,\n start_date=challenge.start_date,\n end_date=challenge.end_date,\n challenge=challenge,\n test_annotation=SimpleUploadedFile(fake.file_name(extension=\"txt\"), data, content_type=\"text/plain\"),\n codename=\"{}{}\".format(\"phase\", i + 1),\n )\n challenge_phases.append(challenge_phase)\n print(\"Challenge Phase created with name: {} challenge: {}\".format(name, challenge.title))\n return challenge_phases\n\n\ndef create_leaderboard():\n \"\"\"\n Creates Leaderboard schema and returns it.\n \"\"\"\n schema = {\n 'labels': ['score', ],\n 'default_order_by': 'score',\n }\n leaderboard = Leaderboard.objects.create(\n schema=schema\n )\n print(\"Leaderboard created\")\n return leaderboard\n\n\ndef create_dataset_splits(number_of_splits):\n \"\"\"\n Creates dataset splits and returns it.\n \"\"\"\n dataset_splits = []\n for i in range(number_of_splits):\n global DATASET_SPLIT_ITERATOR\n name = \"Split {}\".format(DATASET_SPLIT_ITERATOR + 1)\n codename = \"{}{}\".format('split', DATASET_SPLIT_ITERATOR + 1)\n dataset_split = DatasetSplit.objects.create(\n name=name,\n codename=codename,\n )\n dataset_splits.append(dataset_split)\n DATASET_SPLIT_ITERATOR += 1\n print(\"Dataset Split created with name: {} codename: {}\".format(name, codename))\n return dataset_splits\n\n\ndef create_challenge_phase_splits(challenge_phase, leaderboard, dataset_split):\n \"\"\"\n Creates a challenge phase split.\n \"\"\"\n ChallengePhaseSplit.objects.create(\n challenge_phase=challenge_phase,\n leaderboard=leaderboard,\n dataset_split=dataset_split,\n visibility=ChallengePhaseSplit.PUBLIC\n )\n print(\"Challenge Phase Split created with challenge_phase: {} dataset_split: {}\".format(challenge_phase.name,\n dataset_split.name))\n\n\ndef create_participant_team(user):\n \"\"\"\n Creates participant team and returns it.\n \"\"\"\n team_name = \"{} Participant Team\".format(fake.city())\n team = ParticipantTeam.objects.create(\n team_name=team_name,\n created_by=user,\n )\n print(\"Participant Team created with team_name: {} created_by: {}\".format(team_name, user.username))\n Participant.objects.create(user=user, team=team, status=\"Self\")\n print(\"Participant created with user: {} team_name: {}\".format(user.username, team_name))\n return team\n\n\ndef run(*args):\n NUMBER_OF_CHALLENGES = int(args[0])\n status = check_database()\n if status is False:\n print(\"Seeding aborted.\")\n return 0\n print(\"Seeding...\")\n # Create superuser\n create_user(is_admin=True)\n # Create host user\n host_user = create_user(is_admin=False, 
username=\"host\")\n # Create challenge host team with challenge host\n challenge_host_team = create_challenge_host_team(user=host_user)\n # Create challenge\n create_challenges(number_of_challenges=NUMBER_OF_CHALLENGES, host_team=challenge_host_team)\n\n # Fetch all the created challenges\n challenges = Challenge.objects.all()\n for challenge in challenges:\n # Create a leaderboard object for each challenge\n leaderboard = create_leaderboard()\n # Create Phases for a challenge\n challenge_phases = create_challenge_phases(challenge, number_of_phases=NUMBER_OF_PHASES)\n # Create Dataset Split for each Challenge\n dataset_splits = create_dataset_splits(number_of_splits=NUMBER_OF_DATASET_SPLITS)\n # Create Challenge Phase Split for each Phase and Dataset Split\n for challenge_phase in challenge_phases:\n for dataset_split in dataset_splits:\n create_challenge_phase_splits(challenge_phase, leaderboard, dataset_split)\n participant_user = create_user(is_admin=False, username=\"participant\")\n create_participant_team(user=participant_user)\n print('Database successfully seeded.')\n", "path": "scripts/seed.py"}]} | 3,419 | 223 |
gh_patches_debug_33985 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in RichConsoleSpanExporter
**Describe your environment**
Python 3.9.7; snippet from `pipenv graph`:
```
opentelemetry-exporter-richconsole==0.25b2
- opentelemetry-api [required: ~=1.3, installed: 1.10a0]
- opentelemetry-sdk [required: ~=1.3, installed: 1.10a0]
- opentelemetry-api [required: ==1.10a0, installed: 1.10a0]
- opentelemetry-semantic-conventions [required: ==0.25b2, installed: 0.25b2]
- rich [required: >=10.0.0, installed: 10.12.0]
- colorama [required: >=0.4.0,<0.5.0, installed: 0.4.4]
- commonmark [required: >=0.9.0,<0.10.0, installed: 0.9.1]
- pygments [required: >=2.6.0,<3.0.0, installed: 2.10.0]
```
**Steps to reproduce**
Given this code:
```
from opentelemetry import trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.exporter.richconsole import RichConsoleSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.sdk.trace.export import ConsoleSpanExporter
APP_SERVICE_NAME = "fastapi-goofing"
trace.set_tracer_provider(
TracerProvider(
resource=Resource.create({SERVICE_NAME: APP_SERVICE_NAME})
)
)
rich_console_exporter = RichConsoleSpanExporter(
service_name=APP_SERVICE_NAME,
)
console_exporter = ConsoleSpanExporter(
service_name=APP_SERVICE_NAME
)
trace.get_tracer_provider().add_span_processor(
BatchSpanProcessor(rich_console_exporter)
#BatchSpanProcessor(console_exporter)
)
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("foo"):
with tracer.start_as_current_span("bar"):
with tracer.start_as_current_span("baz"):
print("Hello world from OpenTelemetry Python!")
```
The `RichConsoleSpanExporter` throws this error:
```
Exception while exporting Span batch.
Traceback (most recent call last):
File "/home/trond/Documents/projects/fastapi-goofring/.venv/lib/python3.9/site-packages/opentelemetry/sdk/trace/export/__init__.py", line 331, in _export_batch
self.span_exporter.export(self.spans_list[:idx]) # type: ignore
File "/home/trond/Documents/projects/fastapi-goofring/.venv/lib/python3.9/site-packages/opentelemetry/exporter/richconsole/__init__.py", line 166, in export
child = parents[span.parent.span_id].add(
AttributeError: 'NoneType' object has no attribute 'span_id'
```
If I replace the Rich exporter with the regular Console exporter, everything runs without problems.
**What is the expected behavior?**
Code runs without exceptions
**What is the actual behavior?**
Exception is thrown
**Additional context**
</issue>
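For context: the traceback shows that `span.parent` is `None` for a root span, yet the exporter only falls back to the tree root when the parent id is absent from `parents`, so the `else` branch dereferences `span.parent.span_id` on `None`. A hedged sketch of the guard that avoids this (the helper name and argument shapes are illustrative):

```python
def parent_node(tree, parents, span):
    # Attach to the parent's node only when the span has a parent we have already seen;
    # root spans (span.parent is None) and unseen parents go under the tree root.
    if span.parent is not None and span.parent.span_id in parents:
        return parents[span.parent.span_id]
    return tree
```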
<code>
[start of exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The **OpenTelemetry Rich Console Exporter** provides a span exporter from a batch span processor
17 to print `OpenTelemetry`_ traces using `Rich`_.
18
19 Installation
20 ------------
21
22 ::
23
24 pip install opentelemetry-exporter-richconsole
25
26
27 Usage
28 -----
29
30 The Rich Console Exporter is a console exporter that prints a tree view onto stdout of the traces
31 with the related spans and properties as children of that tree. For the tree view, the Rich
32 Console Exporter should be used with a BatchSpanProcessor. If used within a SimpleSpanProcessor,
33 all spans will be printed in a list.
34
35 .. code:: python
36
37 from opentelemetry import trace
38 from opentelemetry.sdk.trace.export import BatchSpanProcessor
39 from opentelemetry.exporter.richconsole import RichConsoleExporter
40 from opentelemetry.sdk.trace import TracerProvider
41
42 trace.set_tracer_provider(TracerProvider())
43 tracer = trace.get_tracer(__name__)
44
45 tracer.add_span_processor(BatchSpanProcessor(RichConsoleExporter()))
46
47
48 API
49 ---
50 .. _Rich: https://rich.readthedocs.io/
51 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
52 """
53 # pylint: disable=import-error
54
55 import datetime
56 import typing
57 from typing import Optional
58
59 from rich.console import Console
60 from rich.syntax import Syntax
61 from rich.text import Text
62 from rich.tree import Tree
63
64 import opentelemetry.trace
65 from opentelemetry.sdk.trace import ReadableSpan
66 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
67 from opentelemetry.semconv.trace import SpanAttributes
68
69
70 def _ns_to_time(nanoseconds):
71 ts = datetime.datetime.utcfromtimestamp(nanoseconds / 1e9)
72 return ts.strftime("%H:%M:%S.%f")
73
74
75 def _child_to_tree(child: Tree, span: ReadableSpan):
76 child.add(
77 Text.from_markup(f"[bold cyan]Kind :[/bold cyan] {span.kind.name}")
78 )
79 if not span.status.is_unset:
80 if not span.status.is_ok:
81 child.add(
82 Text.from_markup(
83 f"[bold cyan]Status :[/bold cyan] [red]{span.status.status_code}[/red]"
84 )
85 )
86 else:
87 child.add(
88 Text.from_markup(
89 f"[bold cyan]Status :[/bold cyan] {span.status.status_code}"
90 )
91 )
92 if span.status.description:
93 child.add(
94 Text.from_markup(
95 f"[bold cyan]Description :[/bold cyan] {span.status.description}"
96 )
97 )
98
99 if span.events:
100 events = child.add(
101 label=Text.from_markup("[bold cyan]Events :[/bold cyan] ")
102 )
103 for event in span.events:
104 event_node = events.add(Text(event.name))
105 for key, val in event.attributes.items():
106 event_node.add(
107 Text.from_markup(f"[bold cyan]{key} :[/bold cyan] {val}")
108 )
109 if span.attributes:
110 attributes = child.add(
111 label=Text.from_markup("[bold cyan]Attributes :[/bold cyan] ")
112 )
113 for attribute in span.attributes:
114 if attribute == SpanAttributes.DB_STATEMENT:
115 attributes.add(
116 Text.from_markup(f"[bold cyan]{attribute} :[/bold cyan] ")
117 )
118 attributes.add(Syntax(span.attributes[attribute], "sql"))
119 else:
120 attributes.add(
121 Text.from_markup(
122 f"[bold cyan]{attribute} :[/bold cyan] {span.attributes[attribute]}"
123 )
124 )
125
126
127 class RichConsoleSpanExporter(SpanExporter):
128 """Implementation of :class:`SpanExporter` that prints spans to the
129 console.
130
131 Should be used within a BatchSpanProcessor
132 """
133
134 def __init__(
135 self,
136 service_name: Optional[str] = None,
137 ):
138 self.service_name = service_name
139 self.console = Console()
140
141 def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
142 if not spans:
143 return SpanExportResult.SUCCESS
144 tree = Tree(
145 label=f"Trace {opentelemetry.trace.format_trace_id(spans[0].context.trace_id)}"
146 )
147 parents = {}
148 for span in spans:
149 child = tree.add(
150 label=Text.from_markup(
151 f"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}"
152 )
153 )
154 parents[span.context.span_id] = child
155 _child_to_tree(child, span)
156
157 for span in spans:
158 if span.parent and span.parent.span_id not in parents:
159 child = tree.add(
160 label=Text.from_markup(
161 f"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}"
162 )
163 )
164 else:
165 child = parents[span.parent.span_id].add(
166 label=Text.from_markup(
167 f"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}"
168 )
169 )
170 parents[span.context.span_id] = child
171 _child_to_tree(child, span)
172
173 self.console.print(tree)
174 return SpanExportResult.SUCCESS
175
[end of exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py b/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py
--- a/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py
+++ b/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py
@@ -36,13 +36,13 @@
from opentelemetry import trace
from opentelemetry.sdk.trace.export import BatchSpanProcessor
- from opentelemetry.exporter.richconsole import RichConsoleExporter
+ from opentelemetry.exporter.richconsole import RichConsoleSpanExporter
from opentelemetry.sdk.trace import TracerProvider
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)
- tracer.add_span_processor(BatchSpanProcessor(RichConsoleExporter()))
+ tracer.add_span_processor(BatchSpanProcessor(RichConsoleSpanExporter()))
API
@@ -155,18 +155,19 @@
_child_to_tree(child, span)
for span in spans:
- if span.parent and span.parent.span_id not in parents:
- child = tree.add(
+ if span.parent and span.parent.span_id in parents:
+ child = parents[span.parent.span_id].add(
label=Text.from_markup(
f"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}"
)
)
else:
- child = parents[span.parent.span_id].add(
+ child = tree.add(
label=Text.from_markup(
f"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}"
)
)
+
parents[span.context.span_id] = child
_child_to_tree(child, span)
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py b/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py\n--- a/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py\n+++ b/exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py\n@@ -36,13 +36,13 @@\n \n from opentelemetry import trace\n from opentelemetry.sdk.trace.export import BatchSpanProcessor\n- from opentelemetry.exporter.richconsole import RichConsoleExporter\n+ from opentelemetry.exporter.richconsole import RichConsoleSpanExporter\n from opentelemetry.sdk.trace import TracerProvider\n \n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n \n- tracer.add_span_processor(BatchSpanProcessor(RichConsoleExporter()))\n+ tracer.add_span_processor(BatchSpanProcessor(RichConsoleSpanExporter()))\n \n \n API\n@@ -155,18 +155,19 @@\n _child_to_tree(child, span)\n \n for span in spans:\n- if span.parent and span.parent.span_id not in parents:\n- child = tree.add(\n+ if span.parent and span.parent.span_id in parents:\n+ child = parents[span.parent.span_id].add(\n label=Text.from_markup(\n f\"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}\"\n )\n )\n else:\n- child = parents[span.parent.span_id].add(\n+ child = tree.add(\n label=Text.from_markup(\n f\"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}\"\n )\n )\n+\n parents[span.context.span_id] = child\n _child_to_tree(child, span)\n", "issue": "Bug in RichConsoleSpanExporter\n**Describe your environment**\r\nPython 3.9.7, Snippet from `pipenv graph`:\r\n```\r\nopentelemetry-exporter-richconsole==0.25b2\r\n - opentelemetry-api [required: ~=1.3, installed: 1.10a0]\r\n - opentelemetry-sdk [required: ~=1.3, installed: 1.10a0]\r\n - opentelemetry-api [required: ==1.10a0, installed: 1.10a0]\r\n - opentelemetry-semantic-conventions [required: ==0.25b2, installed: 0.25b2]\r\n - rich [required: >=10.0.0, installed: 10.12.0]\r\n - colorama [required: >=0.4.0,<0.5.0, installed: 0.4.4]\r\n - commonmark [required: >=0.9.0,<0.10.0, installed: 0.9.1]\r\n - pygments [required: >=2.6.0,<3.0.0, installed: 2.10.0]\r\n```\r\n\r\n**Steps to reproduce**\r\nGiven this code:\r\n```\r\nfrom opentelemetry import trace\r\nfrom opentelemetry.sdk.resources import SERVICE_NAME, Resource\r\nfrom opentelemetry.exporter.richconsole import RichConsoleSpanExporter\r\nfrom opentelemetry.sdk.trace import TracerProvider\r\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\r\nfrom opentelemetry.sdk.trace.export import ConsoleSpanExporter\r\n\r\nAPP_SERVICE_NAME = \"fastapi-goofing\"\r\n\r\ntrace.set_tracer_provider(\r\n TracerProvider(\r\n resource=Resource.create({SERVICE_NAME: APP_SERVICE_NAME})\r\n )\r\n)\r\n\r\nrich_console_exporter = RichConsoleSpanExporter(\r\n service_name=APP_SERVICE_NAME,\r\n)\r\n\r\n\r\nconsole_exporter = ConsoleSpanExporter(\r\n service_name=APP_SERVICE_NAME\r\n)\r\n\r\ntrace.get_tracer_provider().add_span_processor(\r\n BatchSpanProcessor(rich_console_exporter)\r\n #BatchSpanProcessor(console_exporter)\r\n)\r\ntracer = trace.get_tracer(__name__)\r\n\r\nwith tracer.start_as_current_span(\"foo\"):\r\n with tracer.start_as_current_span(\"bar\"):\r\n with tracer.start_as_current_span(\"baz\"):\r\n 
print(\"Hello world from OpenTelemetry Python!\")\r\n\r\n\r\n```\r\nThe `RichConsoleSpanExporter` throws this error:\r\n```\r\nException while exporting Span batch.\r\nTraceback (most recent call last):\r\n File \"/home/trond/Documents/projects/fastapi-goofring/.venv/lib/python3.9/site-packages/opentelemetry/sdk/trace/export/__init__.py\", line 331, in _export_batch\r\n self.span_exporter.export(self.spans_list[:idx]) # type: ignore\r\n File \"/home/trond/Documents/projects/fastapi-goofring/.venv/lib/python3.9/site-packages/opentelemetry/exporter/richconsole/__init__.py\", line 166, in export\r\n child = parents[span.parent.span_id].add(\r\nAttributeError: 'NoneType' object has no attribute 'span_id'\r\n```\r\nIf I replace the Rich exporter with the regular Console exporter, everything runs nicely without problems\r\n\r\n**What is the expected behavior?**\r\nCode runs without exceptions\r\n\r\n**What is the actual behavior?**\r\nException is thrown\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe **OpenTelemetry Rich Console Exporter** provides a span exporter from a batch span processor\nto print `OpenTelemetry`_ traces using `Rich`_.\n\nInstallation\n------------\n\n::\n\n pip install opentelemetry-exporter-richconsole\n\n\nUsage\n-----\n\nThe Rich Console Exporter is a console exporter that prints a tree view onto stdout of the traces\nwith the related spans and properties as children of that tree. For the tree view, the Rich\nConsole Exporter should be used with a BatchSpanProcessor. If used within a SimpleSpanProcessor,\nall spans will be printed in a list.\n\n.. code:: python\n\n from opentelemetry import trace\n from opentelemetry.sdk.trace.export import BatchSpanProcessor\n from opentelemetry.exporter.richconsole import RichConsoleExporter\n from opentelemetry.sdk.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n\n tracer.add_span_processor(BatchSpanProcessor(RichConsoleExporter()))\n\n\nAPI\n---\n.. _Rich: https://rich.readthedocs.io/\n.. 
_OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\"\"\"\n# pylint: disable=import-error\n\nimport datetime\nimport typing\nfrom typing import Optional\n\nfrom rich.console import Console\nfrom rich.syntax import Syntax\nfrom rich.text import Text\nfrom rich.tree import Tree\n\nimport opentelemetry.trace\nfrom opentelemetry.sdk.trace import ReadableSpan\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n\ndef _ns_to_time(nanoseconds):\n ts = datetime.datetime.utcfromtimestamp(nanoseconds / 1e9)\n return ts.strftime(\"%H:%M:%S.%f\")\n\n\ndef _child_to_tree(child: Tree, span: ReadableSpan):\n child.add(\n Text.from_markup(f\"[bold cyan]Kind :[/bold cyan] {span.kind.name}\")\n )\n if not span.status.is_unset:\n if not span.status.is_ok:\n child.add(\n Text.from_markup(\n f\"[bold cyan]Status :[/bold cyan] [red]{span.status.status_code}[/red]\"\n )\n )\n else:\n child.add(\n Text.from_markup(\n f\"[bold cyan]Status :[/bold cyan] {span.status.status_code}\"\n )\n )\n if span.status.description:\n child.add(\n Text.from_markup(\n f\"[bold cyan]Description :[/bold cyan] {span.status.description}\"\n )\n )\n\n if span.events:\n events = child.add(\n label=Text.from_markup(\"[bold cyan]Events :[/bold cyan] \")\n )\n for event in span.events:\n event_node = events.add(Text(event.name))\n for key, val in event.attributes.items():\n event_node.add(\n Text.from_markup(f\"[bold cyan]{key} :[/bold cyan] {val}\")\n )\n if span.attributes:\n attributes = child.add(\n label=Text.from_markup(\"[bold cyan]Attributes :[/bold cyan] \")\n )\n for attribute in span.attributes:\n if attribute == SpanAttributes.DB_STATEMENT:\n attributes.add(\n Text.from_markup(f\"[bold cyan]{attribute} :[/bold cyan] \")\n )\n attributes.add(Syntax(span.attributes[attribute], \"sql\"))\n else:\n attributes.add(\n Text.from_markup(\n f\"[bold cyan]{attribute} :[/bold cyan] {span.attributes[attribute]}\"\n )\n )\n\n\nclass RichConsoleSpanExporter(SpanExporter):\n \"\"\"Implementation of :class:`SpanExporter` that prints spans to the\n console.\n\n Should be used within a BatchSpanProcessor\n \"\"\"\n\n def __init__(\n self,\n service_name: Optional[str] = None,\n ):\n self.service_name = service_name\n self.console = Console()\n\n def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:\n if not spans:\n return SpanExportResult.SUCCESS\n tree = Tree(\n label=f\"Trace {opentelemetry.trace.format_trace_id(spans[0].context.trace_id)}\"\n )\n parents = {}\n for span in spans:\n child = tree.add(\n label=Text.from_markup(\n f\"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}\"\n )\n )\n parents[span.context.span_id] = child\n _child_to_tree(child, span)\n\n for span in spans:\n if span.parent and span.parent.span_id not in parents:\n child = tree.add(\n label=Text.from_markup(\n f\"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}\"\n )\n )\n else:\n child = parents[span.parent.span_id].add(\n label=Text.from_markup(\n f\"[blue][{_ns_to_time(span.start_time)}][/blue] [bold]{span.name}[/bold], span {opentelemetry.trace.format_span_id(span.context.span_id)}\"\n )\n )\n parents[span.context.span_id] = child\n _child_to_tree(child, span)\n\n self.console.print(tree)\n return SpanExportResult.SUCCESS\n", "path": 
"exporter/opentelemetry-exporter-richconsole/src/opentelemetry/exporter/richconsole/__init__.py"}]} | 2,985 | 460 |
gh_patches_debug_9447 | rasdani/github-patches | git_diff | databricks__koalas-1903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for Array type hints in APIs that take Python native functions (e.g., DataFrame.apply)
```python
import databricks.koalas as ks
def tokenizeDF(col1) -> ks.Series[np.array(... ???)]:
pass
```
</issue>
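For illustration: `as_spark_type` in the module below already maps a bare `np.ndarray` hint to `ArrayType(StringType())` (see its TODO about child types), so supporting element-typed hints like the one sketched above mostly means resolving the element type and recursing. A sketch under that assumption (the helper is hypothetical, not existing Koalas API):

```python
import numpy as np
import pyspark.sql.types as types

from databricks.koalas.typedef.typehints import as_spark_type

def as_spark_array_type(tpe):
    # Hypothetical helper: turn List[T] (or a bare np.ndarray) into an ArrayType,
    # reusing the module's existing mapping for the element type.
    if tpe is np.ndarray:
        return types.ArrayType(types.StringType())  # current behaviour: element dtype unknown
    if getattr(tpe, "__origin__", None) is list:    # e.g. typing.List[str]
        (element_type,) = tpe.__args__
        return types.ArrayType(as_spark_type(element_type))
    raise TypeError("not an array-like type hint: %r" % (tpe,))
```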
<code>
[start of databricks/koalas/typedef/typehints.py]
1 #
2 # Copyright (C) 2019 Databricks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 """
18 Utilities to deal with types. This is mostly focused on python3.
19 """
20 import typing
21 import datetime
22 import decimal
23 from inspect import getfullargspec, isclass
24
25 import numpy as np
26 import pandas as pd
27 from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
28 import pyarrow as pa
29 import pyspark.sql.types as types
30
31 try:
32 from pyspark.sql.types import to_arrow_type, from_arrow_type
33 except ImportError:
34 from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type
35
36 from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
37 from databricks.koalas.typedef.string_typehints import resolve_string_type_hint
38
39 T = typing.TypeVar("T")
40
41 Scalar = typing.Union[
42 int, float, bool, str, bytes, decimal.Decimal, datetime.date, datetime.datetime, None
43 ]
44
45
46 # A column of data, with the data type.
47 class SeriesType(typing.Generic[T]):
48 def __init__(self, tpe):
49 self.tpe = tpe # type: types.DataType
50
51 def __repr__(self):
52 return "SeriesType[{}]".format(self.tpe)
53
54
55 class DataFrameType(object):
56 def __init__(self, tpe, names=None):
57 if names is None:
58 # Default names `c0, c1, ... cn`.
59 self.tpe = types.StructType(
60 [types.StructField("c%s" % i, tpe[i]) for i in range(len(tpe))]
61 ) # type: types.StructType
62 else:
63 self.tpe = types.StructType(
64 [types.StructField(n, t) for n, t in zip(names, tpe)]
65 ) # type: types.StructType
66
67 def __repr__(self):
68 return "DataFrameType[{}]".format(self.tpe)
69
70
71 # The type is a scalar type that is furthermore understood by Spark.
72 class ScalarType(object):
73 def __init__(self, tpe):
74 self.tpe = tpe # type: types.DataType
75
76 def __repr__(self):
77 return "ScalarType[{}]".format(self.tpe)
78
79
80 # The type is left unspecified or we do not know about this type.
81 class UnknownType(object):
82 def __init__(self, tpe):
83 self.tpe = tpe
84
85 def __repr__(self):
86 return "UnknownType[{}]".format(self.tpe)
87
88
89 class NameTypeHolder(object):
90 name = None
91 tpe = None
92
93
94 def as_spark_type(tpe) -> types.DataType:
95 """
96 Given a Python type, returns the equivalent spark type.
97 Accepts:
98 - the built-in types in Python
99 - the built-in types in numpy
100 - list of pairs of (field_name, type)
101 - dictionaries of field_name -> type
102 - Python3's typing system
103 """
104 # TODO: Add "boolean" and "string" types.
105 # ArrayType
106 if tpe in (np.ndarray,):
107 # TODO: support other child types
108 return types.ArrayType(types.StringType())
109 # BinaryType
110 elif tpe in (bytes, np.character, np.bytes_, np.string_):
111 return types.BinaryType()
112 # BooleanType
113 elif tpe in (bool, np.bool, "bool", "?"):
114 return types.BooleanType()
115 # DateType
116 elif tpe in (datetime.date,):
117 return types.DateType()
118 # NumericType
119 elif tpe in (np.int8, np.byte, "int8", "byte", "b"):
120 return types.ByteType()
121 elif tpe in (decimal.Decimal,):
122 # TODO: considering about the precision & scale for decimal type.
123 return types.DecimalType(38, 18)
124 elif tpe in (float, np.float, np.float64, "float", "float64", "double"):
125 return types.DoubleType()
126 elif tpe in (np.float32, "float32", "f"):
127 return types.FloatType()
128 elif tpe in (np.int32, "int32", "i"):
129 return types.IntegerType()
130 elif tpe in (int, np.int, np.int64, "int", "int64", "long", "bigint"):
131 return types.LongType()
132 elif tpe in (np.int16, "int16", "short"):
133 return types.ShortType()
134 # StringType
135 elif tpe in (str, np.unicode_, "str", "U"):
136 return types.StringType()
137 # TimestampType
138 elif tpe in (datetime.datetime, np.datetime64, "datetime64[ns]", "M"):
139 return types.TimestampType()
140 else:
141 raise TypeError("Type %s was not understood." % tpe)
142
143
144 def spark_type_to_pandas_dtype(spark_type):
145 """ Return the given Spark DataType to pandas dtype. """
146 if isinstance(spark_type, (types.DateType, types.StructType, types.UserDefinedType)):
147 return np.dtype("object")
148 elif isinstance(spark_type, types.TimestampType):
149 return np.dtype("datetime64[ns]")
150 else:
151 return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())
152
153
154 def infer_pd_series_spark_type(s: pd.Series) -> types.DataType:
155 """Infer Spark DataType from pandas Series dtype.
156
157 :param s: :class:`pandas.Series` to be inferred
158 :return: the inferred Spark data type
159 """
160 dt = s.dtype
161 if dt == np.dtype("object"):
162 if len(s) == 0 or s.isnull().all():
163 raise ValueError("can not infer schema from empty or null dataset")
164 elif hasattr(s[0], "__UDT__"):
165 return s[0].__UDT__
166 else:
167 return from_arrow_type(pa.Array.from_pandas(s).type)
168 elif is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
169 return types.TimestampType()
170 else:
171 return from_arrow_type(pa.from_numpy_dtype(dt))
172
173
174 def infer_return_type(f) -> typing.Union[SeriesType, DataFrameType, ScalarType, UnknownType]:
175 """
176 >>> def func() -> int:
177 ... pass
178 >>> infer_return_type(func).tpe
179 LongType
180
181 >>> def func() -> ks.Series[int]:
182 ... pass
183 >>> infer_return_type(func).tpe
184 LongType
185
186 >>> def func() -> ks.DataFrame[np.float, str]:
187 ... pass
188 >>> infer_return_type(func).tpe
189 StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
190
191 >>> def func() -> ks.DataFrame[np.float]:
192 ... pass
193 >>> infer_return_type(func).tpe
194 StructType(List(StructField(c0,DoubleType,true)))
195
196 >>> def func() -> 'int':
197 ... pass
198 >>> infer_return_type(func).tpe
199 LongType
200
201 >>> def func() -> 'ks.Series[int]':
202 ... pass
203 >>> infer_return_type(func).tpe
204 LongType
205
206 >>> def func() -> 'ks.DataFrame[np.float, str]':
207 ... pass
208 >>> infer_return_type(func).tpe
209 StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))
210
211 >>> def func() -> 'ks.DataFrame[np.float]':
212 ... pass
213 >>> infer_return_type(func).tpe
214 StructType(List(StructField(c0,DoubleType,true)))
215
216 >>> def func() -> ks.DataFrame['a': np.float, 'b': int]:
217 ... pass
218 >>> infer_return_type(func).tpe
219 StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
220
221 >>> def func() -> "ks.DataFrame['a': np.float, 'b': int]":
222 ... pass
223 >>> infer_return_type(func).tpe
224 StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))
225
226 >>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
227 >>> def func() -> ks.DataFrame[pdf.dtypes]:
228 ... pass
229 >>> infer_return_type(func).tpe
230 StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))
231
232 >>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
233 >>> def func() -> ks.DataFrame[zip(pdf.columns, pdf.dtypes)]:
234 ... pass
235 >>> infer_return_type(func).tpe
236 StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))
237 """
238 # We should re-import to make sure the class 'SeriesType' is not treated as a class
239 # within this module locally. See Series.__class_getitem__ which imports this class
240 # canonically.
241 from databricks.koalas.typedef import SeriesType, NameTypeHolder
242
243 spec = getfullargspec(f)
244 tpe = spec.annotations.get("return", None)
245 if isinstance(tpe, str):
246 # This type hint can happen when given hints are string to avoid forward reference.
247 tpe = resolve_string_type_hint(tpe)
248 if hasattr(tpe, "__origin__") and (
249 issubclass(tpe.__origin__, SeriesType) or tpe.__origin__ == ks.Series
250 ):
251 # TODO: remove "tpe.__origin__ == ks.Series" when we drop Python 3.5 and 3.6.
252 inner = as_spark_type(tpe.__args__[0])
253 return SeriesType(inner)
254
255 if hasattr(tpe, "__origin__") and tpe.__origin__ == ks.DataFrame:
256 # When Python version is lower then 3.7. Unwrap it to a Tuple type
257 # hints.
258 tpe = tpe.__args__[0]
259
260 # Note that, DataFrame type hints will create a Tuple.
261 # Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
262 # Check if the name is Tuple.
263 name = getattr(tpe, "_name", getattr(tpe, "__name__", None))
264 if name == "Tuple":
265 tuple_type = tpe
266 if hasattr(tuple_type, "__tuple_params__"):
267 # Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.
268 # See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py
269 parameters = getattr(tuple_type, "__tuple_params__")
270 else:
271 parameters = getattr(tuple_type, "__args__")
272 if len(parameters) > 0 and all(
273 isclass(p) and issubclass(p, NameTypeHolder) for p in parameters
274 ):
275 names = [p.name for p in parameters if issubclass(p, NameTypeHolder)]
276 types = [p.tpe for p in parameters if issubclass(p, NameTypeHolder)]
277 return DataFrameType([as_spark_type(t) for t in types], names)
278 return DataFrameType([as_spark_type(t) for t in parameters])
279 inner = as_spark_type(tpe)
280 if inner is None:
281 return UnknownType(tpe)
282 else:
283 return ScalarType(inner)
284
[end of databricks/koalas/typedef/typehints.py]
</code>
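Reading `as_spark_type` in the listing above, `np.ndarray` is hard-coded to `ArrayType(StringType())` (line 108) and a parameterized hint such as `typing.List[int]` matches none of the branches, so it falls through to the `TypeError` on line 141 — which is the gap the issue describes. A minimal sketch of that pre-patch behaviour, assuming a koalas checkout at this revision (the expected results are read off the listing, not captured output):

```python
# Sketch of the behaviour implied by the listing above (not verified output).
from typing import List

import numpy as np

from databricks.koalas.typedef.typehints import as_spark_type

as_spark_type(np.ndarray)   # ArrayType(StringType,true) -- element type is fixed to strings
as_spark_type(List[int])    # raises TypeError("Type ... was not understood.") before the fix
```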
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/databricks/koalas/typedef/typehints.py b/databricks/koalas/typedef/typehints.py
--- a/databricks/koalas/typedef/typehints.py
+++ b/databricks/koalas/typedef/typehints.py
@@ -104,8 +104,9 @@
# TODO: Add "boolean" and "string" types.
# ArrayType
if tpe in (np.ndarray,):
- # TODO: support other child types
return types.ArrayType(types.StringType())
+ elif hasattr(tpe, "__origin__") and issubclass(tpe.__origin__, list):
+ return types.ArrayType(as_spark_type(tpe.__args__[0]))
# BinaryType
elif tpe in (bytes, np.character, np.bytes_, np.string_):
return types.BinaryType()
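The two added lines route parameterized `list` hints through `as_spark_type` recursively. A hedged sketch of what that enables, extrapolating from the `infer_return_type` doctests in the listing above; the expected values are derived from the diff rather than executed, and assume Python 3.7+, where `List[int].__origin__ is list`:

```python
# Sketch only: expected results are inferred from the patch, not executed.
from typing import List

import databricks.koalas as ks
from databricks.koalas.typedef.typehints import as_spark_type, infer_return_type

as_spark_type(List[int])    # ArrayType(LongType,true)
as_spark_type(List[str])    # ArrayType(StringType,true)


def tokenize(col1) -> ks.Series[List[str]]:  # the hint requested in the issue
    return col1.str.split(" ")


infer_return_type(tokenize).tpe  # ArrayType(StringType,true)
```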
| {"golden_diff": "diff --git a/databricks/koalas/typedef/typehints.py b/databricks/koalas/typedef/typehints.py\n--- a/databricks/koalas/typedef/typehints.py\n+++ b/databricks/koalas/typedef/typehints.py\n@@ -104,8 +104,9 @@\n # TODO: Add \"boolean\" and \"string\" types.\n # ArrayType\n if tpe in (np.ndarray,):\n- # TODO: support other child types\n return types.ArrayType(types.StringType())\n+ elif hasattr(tpe, \"__origin__\") and issubclass(tpe.__origin__, list):\n+ return types.ArrayType(as_spark_type(tpe.__args__[0]))\n # BinaryType\n elif tpe in (bytes, np.character, np.bytes_, np.string_):\n return types.BinaryType()\n", "issue": "Support for Array type hints in APIs that take Python native functions (e.g., DataFrame.apply)\n```python\r\nimport databricks.koalas as ks\r\n\r\ndef tokenizeDF(col1) -> ks.Series[np.array(... ???)]:\r\n pass\r\n```\n", "before_files": [{"content": "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUtilities to deal with types. This is mostly focused on python3.\n\"\"\"\nimport typing\nimport datetime\nimport decimal\nfrom inspect import getfullargspec, isclass\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype\nimport pyarrow as pa\nimport pyspark.sql.types as types\n\ntry:\n from pyspark.sql.types import to_arrow_type, from_arrow_type\nexcept ImportError:\n from pyspark.sql.pandas.types import to_arrow_type, from_arrow_type\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas.typedef.string_typehints import resolve_string_type_hint\n\nT = typing.TypeVar(\"T\")\n\nScalar = typing.Union[\n int, float, bool, str, bytes, decimal.Decimal, datetime.date, datetime.datetime, None\n]\n\n\n# A column of data, with the data type.\nclass SeriesType(typing.Generic[T]):\n def __init__(self, tpe):\n self.tpe = tpe # type: types.DataType\n\n def __repr__(self):\n return \"SeriesType[{}]\".format(self.tpe)\n\n\nclass DataFrameType(object):\n def __init__(self, tpe, names=None):\n if names is None:\n # Default names `c0, c1, ... 
cn`.\n self.tpe = types.StructType(\n [types.StructField(\"c%s\" % i, tpe[i]) for i in range(len(tpe))]\n ) # type: types.StructType\n else:\n self.tpe = types.StructType(\n [types.StructField(n, t) for n, t in zip(names, tpe)]\n ) # type: types.StructType\n\n def __repr__(self):\n return \"DataFrameType[{}]\".format(self.tpe)\n\n\n# The type is a scalar type that is furthermore understood by Spark.\nclass ScalarType(object):\n def __init__(self, tpe):\n self.tpe = tpe # type: types.DataType\n\n def __repr__(self):\n return \"ScalarType[{}]\".format(self.tpe)\n\n\n# The type is left unspecified or we do not know about this type.\nclass UnknownType(object):\n def __init__(self, tpe):\n self.tpe = tpe\n\n def __repr__(self):\n return \"UnknownType[{}]\".format(self.tpe)\n\n\nclass NameTypeHolder(object):\n name = None\n tpe = None\n\n\ndef as_spark_type(tpe) -> types.DataType:\n \"\"\"\n Given a Python type, returns the equivalent spark type.\n Accepts:\n - the built-in types in Python\n - the built-in types in numpy\n - list of pairs of (field_name, type)\n - dictionaries of field_name -> type\n - Python3's typing system\n \"\"\"\n # TODO: Add \"boolean\" and \"string\" types.\n # ArrayType\n if tpe in (np.ndarray,):\n # TODO: support other child types\n return types.ArrayType(types.StringType())\n # BinaryType\n elif tpe in (bytes, np.character, np.bytes_, np.string_):\n return types.BinaryType()\n # BooleanType\n elif tpe in (bool, np.bool, \"bool\", \"?\"):\n return types.BooleanType()\n # DateType\n elif tpe in (datetime.date,):\n return types.DateType()\n # NumericType\n elif tpe in (np.int8, np.byte, \"int8\", \"byte\", \"b\"):\n return types.ByteType()\n elif tpe in (decimal.Decimal,):\n # TODO: considering about the precision & scale for decimal type.\n return types.DecimalType(38, 18)\n elif tpe in (float, np.float, np.float64, \"float\", \"float64\", \"double\"):\n return types.DoubleType()\n elif tpe in (np.float32, \"float32\", \"f\"):\n return types.FloatType()\n elif tpe in (np.int32, \"int32\", \"i\"):\n return types.IntegerType()\n elif tpe in (int, np.int, np.int64, \"int\", \"int64\", \"long\", \"bigint\"):\n return types.LongType()\n elif tpe in (np.int16, \"int16\", \"short\"):\n return types.ShortType()\n # StringType\n elif tpe in (str, np.unicode_, \"str\", \"U\"):\n return types.StringType()\n # TimestampType\n elif tpe in (datetime.datetime, np.datetime64, \"datetime64[ns]\", \"M\"):\n return types.TimestampType()\n else:\n raise TypeError(\"Type %s was not understood.\" % tpe)\n\n\ndef spark_type_to_pandas_dtype(spark_type):\n \"\"\" Return the given Spark DataType to pandas dtype. 
\"\"\"\n if isinstance(spark_type, (types.DateType, types.StructType, types.UserDefinedType)):\n return np.dtype(\"object\")\n elif isinstance(spark_type, types.TimestampType):\n return np.dtype(\"datetime64[ns]\")\n else:\n return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())\n\n\ndef infer_pd_series_spark_type(s: pd.Series) -> types.DataType:\n \"\"\"Infer Spark DataType from pandas Series dtype.\n\n :param s: :class:`pandas.Series` to be inferred\n :return: the inferred Spark data type\n \"\"\"\n dt = s.dtype\n if dt == np.dtype(\"object\"):\n if len(s) == 0 or s.isnull().all():\n raise ValueError(\"can not infer schema from empty or null dataset\")\n elif hasattr(s[0], \"__UDT__\"):\n return s[0].__UDT__\n else:\n return from_arrow_type(pa.Array.from_pandas(s).type)\n elif is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):\n return types.TimestampType()\n else:\n return from_arrow_type(pa.from_numpy_dtype(dt))\n\n\ndef infer_return_type(f) -> typing.Union[SeriesType, DataFrameType, ScalarType, UnknownType]:\n \"\"\"\n >>> def func() -> int:\n ... pass\n >>> infer_return_type(func).tpe\n LongType\n\n >>> def func() -> ks.Series[int]:\n ... pass\n >>> infer_return_type(func).tpe\n LongType\n\n >>> def func() -> ks.DataFrame[np.float, str]:\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))\n\n >>> def func() -> ks.DataFrame[np.float]:\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(c0,DoubleType,true)))\n\n >>> def func() -> 'int':\n ... pass\n >>> infer_return_type(func).tpe\n LongType\n\n >>> def func() -> 'ks.Series[int]':\n ... pass\n >>> infer_return_type(func).tpe\n LongType\n\n >>> def func() -> 'ks.DataFrame[np.float, str]':\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))\n\n >>> def func() -> 'ks.DataFrame[np.float]':\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(c0,DoubleType,true)))\n\n >>> def func() -> ks.DataFrame['a': np.float, 'b': int]:\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))\n\n >>> def func() -> \"ks.DataFrame['a': np.float, 'b': int]\":\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))\n\n >>> pdf = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]})\n >>> def func() -> ks.DataFrame[pdf.dtypes]:\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))\n\n >>> pdf = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]})\n >>> def func() -> ks.DataFrame[zip(pdf.columns, pdf.dtypes)]:\n ... pass\n >>> infer_return_type(func).tpe\n StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))\n \"\"\"\n # We should re-import to make sure the class 'SeriesType' is not treated as a class\n # within this module locally. 
See Series.__class_getitem__ which imports this class\n # canonically.\n from databricks.koalas.typedef import SeriesType, NameTypeHolder\n\n spec = getfullargspec(f)\n tpe = spec.annotations.get(\"return\", None)\n if isinstance(tpe, str):\n # This type hint can happen when given hints are string to avoid forward reference.\n tpe = resolve_string_type_hint(tpe)\n if hasattr(tpe, \"__origin__\") and (\n issubclass(tpe.__origin__, SeriesType) or tpe.__origin__ == ks.Series\n ):\n # TODO: remove \"tpe.__origin__ == ks.Series\" when we drop Python 3.5 and 3.6.\n inner = as_spark_type(tpe.__args__[0])\n return SeriesType(inner)\n\n if hasattr(tpe, \"__origin__\") and tpe.__origin__ == ks.DataFrame:\n # When Python version is lower then 3.7. Unwrap it to a Tuple type\n # hints.\n tpe = tpe.__args__[0]\n\n # Note that, DataFrame type hints will create a Tuple.\n # Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.\n # Check if the name is Tuple.\n name = getattr(tpe, \"_name\", getattr(tpe, \"__name__\", None))\n if name == \"Tuple\":\n tuple_type = tpe\n if hasattr(tuple_type, \"__tuple_params__\"):\n # Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.\n # See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py\n parameters = getattr(tuple_type, \"__tuple_params__\")\n else:\n parameters = getattr(tuple_type, \"__args__\")\n if len(parameters) > 0 and all(\n isclass(p) and issubclass(p, NameTypeHolder) for p in parameters\n ):\n names = [p.name for p in parameters if issubclass(p, NameTypeHolder)]\n types = [p.tpe for p in parameters if issubclass(p, NameTypeHolder)]\n return DataFrameType([as_spark_type(t) for t in types], names)\n return DataFrameType([as_spark_type(t) for t in parameters])\n inner = as_spark_type(tpe)\n if inner is None:\n return UnknownType(tpe)\n else:\n return ScalarType(inner)\n", "path": "databricks/koalas/typedef/typehints.py"}]} | 3,997 | 189 |