problem_id: stringlengths 18-22 | source: stringclasses 1 value | task_type: stringclasses 1 value | in_source_id: stringlengths 13-58 | prompt: stringlengths 1.71k-18.9k | golden_diff: stringlengths 145-5.13k | verification_info: stringlengths 465-23.6k | num_tokens_prompt: int64 556-4.1k | num_tokens_diff: int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_19644 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
<code>
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py]
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMin(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.min"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).min()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.min(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.min(F.length(F.col(column)))
32
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py]
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py]
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMax(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.max"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).max()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.max(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.max(F.length(F.col(column)))
32
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
@@ -27,5 +27,5 @@
return sa.func.max(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.max(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.max(F.length(column))
diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
@@ -27,5 +27,5 @@
return sa.func.min(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.min(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.min(F.length(column))
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n@@ -27,5 +27,5 @@\n return sa.func.max(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.max(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.max(F.length(column))\ndiff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n@@ -27,5 +27,5 @@\n return sa.func.min(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.min(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.min(F.length(column))\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMin(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.min\"\n\n @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).min()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.min(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.min(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py"}, {"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMax(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.max\"\n\n 
@column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).max()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.max(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.max(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py"}]} | 1,269 | 396 |
gh_patches_debug_6766 | rasdani/github-patches | git_diff | coala__coala-1445 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spelling error in ContextManager
`coalib/misc/ContextManagers.py` function `prepare_file` has a spelling error - `Can creates a temporary file ...` should be `Can create a temporary file ...`
</issue>
<code>
[start of coalib/misc/ContextManagers.py]
1 from contextlib import contextmanager, closing
2 import sys
3 import os
4 from io import StringIO
5 import builtins
6 import signal
7 import threading
8 import platform
9 import tempfile
10
11 from coalib.misc.MutableValue import MutableValue
12
13
14 @contextmanager
15 def subprocess_timeout(sub_process, seconds, kill_pg=False):
16 """
17 Kill subprocess if the sub process takes more the than the timeout.
18
19 :param sub_process: The sub process to run.
20 :param seconds: The number of seconds to allow the test to run for. If
21 set to 0 or a negative value, it waits indefinitely.
22 Floats can be used to specify units smaller than
23 seconds.
24 :param kill_pg: Boolean whether to kill the process group or only this
25 process. (not applicable for windows)
26 """
27 timedout = MutableValue(False)
28
29 if seconds <= 0:
30 yield timedout
31 return
32
33 finished = threading.Event()
34
35 if platform.system() == "Windows": # pragma: no cover
36 kill_pg = False
37
38 def kill_it():
39 finished.wait(seconds)
40 if not finished.is_set():
41 timedout.value = True
42 if kill_pg:
43 pgid = os.getpgid(sub_process.pid)
44 os.kill(sub_process.pid, signal.SIGINT)
45 if kill_pg:
46 os.killpg(pgid, signal.SIGINT)
47
48 thread = threading.Thread(name='timeout-killer', target=kill_it)
49 try:
50 thread.start()
51 yield timedout
52 finally:
53 finished.set()
54 thread.join()
55
56
57 @contextmanager
58 def replace_stdout(replacement):
59 """
60 Replaces stdout with the replacement, yields back to the caller and then
61 reverts everything back.
62 """
63 _stdout = sys.stdout
64 sys.stdout = replacement
65 try:
66 yield
67 finally:
68 sys.stdout = _stdout
69
70
71 @contextmanager
72 def suppress_stdout():
73 """
74 Suppresses everything going to stdout.
75 """
76 with open(os.devnull, "w") as devnull, replace_stdout(devnull):
77 yield
78
79
80 @contextmanager
81 def retrieve_stdout():
82 """
83 Yields a StringIO object from which one can read everything that was
84 printed to stdout. (It won't be printed to the real stdout!)
85
86 Example usage:
87
88 with retrieve_stdout() as stdout:
89 print("something") # Won't print to the console
90 what_was_printed = stdout.getvalue() # Save the value
91 """
92 with closing(StringIO()) as sio, replace_stdout(sio):
93 oldprint = builtins.print
94 try:
95 # Overriding stdout doesn't work with libraries, this ensures even
96 # cached variables take this up. Well... it works.
97 def newprint(*args, **kwargs):
98 kwargs['file'] = sio
99 oldprint(*args, **kwargs)
100
101 builtins.print = newprint
102 yield sio
103 finally:
104 builtins.print = oldprint
105
106
107 @contextmanager
108 def simulate_console_inputs(*inputs):
109 """
110 Does some magic to simulate the given inputs to any calls to the `input`
111 builtin. This yields back an InputGenerator object so you can check
112 which input was already used and append any additional inputs you want.
113 Example:
114
115 with simulate_console_inputs(0, 1, 2) as generator:
116 assert(input() == 0)
117 assert(generator.last_input == 0)
118 generator.inputs.append(3)
119 assert(input() == 1)
120 assert(input() == 2)
121 assert(input() == 3)
122 assert(generator.last_input == 3)
123
124 :param inputs: Any inputs to simulate.
125 :raises ValueError: Raised when was asked for more input but there's no
126 more provided.
127 """
128 class InputGenerator:
129
130 def __init__(self, inputs):
131 self.last_input = -1
132 self.inputs = inputs
133
134 def generate_input(self, prompt=''):
135 print(prompt, end="")
136 self.last_input += 1
137 try:
138 return self.inputs[self.last_input]
139 except IndexError:
140 raise ValueError("Asked for more input, but no more was "
141 "provided from `simulate_console_inputs`.")
142
143 input_generator = InputGenerator(list(inputs))
144 _input = builtins.input
145 builtins.input = input_generator.generate_input
146 try:
147 yield input_generator
148 finally:
149 builtins.input = _input
150
151
152 @contextmanager
153 def make_temp(suffix="", prefix="tmp", dir=None):
154 """
155 Creates a temporary file with a closed stream and deletes it when done.
156
157 :return: A contextmanager retrieving the file path.
158 """
159 temporary = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
160 os.close(temporary[0])
161 try:
162 yield temporary[1]
163 finally:
164 os.remove(temporary[1])
165
166
167 @contextmanager
168 def prepare_file(lines,
169 filename,
170 force_linebreaks=True,
171 create_tempfile=True,
172 tempfile_kwargs={}):
173 """
174 Can creates a temporary file (if filename is None) with the lines.
175 Can also add a trailing newline to each line specified if needed.
176
177 :param lines: The lines from the file. (list of strings)
178 :param filename: The filename to be prepared.
179 :param force_linebreaks: Whether to append newlines at each line if needed.
180 :param create_tempfile: Whether to save lines in tempfile if needed.
181 :param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
182 """
183 if force_linebreaks:
184 for i, line in enumerate(lines):
185 lines[i] = line if line.endswith("\n") else line + "\n"
186
187 if not create_tempfile and filename is None:
188 filename = "dummy_file_name"
189
190 if not isinstance(filename, str) and create_tempfile:
191 with make_temp(**tempfile_kwargs) as filename:
192 with open(filename, 'w') as file:
193 file.writelines(lines)
194 yield lines, filename
195 else:
196 yield lines, filename
197
[end of coalib/misc/ContextManagers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/misc/ContextManagers.py b/coalib/misc/ContextManagers.py
--- a/coalib/misc/ContextManagers.py
+++ b/coalib/misc/ContextManagers.py
@@ -171,7 +171,7 @@
create_tempfile=True,
tempfile_kwargs={}):
"""
- Can creates a temporary file (if filename is None) with the lines.
+ Can create a temporary file (if filename is None) with the lines.
Can also add a trailing newline to each line specified if needed.
:param lines: The lines from the file. (list of strings)
| {"golden_diff": "diff --git a/coalib/misc/ContextManagers.py b/coalib/misc/ContextManagers.py\n--- a/coalib/misc/ContextManagers.py\n+++ b/coalib/misc/ContextManagers.py\n@@ -171,7 +171,7 @@\n create_tempfile=True,\n tempfile_kwargs={}):\n \"\"\"\n- Can creates a temporary file (if filename is None) with the lines.\n+ Can create a temporary file (if filename is None) with the lines.\n Can also add a trailing newline to each line specified if needed.\n \n :param lines: The lines from the file. (list of strings)\n", "issue": "Spelling error in ContextManager\n`coalib/misc/ContextManagers.py` function `prepare_file` has a spelling error - `Can creates a temporary file ...` should be `Can create a temporary file ...`\n\n", "before_files": [{"content": "from contextlib import contextmanager, closing\nimport sys\nimport os\nfrom io import StringIO\nimport builtins\nimport signal\nimport threading\nimport platform\nimport tempfile\n\nfrom coalib.misc.MutableValue import MutableValue\n\n\n@contextmanager\ndef subprocess_timeout(sub_process, seconds, kill_pg=False):\n \"\"\"\n Kill subprocess if the sub process takes more the than the timeout.\n\n :param sub_process: The sub process to run.\n :param seconds: The number of seconds to allow the test to run for. If\n set to 0 or a negative value, it waits indefinitely.\n Floats can be used to specify units smaller than\n seconds.\n :param kill_pg: Boolean whether to kill the process group or only this\n process. (not applicable for windows)\n \"\"\"\n timedout = MutableValue(False)\n\n if seconds <= 0:\n yield timedout\n return\n\n finished = threading.Event()\n\n if platform.system() == \"Windows\": # pragma: no cover\n kill_pg = False\n\n def kill_it():\n finished.wait(seconds)\n if not finished.is_set():\n timedout.value = True\n if kill_pg:\n pgid = os.getpgid(sub_process.pid)\n os.kill(sub_process.pid, signal.SIGINT)\n if kill_pg:\n os.killpg(pgid, signal.SIGINT)\n\n thread = threading.Thread(name='timeout-killer', target=kill_it)\n try:\n thread.start()\n yield timedout\n finally:\n finished.set()\n thread.join()\n\n\n@contextmanager\ndef replace_stdout(replacement):\n \"\"\"\n Replaces stdout with the replacement, yields back to the caller and then\n reverts everything back.\n \"\"\"\n _stdout = sys.stdout\n sys.stdout = replacement\n try:\n yield\n finally:\n sys.stdout = _stdout\n\n\n@contextmanager\ndef suppress_stdout():\n \"\"\"\n Suppresses everything going to stdout.\n \"\"\"\n with open(os.devnull, \"w\") as devnull, replace_stdout(devnull):\n yield\n\n\n@contextmanager\ndef retrieve_stdout():\n \"\"\"\n Yields a StringIO object from which one can read everything that was\n printed to stdout. (It won't be printed to the real stdout!)\n\n Example usage:\n\n with retrieve_stdout() as stdout:\n print(\"something\") # Won't print to the console\n what_was_printed = stdout.getvalue() # Save the value\n \"\"\"\n with closing(StringIO()) as sio, replace_stdout(sio):\n oldprint = builtins.print\n try:\n # Overriding stdout doesn't work with libraries, this ensures even\n # cached variables take this up. Well... it works.\n def newprint(*args, **kwargs):\n kwargs['file'] = sio\n oldprint(*args, **kwargs)\n\n builtins.print = newprint\n yield sio\n finally:\n builtins.print = oldprint\n\n\n@contextmanager\ndef simulate_console_inputs(*inputs):\n \"\"\"\n Does some magic to simulate the given inputs to any calls to the `input`\n builtin. 
This yields back an InputGenerator object so you can check\n which input was already used and append any additional inputs you want.\n Example:\n\n with simulate_console_inputs(0, 1, 2) as generator:\n assert(input() == 0)\n assert(generator.last_input == 0)\n generator.inputs.append(3)\n assert(input() == 1)\n assert(input() == 2)\n assert(input() == 3)\n assert(generator.last_input == 3)\n\n :param inputs: Any inputs to simulate.\n :raises ValueError: Raised when was asked for more input but there's no\n more provided.\n \"\"\"\n class InputGenerator:\n\n def __init__(self, inputs):\n self.last_input = -1\n self.inputs = inputs\n\n def generate_input(self, prompt=''):\n print(prompt, end=\"\")\n self.last_input += 1\n try:\n return self.inputs[self.last_input]\n except IndexError:\n raise ValueError(\"Asked for more input, but no more was \"\n \"provided from `simulate_console_inputs`.\")\n\n input_generator = InputGenerator(list(inputs))\n _input = builtins.input\n builtins.input = input_generator.generate_input\n try:\n yield input_generator\n finally:\n builtins.input = _input\n\n\n@contextmanager\ndef make_temp(suffix=\"\", prefix=\"tmp\", dir=None):\n \"\"\"\n Creates a temporary file with a closed stream and deletes it when done.\n\n :return: A contextmanager retrieving the file path.\n \"\"\"\n temporary = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)\n os.close(temporary[0])\n try:\n yield temporary[1]\n finally:\n os.remove(temporary[1])\n\n\n@contextmanager\ndef prepare_file(lines,\n filename,\n force_linebreaks=True,\n create_tempfile=True,\n tempfile_kwargs={}):\n \"\"\"\n Can creates a temporary file (if filename is None) with the lines.\n Can also add a trailing newline to each line specified if needed.\n\n :param lines: The lines from the file. (list of strings)\n :param filename: The filename to be prepared.\n :param force_linebreaks: Whether to append newlines at each line if needed.\n :param create_tempfile: Whether to save lines in tempfile if needed.\n :param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().\n \"\"\"\n if force_linebreaks:\n for i, line in enumerate(lines):\n lines[i] = line if line.endswith(\"\\n\") else line + \"\\n\"\n\n if not create_tempfile and filename is None:\n filename = \"dummy_file_name\"\n\n if not isinstance(filename, str) and create_tempfile:\n with make_temp(**tempfile_kwargs) as filename:\n with open(filename, 'w') as file:\n file.writelines(lines)\n yield lines, filename\n else:\n yield lines, filename\n", "path": "coalib/misc/ContextManagers.py"}]} | 2,380 | 141 |
gh_patches_debug_2335 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1841 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update `test` dependency from `nteract-scrapbook` to `scrapbook`
### Summary
Running the notebook tests generates the warning
```pytb
warnings.warn("'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.", FutureWarning)
```
as [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `steup.py` for the `test` extra:
https://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789
7 'tensorflow-probability>=0.11.0', # c.f. PR #1657
8 ],
9 'torch': ['torch>=1.10.0'], # c.f. PR #1657
10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501
11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567
12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'scikit-hep-testdata>=0.4.11',
33 'pytest>=6.0',
34 'pytest-cov>=2.5.1',
35 'pytest-mock',
36 'requests-mock>=1.9.0',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'pytest-mpl',
40 'pydocstyle',
41 'papermill~=2.0',
42 'nteract-scrapbook~=0.2',
43 'jupyter',
44 'graphviz',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 set(
50 extras_require['xmlio']
51 + extras_require['contrib']
52 + [
53 'sphinx>=4.0.0',
54 'sphinxcontrib-bibtex~=2.1',
55 'sphinx-click',
56 'sphinx_rtd_theme',
57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
58 'ipywidgets',
59 'sphinx-issues',
60 'sphinx-copybutton>=0.3.2',
61 'sphinx-togglebutton>=0.3.0',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['lint']
69 + extras_require['test']
70 + [
71 'nbdime',
72 'tbump>=6.7.0',
73 'ipython',
74 'pre-commit',
75 'check-manifest',
76 'codemetapy>=0.3.4',
77 'twine',
78 ]
79 )
80 )
81 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
82
83
84 setup(
85 extras_require=extras_require,
86 use_scm_version=lambda: {'local_scheme': lambda version: ''},
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,8 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'papermill~=2.0',
- 'nteract-scrapbook~=0.2',
+ 'papermill~=2.3.4',
+ 'scrapbook~=0.5.0',
'jupyter',
'graphviz',
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,8 +38,8 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'papermill~=2.0',\n- 'nteract-scrapbook~=0.2',\n+ 'papermill~=2.3.4',\n+ 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n ]\n", "issue": "Update `test` dependency from `nteract-scrapbook` to `scrapbook`\n### Summary\n\nRunning the notebook tests generates the warning\r\n\r\n```pytb\r\nwarnings.warn(\"'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.\", FutureWarning)\r\n```\r\n\r\nas [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `steup.py` for the `test` extra:\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42\n\n### Additional Information\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,639 | 111 |
gh_patches_debug_60747 | rasdani/github-patches | git_diff | hi-primus__optimus-872 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Json file exploration/profiling
Unstructured data as JSON can not be explored as regular tabular data. I have been exploring using tree depth and count to highlight the user in which nodes could have important data.
Some work in progress, here. https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py
</issue>
<code>
[start of optimus/engines/pandas/io/json.py]
1 import glob
2
3 import pandas as pd
4 import ujson
5 from glom import glom
6
7 from optimus.infer import is_dict, is_list, is_str, is_int
8
9 META = "_meta"
10 PROPERTIES = "_properties"
11 ITEMS = "_items"
12
13 COL_DEPTH = "depth"
14
15
16 class JSON:
17 def __init__(self):
18 self.data = None
19
20 def load(self, path):
21 """
22 Load a file in JSON format
23 :param path:
24 :return:
25 """
26 all_json = glob.glob(path, recursive=True)
27 # pd.read_json("data/corona.json")
28 with open(all_json[0]) as f:
29 self.data = ujson.load(f)
30
31 def schema(self):
32 """
33 Return a JSON with the count, dtype and nested structure
34 :return:
35 """
36
37 def _schema(_data, _keys):
38 if isinstance(_data, dict):
39 for x, y in _data.items():
40 if is_dict(y):
41 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
42 if len(y) > 0:
43 _keys[x][PROPERTIES] = {}
44 _schema(y, _keys[x][PROPERTIES])
45 elif is_list(y):
46 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
47 if len(y) > 0:
48 _keys[x] = {ITEMS: {PROPERTIES: {}, META: {"count": len(y), "dtype": type(y)}}}
49 _schema(y, _keys[x][ITEMS][PROPERTIES])
50 elif is_str(y):
51 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
52 _schema(y, _keys[x])
53 elif is_int(y):
54 _keys[x] = {META: {"dtype": type(y)}}
55 _schema(y, _keys[x])
56
57 elif is_list(_data):
58 for x in _data:
59 _schema(x, _keys)
60
61 keys = {}
62 _schema(self.data, keys)
63 return keys
64
65 def freq(self, n=100):
66 """
67 Calculate the count on every dict or list in the json
68 :param n:
69 :return:
70 """
71
72 def _profile(keys, parent, result=None):
73 for key, values in keys.items():
74 if values.get(PROPERTIES):
75 _meta = values.get(META)
76 _properties = values.get(PROPERTIES)
77 elif values.get(ITEMS):
78 _meta = values.get(ITEMS).get(META)
79 _properties = values.get(ITEMS).get(PROPERTIES)
80
81 if values.get(PROPERTIES) or values.get(ITEMS):
82 result.append([key, _meta["count"], _meta["dtype"], parent, len(parent)])
83 _profile(_properties, parent + [key], result=result)
84
85 data = []
86 _profile(self.schema(), [], data)
87 df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])
88 df = df.sort_values(by=["count", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')
89 return df
90
91 def flatten(self, path):
92 """
93 Flatten a JSON from a json path
94 :param path:
95 :return:
96 """
97
98 def _flatten_json(_values):
99 out = {}
100
101 def flatten(x, name=''):
102 if type(x) is dict:
103 for a in x:
104 flatten(x[a], name + a + '_')
105 elif type(x) is list:
106 # i = 0
107 for a in x:
108 # flatten(a, name + str(i) + '_')
109 flatten(a, name + '_')
110 # i += 1
111 else:
112 out[name[:-1]] = x
113
114 flatten(_values)
115 return out
116
117 result = []
118 value = glom(self.data, path, skip_exc=KeyError)
119 if is_list(value):
120 for i in value:
121 result.append((_flatten_json(i)))
122 elif is_dict(value):
123 for i, j in value.items():
124 a = {"col": i}
125 a.update(_flatten_json(j))
126 result.append(a)
127 return result
128
129 def to_pandas(self, path):
130 result = self.flatten(path)
131 return pd.DataFrame(data=result)
132
[end of optimus/engines/pandas/io/json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py
--- a/optimus/engines/pandas/io/json.py
+++ b/optimus/engines/pandas/io/json.py
@@ -121,7 +121,7 @@
result.append((_flatten_json(i)))
elif is_dict(value):
for i, j in value.items():
- a = {"col": i}
+ a = {path: i}
a.update(_flatten_json(j))
result.append(a)
return result
| {"golden_diff": "diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py\n--- a/optimus/engines/pandas/io/json.py\n+++ b/optimus/engines/pandas/io/json.py\n@@ -121,7 +121,7 @@\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n- a = {\"col\": i}\n+ a = {path: i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n", "issue": "Json file exploration/profiling\nUnstructured data as JSON can not be explored as regular tabular data. I have been exploring using tree depth and count to highlight the user in which nodes could have important data.\r\n\r\nSome work in progress, here. https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py\n", "before_files": [{"content": "import glob\n\nimport pandas as pd\nimport ujson\nfrom glom import glom\n\nfrom optimus.infer import is_dict, is_list, is_str, is_int\n\nMETA = \"_meta\"\nPROPERTIES = \"_properties\"\nITEMS = \"_items\"\n\nCOL_DEPTH = \"depth\"\n\n\nclass JSON:\n def __init__(self):\n self.data = None\n\n def load(self, path):\n \"\"\"\n Load a file in JSON format\n :param path:\n :return:\n \"\"\"\n all_json = glob.glob(path, recursive=True)\n # pd.read_json(\"data/corona.json\")\n with open(all_json[0]) as f:\n self.data = ujson.load(f)\n\n def schema(self):\n \"\"\"\n Return a JSON with the count, dtype and nested structure\n :return:\n \"\"\"\n\n def _schema(_data, _keys):\n if isinstance(_data, dict):\n for x, y in _data.items():\n if is_dict(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x][PROPERTIES] = {}\n _schema(y, _keys[x][PROPERTIES])\n elif is_list(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x] = {ITEMS: {PROPERTIES: {}, META: {\"count\": len(y), \"dtype\": type(y)}}}\n _schema(y, _keys[x][ITEMS][PROPERTIES])\n elif is_str(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n _schema(y, _keys[x])\n elif is_int(y):\n _keys[x] = {META: {\"dtype\": type(y)}}\n _schema(y, _keys[x])\n\n elif is_list(_data):\n for x in _data:\n _schema(x, _keys)\n\n keys = {}\n _schema(self.data, keys)\n return keys\n\n def freq(self, n=100):\n \"\"\"\n Calculate the count on every dict or list in the json\n :param n:\n :return:\n \"\"\"\n\n def _profile(keys, parent, result=None):\n for key, values in keys.items():\n if values.get(PROPERTIES):\n _meta = values.get(META)\n _properties = values.get(PROPERTIES)\n elif values.get(ITEMS):\n _meta = values.get(ITEMS).get(META)\n _properties = values.get(ITEMS).get(PROPERTIES)\n\n if values.get(PROPERTIES) or values.get(ITEMS):\n result.append([key, _meta[\"count\"], _meta[\"dtype\"], parent, len(parent)])\n _profile(_properties, parent + [key], result=result)\n\n data = []\n _profile(self.schema(), [], data)\n df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])\n df = df.sort_values(by=[\"count\", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')\n return df\n\n def flatten(self, path):\n \"\"\"\n Flatten a JSON from a json path\n :param path:\n :return:\n \"\"\"\n\n def _flatten_json(_values):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n # i = 0\n for a in x:\n # flatten(a, name + str(i) + '_')\n flatten(a, name + '_')\n # i += 1\n else:\n out[name[:-1]] = x\n\n flatten(_values)\n return out\n\n result = []\n value = glom(self.data, path, skip_exc=KeyError)\n if 
is_list(value):\n for i in value:\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n a = {\"col\": i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n\n def to_pandas(self, path):\n result = self.flatten(path)\n return pd.DataFrame(data=result)\n", "path": "optimus/engines/pandas/io/json.py"}]} | 1,865 | 128 |
gh_patches_debug_8107 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transparent mode fail with looking up failure.
##### Steps to reproduce the problem:
1. Launch Wifi Access Point(OS X)
2. Setup pfctl configuration so that http packet will be forwarded.
3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` )
4. Access web page after connecting to AP which launched before.
5. See event log.
##### Any other comments? What have you tried so far?
When I tried to use transparent mode with OS X(10.11.6).
RuntimeError("Could not resolve original destination.") raised.
I investigated this bug.
And I found that this is caused by difference between AF_INET's and AF_INET6's peername.
https://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567
If we use AF_INET, getpeername() return string like `"192.168.2.5:45670"`.
But if we use AF_INET6, getpeername() return string like `"::ffff:192.168.2.5:45670"`.
`pfctl -s state` 's result is like below.
`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`
As you see, `::ffff:` doesn't exist.
So [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises RuntimeError() because `spec in i` condition won't become true.
##### System information
Mitmproxy version: 3.0.0 (release version)
Python version: 3.6.2
Platform: Darwin-15.6.0-x86_64-i386-64bit
SSL version: OpenSSL 1.0.2l 25 May 2017
Mac version: 10.11.6 ('', '', '') x86_64
</issue>
<code>
[start of mitmproxy/platform/pf.py]
1 import sys
2
3
4 def lookup(address, port, s):
5 """
6 Parse the pfctl state output s, to look up the destination host
7 matching the client (address, port).
8
9 Returns an (address, port) tuple, or None.
10 """
11 s = s.decode()
12 spec = "%s:%s" % (address, port)
13 for i in s.split("\n"):
14 if "ESTABLISHED:ESTABLISHED" in i and spec in i:
15 s = i.split()
16 if len(s) > 4:
17 if sys.platform.startswith("freebsd"):
18 # strip parentheses for FreeBSD pfctl
19 s = s[3][1:-1].split(":")
20 else:
21 s = s[4].split(":")
22
23 if len(s) == 2:
24 return s[0], int(s[1])
25 raise RuntimeError("Could not resolve original destination.")
26
[end of mitmproxy/platform/pf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py
--- a/mitmproxy/platform/pf.py
+++ b/mitmproxy/platform/pf.py
@@ -1,3 +1,4 @@
+import re
import sys
@@ -8,6 +9,9 @@
Returns an (address, port) tuple, or None.
"""
+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
+ # Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
+ address = re.sub("^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
spec = "%s:%s" % (address, port)
for i in s.split("\n"):
| {"golden_diff": "diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py\n--- a/mitmproxy/platform/pf.py\n+++ b/mitmproxy/platform/pf.py\n@@ -1,3 +1,4 @@\n+import re\n import sys\n \n \n@@ -8,6 +9,9 @@\n \n Returns an (address, port) tuple, or None.\n \"\"\"\n+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.\n+ # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n+ address = re.sub(\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n", "issue": "Transparent mode fail with looking up failure.\n##### Steps to reproduce the problem:\r\n\r\n1. Launch Wifi Access Point(OS X)\r\n2. Setup pfctl configuration so that http packet will be forwarded.\r\n3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` )\r\n4. Access web page after connecting to AP which launched before.\r\n5. See event log.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nWhen I tried to use transparent mode with OS X(10.11.6).\r\nRuntimeError(\"Could not resolve original destination.\") raised.\r\n\r\nI investigated this bug.\r\nAnd I found that this is caused by difference between AF_INET's and AF_INET6's peername.\r\nhttps://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567\r\n\r\nIf we use AF_INET, getpeername() return string like `\"192.168.2.5:45670\"`.\r\nBut if we use AF_INET6, getpeername() return string like `\"::ffff:192.168.2.5:45670\"`.\r\n\r\n`pfctl -s state` 's result is like below.\r\n`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`\r\n\r\nAs you see, `::ffff:` doesn't exist.\r\n\r\nSo [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises RuntimeError() because `spec in i` condition won't become true.\r\n\r\n##### System information\r\n\r\nMitmproxy version: 3.0.0 (release version)\r\nPython version: 3.6.2\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2l 25 May 2017\r\nMac version: 10.11.6 ('', '', '') x86_64\n", "before_files": [{"content": "import sys\n\n\ndef lookup(address, port, s):\n \"\"\"\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n \"\"\"\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and spec in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n raise RuntimeError(\"Could not resolve original destination.\")\n", "path": "mitmproxy/platform/pf.py"}]} | 1,312 | 202 |
gh_patches_debug_40372 | rasdani/github-patches | git_diff | wright-group__WrightTools-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
from_solis: import fails when no metadata
It is possible to export asc image from Solis software without metadata (clearly this is not preferred, but it should be handled gracefully).
from_solis assumes "Data and Time" field exists in metadata (for attrs timestamp). Fall back on file creation date as an alternative.
</issue>
<code>
[start of WrightTools/data/_solis.py]
1 """Andor."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import os
8 import pathlib
9 import time
10
11 import numpy as np
12
13 from ._data import Data
14 from .. import exceptions as wt_exceptions
15 from ..kit import _timestamp as timestamp
16
17
18 # --- define --------------------------------------------------------------------------------------
19
20
21 __all__ = ["from_Solis"]
22
23
24 # --- from function -------------------------------------------------------------------------------
25
26
27 def from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:
28 """Create a data object from Andor Solis software (ascii exports).
29
30 Parameters
31 ----------
32 filepath : path-like
33 Path to file (should be .asc format).
34 Can be either a local or remote file (http/ftp).
35 Can be compressed with gz/bz2, decompression based on file name.
36 name : string (optional)
37 Name to give to the created data object. If None, filename is used.
38 Default is None.
39 parent : WrightTools.Collection (optional)
40 Collection to place new data object within. Default is None.
41 verbose : boolean (optional)
42 Toggle talkback. Default is True.
43
44 Returns
45 -------
46 data
47 New data object.
48 """
49 # parse filepath
50 filestr = os.fspath(filepath)
51 filepath = pathlib.Path(filepath)
52
53 if not ".asc" in filepath.suffixes:
54 wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
55 # parse name
56 if not name:
57 name = filepath.name.split(".")[0]
58 # create data
59 ds = np.DataSource(None)
60 f = ds.open(filestr, "rt")
61 axis0 = []
62 arr = []
63 attrs = {}
64
65 line0 = f.readline().strip()[:-1]
66 line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma
67 axis0.append(line0.pop(0))
68 arr.append(line0)
69
70 def get_frames(f, arr, axis0):
71 axis0_written = False
72 while True:
73 line = f.readline().strip()[:-1]
74 if len(line) == 0:
75 break
76 else:
77 line = [float(x) for x in line.split(",")]
78 # signature of new frames is restart of axis0
79 if not axis0_written and (line[0] == axis0[0]):
80 axis0_written = True
81 if axis0_written:
82 line.pop(0)
83 else:
84 axis0.append(line.pop(0))
85 arr.append(line)
86 return arr, axis0
87
88 arr, axis0 = get_frames(f, arr, axis0)
89 nframes = len(arr) // len(axis0)
90
91 i = 0
92 while i < 3:
93 line = f.readline().strip()
94 if len(line) == 0:
95 i += 1
96 else:
97 try:
98 key, val = line.split(":", 1)
99 except ValueError:
100 pass
101 else:
102 attrs[key.strip()] = val.strip()
103
104 f.close()
105 created = attrs["Date and Time"] # is this UTC?
106 created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
107 created = timestamp.TimeStamp(time.mktime(created)).RFC3339
108
109 kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
110 if parent is None:
111 data = Data(**kwargs)
112 else:
113 data = parent.create_data(**kwargs)
114
115 axis0 = np.array(axis0)
116 if float(attrs["Grating Groove Density (l/mm)"]) == 0:
117 xname = "xindex"
118 xunits = None
119 else:
120 xname = "wm"
121 xunits = "nm"
122 axes = [xname, "yindex"]
123
124 if nframes == 1:
125 arr = np.array(arr)
126 data.create_variable(name=xname, values=axis0[:, None], units=xunits)
127 data.create_variable(name="yindex", values=np.arange(arr.shape[-1])[None, :], units=None)
128 else:
129 arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))
130 data.create_variable(name="frame", values=np.arange(nframes)[:, None, None], units=None)
131 data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)
132 data.create_variable(
133 name="yindex", values=np.arange(arr.shape[-1])[None, None, :], units=None
134 )
135 axes = ["frame"] + axes
136
137 data.transform(*axes)
138 arr /= float(attrs["Exposure Time (secs)"])
139 # signal has units of Hz because time normalized
140 data.create_channel(name="signal", values=arr, signed=False, units="Hz")
141
142 for key, val in attrs.items():
143 data.attrs[key] = val
144
145 # finish
146 if verbose:
147 print("data created at {0}".format(data.fullpath))
148 print(" axes: {0}".format(data.axis_names))
149 print(" shape: {0}".format(data.shape))
150 return data
151
[end of WrightTools/data/_solis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/data/_solis.py b/WrightTools/data/_solis.py
--- a/WrightTools/data/_solis.py
+++ b/WrightTools/data/_solis.py
@@ -7,6 +7,7 @@
import os
import pathlib
import time
+import warnings
import numpy as np
@@ -43,8 +44,20 @@
Returns
-------
- data
+ data : WrightTools.Data
New data object.
+ Channels: `signal`. If exposure time is in metadata, signal is given as a count rate (Hz).
+ Variables, Axes: `yindex` and `xindex` (no grating) or `wm` (grating)
+
+ Notes
+ -----
+ When exporting as ascii, including metadata is optional.
+ It is _strongly recommended_ that you include metadata in exports.
+ Metadata informs the image creation date, exposure time, and axes.
+ However, if metadata is not present, this importer will make its best guesses to populate these fields accurately.
+
+ Saving processed data (e.g. vertically-binned data) in Solis software can remove/omit important metadata, so we advise exporting the raw camera images.
+
"""
# parse filepath
filestr = os.fspath(filepath)
@@ -102,9 +115,17 @@
attrs[key.strip()] = val.strip()
f.close()
- created = attrs["Date and Time"] # is this UTC?
- created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
- created = timestamp.TimeStamp(time.mktime(created)).RFC3339
+
+ try:
+ created = attrs["Date and Time"] # is this UTC?
+ created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
+ created = timestamp.TimeStamp(time.mktime(created)).RFC3339
+ except KeyError: # use file creation time
+ created = os.stat(filepath).st_mtime
+ created = timestamp.TimeStamp(created).RFC3339
+ warnings.warn(
+ f"{filepath.name} has no 'Date and Time' field: using file modified time instead: {created}"
+ )
kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
if parent is None:
@@ -113,7 +134,15 @@
data = parent.create_data(**kwargs)
axis0 = np.array(axis0)
- if float(attrs["Grating Groove Density (l/mm)"]) == 0:
+ try:
+ groove_density = float(attrs["Grating Groove Density (l/mm)"])
+ except KeyError: # assume no grating
+ warnings.warn(
+ f"{filepath.name} has no 'Grating Groove Density (1/mm)' field: guessing x axis units."
+ )
+ groove_density = isinstance(axis0[0], float)
+
+ if groove_density == 0:
xname = "xindex"
xunits = None
else:
@@ -135,9 +164,17 @@
axes = ["frame"] + axes
data.transform(*axes)
- arr /= float(attrs["Exposure Time (secs)"])
- # signal has units of Hz because time normalized
- data.create_channel(name="signal", values=arr, signed=False, units="Hz")
+ try:
+ exposure_time = float(attrs["Exposure Time (secs)"])
+ if exposure_time == 0:
+ raise ZeroDivisionError
+ arr /= exposure_time
+ except (KeyError, ZeroDivisionError) as e: # do not normalize
+ warnings.warn(f"{filepath.name} camera signal cannot be given as a count rate.")
+ data.create_channel(name="signal", values=arr, signed=False)
+ else:
+ # signal has units of Hz because time normalized
+ data.create_channel(name="signal", values=arr, signed=False, units="Hz")
for key, val in attrs.items():
data.attrs[key] = val
| {"golden_diff": "diff --git a/WrightTools/data/_solis.py b/WrightTools/data/_solis.py\n--- a/WrightTools/data/_solis.py\n+++ b/WrightTools/data/_solis.py\n@@ -7,6 +7,7 @@\n import os\n import pathlib\n import time\n+import warnings\n \n import numpy as np\n \n@@ -43,8 +44,20 @@\n \n Returns\n -------\n- data\n+ data : WrightTools.Data\n New data object.\n+ Channels: `signal`. If exposure time is in metadata, signal is given as a count rate (Hz).\n+ Variables, Axes: `yindex` and `xindex` (no grating) or `wm` (grating)\n+\n+ Notes\n+ -----\n+ When exporting as ascii, including metadata is optional.\n+ It is _strongly recommended_ that you include metadata in exports.\n+ Metadata informs the image creation date, exposure time, and axes.\n+ However, if metadata is not present, this importer will make its best guesses to populate these fields accurately.\n+\n+ Saving processed data (e.g. vertically-binned data) in Solis software can remove/omit important metadata, so we advise exporting the raw camera images.\n+\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n@@ -102,9 +115,17 @@\n attrs[key.strip()] = val.strip()\n \n f.close()\n- created = attrs[\"Date and Time\"] # is this UTC?\n- created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n- created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n+\n+ try:\n+ created = attrs[\"Date and Time\"] # is this UTC?\n+ created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n+ created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n+ except KeyError: # use file creation time\n+ created = os.stat(filepath).st_mtime\n+ created = timestamp.TimeStamp(created).RFC3339\n+ warnings.warn(\n+ f\"{filepath.name} has no 'Date and Time' field: using file modified time instead: {created}\"\n+ )\n \n kwargs = {\"name\": name, \"kind\": \"Solis\", \"source\": filestr, \"created\": created}\n if parent is None:\n@@ -113,7 +134,15 @@\n data = parent.create_data(**kwargs)\n \n axis0 = np.array(axis0)\n- if float(attrs[\"Grating Groove Density (l/mm)\"]) == 0:\n+ try:\n+ groove_density = float(attrs[\"Grating Groove Density (l/mm)\"])\n+ except KeyError: # assume no grating\n+ warnings.warn(\n+ f\"{filepath.name} has no 'Grating Groove Density (1/mm)' field: guessing x axis units.\"\n+ )\n+ groove_density = isinstance(axis0[0], float)\n+\n+ if groove_density == 0:\n xname = \"xindex\"\n xunits = None\n else:\n@@ -135,9 +164,17 @@\n axes = [\"frame\"] + axes\n \n data.transform(*axes)\n- arr /= float(attrs[\"Exposure Time (secs)\"])\n- # signal has units of Hz because time normalized\n- data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n+ try:\n+ exposure_time = float(attrs[\"Exposure Time (secs)\"])\n+ if exposure_time == 0:\n+ raise ZeroDivisionError\n+ arr /= exposure_time\n+ except (KeyError, ZeroDivisionError) as e: # do not normalize\n+ warnings.warn(f\"{filepath.name} camera signal cannot be given as a count rate.\")\n+ data.create_channel(name=\"signal\", values=arr, signed=False)\n+ else:\n+ # signal has units of Hz because time normalized\n+ data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n \n for key, val in attrs.items():\n data.attrs[key] = val\n", "issue": "from_solis: import fails when no metadata\nIt is possible to export asc image from Solis software without metadata (clearly this is not preferred, but it should be handled gracefully). \r\n\r\nfrom_solis assumes \"Data and Time\" field exists in metadata (for attrs timestamp). 
Fall back on file creation date as an alternative. \n", "before_files": [{"content": "\"\"\"Andor.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport pathlib\nimport time\n\nimport numpy as np\n\nfrom ._data import Data\nfrom .. import exceptions as wt_exceptions\nfrom ..kit import _timestamp as timestamp\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_Solis\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from Andor Solis software (ascii exports).\n\n Parameters\n ----------\n filepath : path-like\n Path to file (should be .asc format).\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : string (optional)\n Name to give to the created data object. If None, filename is used.\n Default is None.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data\n New data object.\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n if not \".asc\" in filepath.suffixes:\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \".asc\")\n # parse name\n if not name:\n name = filepath.name.split(\".\")[0]\n # create data\n ds = np.DataSource(None)\n f = ds.open(filestr, \"rt\")\n axis0 = []\n arr = []\n attrs = {}\n\n line0 = f.readline().strip()[:-1]\n line0 = [float(x) for x in line0.split(\",\")] # TODO: robust to space, tab, comma\n axis0.append(line0.pop(0))\n arr.append(line0)\n\n def get_frames(f, arr, axis0):\n axis0_written = False\n while True:\n line = f.readline().strip()[:-1]\n if len(line) == 0:\n break\n else:\n line = [float(x) for x in line.split(\",\")]\n # signature of new frames is restart of axis0\n if not axis0_written and (line[0] == axis0[0]):\n axis0_written = True\n if axis0_written:\n line.pop(0)\n else:\n axis0.append(line.pop(0))\n arr.append(line)\n return arr, axis0\n\n arr, axis0 = get_frames(f, arr, axis0)\n nframes = len(arr) // len(axis0)\n\n i = 0\n while i < 3:\n line = f.readline().strip()\n if len(line) == 0:\n i += 1\n else:\n try:\n key, val = line.split(\":\", 1)\n except ValueError:\n pass\n else:\n attrs[key.strip()] = val.strip()\n\n f.close()\n created = attrs[\"Date and Time\"] # is this UTC?\n created = time.strptime(created, \"%a %b %d %H:%M:%S %Y\")\n created = timestamp.TimeStamp(time.mktime(created)).RFC3339\n\n kwargs = {\"name\": name, \"kind\": \"Solis\", \"source\": filestr, \"created\": created}\n if parent is None:\n data = Data(**kwargs)\n else:\n data = parent.create_data(**kwargs)\n\n axis0 = np.array(axis0)\n if float(attrs[\"Grating Groove Density (l/mm)\"]) == 0:\n xname = \"xindex\"\n xunits = None\n else:\n xname = \"wm\"\n xunits = \"nm\"\n axes = [xname, \"yindex\"]\n\n if nframes == 1:\n arr = np.array(arr)\n data.create_variable(name=xname, values=axis0[:, None], units=xunits)\n data.create_variable(name=\"yindex\", values=np.arange(arr.shape[-1])[None, :], units=None)\n else:\n arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))\n data.create_variable(name=\"frame\", values=np.arange(nframes)[:, None, None], units=None)\n 
data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)\n data.create_variable(\n name=\"yindex\", values=np.arange(arr.shape[-1])[None, None, :], units=None\n )\n axes = [\"frame\"] + axes\n\n data.transform(*axes)\n arr /= float(attrs[\"Exposure Time (secs)\"])\n # signal has units of Hz because time normalized\n data.create_channel(name=\"signal\", values=arr, signed=False, units=\"Hz\")\n\n for key, val in attrs.items():\n data.attrs[key] = val\n\n # finish\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" axes: {0}\".format(data.axis_names))\n print(\" shape: {0}\".format(data.shape))\n return data\n", "path": "WrightTools/data/_solis.py"}]} | 2,070 | 928 |
gh_patches_debug_42436 | rasdani/github-patches | git_diff | conan-io__conan-center-index-15293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] spix/0.5
### Package Name/Version
spix/0.5
### Changelog
https://github.com/faaxm/spix/releases/tag/v0.5
### Context about the new update
I will push a PR for this version
</issue>
<code>
[start of recipes/spix/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, replace_in_file
4 from conan.tools.build import check_min_cppstd
5 from conan.tools.scm import Version
6 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
7 import os
8
9
10 required_conan_version = ">=1.52.0"
11
12
13 class SpixConan(ConanFile):
14 name = "spix"
15 description = "UI test automation library for QtQuick/QML Apps"
16 license = "MIT"
17 url = "https://github.com/conan-io/conan-center-index"
18 homepage = "https://github.com/faaxm/spix"
19 topics = ("automation", "qt", "qml", "qt-quick", "qt5", "qtquick", "automated-testing", "qt-qml", "qml-applications")
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "shared": [True, False],
23 "fPIC": [True, False],
24 }
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 }
29
30 @property
31 def _minimum_cpp_standard(self):
32 return 14
33
34 @property
35 def _compilers_minimum_version(self):
36 return {
37 "Visual Studio": "14",
38 "gcc": "5",
39 "clang": "3.4",
40 "apple-clang": "10"
41 }
42
43 def export_sources(self):
44 export_conandata_patches(self)
45
46 def config_options(self):
47 if self.settings.os == "Windows":
48 del self.options.fPIC
49
50 def configure(self):
51 if self.options.shared:
52 try:
53 del self.options.fPIC
54 except Exception:
55 pass
56
57 def layout(self):
58 cmake_layout(self, src_folder="src")
59
60 def requirements(self):
61 self.requires("anyrpc/1.0.2")
62 self.requires("qt/6.3.1")
63 self.requires("expat/2.4.9")
64
65 def validate(self):
66 if self.info.settings.compiler.cppstd:
67 check_min_cppstd(self, self._minimum_cpp_standard)
68 minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
69 if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
70 raise ConanInvalidConfiguration(
71 f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
72 )
73
74 if Version(self.dependencies["qt"].ref.version).major == 6 and not self.options["qt"].qtshadertools:
75 raise ConanInvalidConfiguration(f"{self.ref} requires qt:qtshadertools to get the Quick module")
76 if not (self.options["qt"].gui and self.options["qt"].qtdeclarative):
77 raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
78
79 def source(self):
80 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
81
82 def generate(self):
83 tc = CMakeToolchain(self)
84 tc.variables["SPIX_BUILD_EXAMPLES"] = False
85 tc.variables["SPIX_BUILD_TESTS"] = False
86 tc.variables["SPIX_QT_MAJOR"] = Version(self.dependencies["qt"].ref.version).major
87 tc.generate()
88
89 deps = CMakeDeps(self)
90 deps.generate()
91
92 def _patch_sources(self):
93 apply_conandata_patches(self)
94 if Version(self.deps_cpp_info["qt"].version).major == 6:
95 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
96
97 def build(self):
98 self._patch_sources()
99 cmake = CMake(self)
100 cmake.configure()
101 cmake.build()
102
103 def package(self):
104 copy(self, pattern="LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
105 cmake = CMake(self)
106 cmake.install()
107
108 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
109 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
110 rmdir(self, os.path.join(self.package_folder, "share"))
111 rm(self, "*.la", os.path.join(self.package_folder, "lib"))
112 rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
113 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
114
115 def package_info(self):
116 self.cpp_info.libs = ["Spix"]
117 self.cpp_info.set_property("cmake_file_name", "Spix")
118 self.cpp_info.set_property("cmake_target_name", "Spix::Spix")
119
120 # TODO remove once conan v2 removed cmake_find_package_*
121 self.cpp_info.names["cmake_find_package"] = "Spix"
122 self.cpp_info.names["cmake_find_package_multi"] = "Spix"
123
[end of recipes/spix/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py
--- a/recipes/spix/all/conanfile.py
+++ b/recipes/spix/all/conanfile.py
@@ -7,7 +7,7 @@
import os
-required_conan_version = ">=1.52.0"
+required_conan_version = ">=1.53.0"
class SpixConan(ConanFile):
@@ -29,16 +29,26 @@
@property
def _minimum_cpp_standard(self):
- return 14
+ return 14 if self.version == "0.4" else 17
@property
def _compilers_minimum_version(self):
- return {
- "Visual Studio": "14",
- "gcc": "5",
- "clang": "3.4",
- "apple-clang": "10"
- }
+ if self.version == "0.4":
+ return {
+ "Visual Studio": "14",
+ "msvc": "190",
+ "gcc": "5",
+ "clang": "3.4",
+ "apple-clang": "10"
+ }
+ else:
+ return {
+ "Visual Studio": "15.7",
+ "msvc": "192", # FIXME: 15.7 is actually 1914 but needs to be tested
+ "gcc": "7",
+ "clang": "5",
+ "apple-clang": "10",
+ }
def export_sources(self):
export_conandata_patches(self)
@@ -49,24 +59,20 @@
def configure(self):
if self.options.shared:
- try:
- del self.options.fPIC
- except Exception:
- pass
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("anyrpc/1.0.2")
- self.requires("qt/6.3.1")
- self.requires("expat/2.4.9")
+ self.requires("qt/6.4.2")
def validate(self):
- if self.info.settings.compiler.cppstd:
+ if self.settings.compiler.cppstd:
check_min_cppstd(self, self._minimum_cpp_standard)
- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
)
@@ -77,7 +83,7 @@
raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
def source(self):
- get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
+ get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
@@ -87,11 +93,13 @@
tc.generate()
deps = CMakeDeps(self)
+ deps.set_property("anyrpc", "cmake_file_name", "AnyRPC")
+ deps.set_property("anyrpc", "cmake_target_name", "AnyRPC::anyrpc")
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
- if Version(self.deps_cpp_info["qt"].version).major == 6:
+ if self.version == "0.4" and Version(self.dependencies["qt"].ref.version).major == 6:
replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
def build(self):
| {"golden_diff": "diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py\n--- a/recipes/spix/all/conanfile.py\n+++ b/recipes/spix/all/conanfile.py\n@@ -7,7 +7,7 @@\n import os\n \n \n-required_conan_version = \">=1.52.0\"\n+required_conan_version = \">=1.53.0\"\n \n \n class SpixConan(ConanFile):\n@@ -29,16 +29,26 @@\n \n @property\n def _minimum_cpp_standard(self):\n- return 14\n+ return 14 if self.version == \"0.4\" else 17\n \n @property\n def _compilers_minimum_version(self):\n- return {\n- \"Visual Studio\": \"14\",\n- \"gcc\": \"5\",\n- \"clang\": \"3.4\",\n- \"apple-clang\": \"10\"\n- }\n+ if self.version == \"0.4\":\n+ return {\n+ \"Visual Studio\": \"14\",\n+ \"msvc\": \"190\",\n+ \"gcc\": \"5\",\n+ \"clang\": \"3.4\",\n+ \"apple-clang\": \"10\"\n+ }\n+ else:\n+ return {\n+ \"Visual Studio\": \"15.7\",\n+ \"msvc\": \"192\", # FIXME: 15.7 is actually 1914 but needs to be tested\n+ \"gcc\": \"7\",\n+ \"clang\": \"5\",\n+ \"apple-clang\": \"10\",\n+ }\n \n def export_sources(self):\n export_conandata_patches(self)\n@@ -49,24 +59,20 @@\n \n def configure(self):\n if self.options.shared:\n- try:\n- del self.options.fPIC\n- except Exception:\n- pass\n+ self.options.rm_safe(\"fPIC\")\n \n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n- self.requires(\"qt/6.3.1\")\n- self.requires(\"expat/2.4.9\")\n+ self.requires(\"qt/6.4.2\")\n \n def validate(self):\n- if self.info.settings.compiler.cppstd:\n+ if self.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n@@ -77,7 +83,7 @@\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n \n def source(self):\n- get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n+ get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n \n def generate(self):\n tc = CMakeToolchain(self)\n@@ -87,11 +93,13 @@\n tc.generate()\n \n deps = CMakeDeps(self)\n+ deps.set_property(\"anyrpc\", \"cmake_file_name\", \"AnyRPC\")\n+ deps.set_property(\"anyrpc\", \"cmake_target_name\", \"AnyRPC::anyrpc\")\n deps.generate()\n \n def _patch_sources(self):\n apply_conandata_patches(self)\n- if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n+ if self.version == \"0.4\" and Version(self.dependencies[\"qt\"].ref.version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n \n def build(self):\n", "issue": "[request] spix/0.5\n### Package Name/Version\n\nspix/0.5\n\n### Changelog\n\nhttps://github.com/faaxm/spix/releases/tag/v0.5\n\n### Context about the new update\n\nI will push a PR for this version\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, 
replace_in_file\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.scm import Version\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nimport os\n\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass SpixConan(ConanFile):\n name = \"spix\"\n description = \"UI test automation library for QtQuick/QML Apps\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/faaxm/spix\"\n topics = (\"automation\", \"qt\", \"qml\", \"qt-quick\", \"qt5\", \"qtquick\", \"automated-testing\", \"qt-qml\", \"qml-applications\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_cpp_standard(self):\n return 14\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"14\",\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\"\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n try:\n del self.options.fPIC\n except Exception:\n pass\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n self.requires(\"qt/6.3.1\")\n self.requires(\"expat/2.4.9\")\n \n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n\n if Version(self.dependencies[\"qt\"].ref.version).major == 6 and not self.options[\"qt\"].qtshadertools:\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:qtshadertools to get the Quick module\")\n if not (self.options[\"qt\"].gui and self.options[\"qt\"].qtdeclarative):\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"SPIX_BUILD_EXAMPLES\"] = False\n tc.variables[\"SPIX_BUILD_TESTS\"] = False\n tc.variables[\"SPIX_QT_MAJOR\"] = Version(self.dependencies[\"qt\"].ref.version).major\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE.txt\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n cmake = CMake(self)\n cmake.install()\n\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.la\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"Spix\"]\n self.cpp_info.set_property(\"cmake_file_name\", \"Spix\") \n self.cpp_info.set_property(\"cmake_target_name\", \"Spix::Spix\")\n \n # TODO remove once conan v2 removed cmake_find_package_*\n self.cpp_info.names[\"cmake_find_package\"] = \"Spix\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Spix\"\n", "path": "recipes/spix/all/conanfile.py"}]} | 1,994 | 938 |
gh_patches_debug_8697 | rasdani/github-patches | git_diff | tobymao__sqlglot-1663 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue when calculating lineage when a CTE has the same name as a table from the schema
**Fully reproducible code snippet**
The following code
```sql
from sqlglot.lineage import lineage
selected_column = "col_a"
sql = """
with
my_cte_name_also_a_table_name as (
select * from raw.schema.my_table
)
, inter as (
select * from my_cte_name_also_a_table_name
)
select * from inter
"""
schema = {"raw": {"schema" : {"my_cte_name_also_a_table_name": {"col_1": "int"}, "my_table": {"col_a": "int"}}}}
schema_without_table = {"my_table": {"col_a": "int"}}
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
```
returns the error
```
Traceback (most recent call last):
File "/xxx/short_issue_lineage.py", line 21, in <module>
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 148, in lineage
return to_node(column if isinstance(column, str) else column.name, scope)
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 136, in to_node
to_node(
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 112, in to_node
source = optimize(
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/optimizer.py", line 89, in optimize
expression = rule(expression, **rule_kwargs)
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py", line 49, in qualify_columns
_qualify_columns(scope, resolver)
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py", line 250, in _qualify_columns
raise OptimizeError(f"Unknown column: {column_name}")
sqlglot.errors.OptimizeError: Unknown column: col_a
```
It looks like the logic gets confused between `my_cte_name_also_a_table_name` the CTE (with a column called `col_a`) and `my_cte_name_also_a_table_name` the table from the schema (with a column called `col_1`).
With the example above, if I remove the table from the schema and calculate the lineage, the error goes away.
```
l = lineage(column=selected_column, sql=sql, schema=schema_without_table, dialect="snowflake")
```
Interestingly, the code without the intermediate CTE `inter` calculates the lineage correctly, even when the table with the CTE name is provided in the schema.
```
sql = """
with
my_cte_name_also_a_table_name as (
select * from raw.schema.my_table
)
select * from my_cte_name_also_a_table_name
"""
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
```
</issue>
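Conceptually, the bare name should resolve against the enclosing WITH clause before the schema is consulted, since only the CTE exposes `col_a`. A tiny, self-contained sketch of that lookup order (hypothetical illustration, independent of sqlglot's internals):

```python
cte_columns = {"my_cte_name_also_a_table_name": ["col_a"]}     # defined by the WITH clause
schema_columns = {"my_cte_name_also_a_table_name": ["col_1"]}  # defined by the schema

def resolve(name):
    # Inside the defining query, a CTE shadows a schema table of the same name.
    return cte_columns.get(name) or schema_columns.get(name)

assert "col_a" in resolve("my_cte_name_also_a_table_name")
```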
<code>
[start of sqlglot/lineage.py]
1 from __future__ import annotations
2
3 import json
4 import typing as t
5 from dataclasses import dataclass, field
6
7 from sqlglot import Schema, exp, maybe_parse
8 from sqlglot.optimizer import Scope, build_scope, optimize
9 from sqlglot.optimizer.lower_identities import lower_identities
10 from sqlglot.optimizer.qualify_columns import qualify_columns
11 from sqlglot.optimizer.qualify_tables import qualify_tables
12
13 if t.TYPE_CHECKING:
14 from sqlglot.dialects.dialect import DialectType
15
16
17 @dataclass(frozen=True)
18 class Node:
19 name: str
20 expression: exp.Expression
21 source: exp.Expression
22 downstream: t.List[Node] = field(default_factory=list)
23 alias: str = ""
24
25 def walk(self) -> t.Iterator[Node]:
26 yield self
27
28 for d in self.downstream:
29 if isinstance(d, Node):
30 yield from d.walk()
31 else:
32 yield d
33
34 def to_html(self, **opts) -> LineageHTML:
35 return LineageHTML(self, **opts)
36
37
38 def lineage(
39 column: str | exp.Column,
40 sql: str | exp.Expression,
41 schema: t.Optional[t.Dict | Schema] = None,
42 sources: t.Optional[t.Dict[str, str | exp.Subqueryable]] = None,
43 rules: t.Sequence[t.Callable] = (lower_identities, qualify_tables, qualify_columns),
44 dialect: DialectType = None,
45 ) -> Node:
46 """Build the lineage graph for a column of a SQL query.
47
48 Args:
49 column: The column to build the lineage for.
50 sql: The SQL string or expression.
51 schema: The schema of tables.
52 sources: A mapping of queries which will be used to continue building lineage.
53 rules: Optimizer rules to apply, by default only qualifying tables and columns.
54 dialect: The dialect of input SQL.
55
56 Returns:
57 A lineage node.
58 """
59
60 expression = maybe_parse(sql, dialect=dialect)
61
62 if sources:
63 expression = exp.expand(
64 expression,
65 {
66 k: t.cast(exp.Subqueryable, maybe_parse(v, dialect=dialect))
67 for k, v in sources.items()
68 },
69 )
70
71 optimized = optimize(expression, schema=schema, rules=rules)
72 scope = build_scope(optimized)
73
74 def to_node(
75 column_name: str,
76 scope: Scope,
77 scope_name: t.Optional[str] = None,
78 upstream: t.Optional[Node] = None,
79 alias: t.Optional[str] = None,
80 ) -> Node:
81 aliases = {
82 dt.alias: dt.comments[0].split()[1]
83 for dt in scope.derived_tables
84 if dt.comments and dt.comments[0].startswith("source: ")
85 }
86 if isinstance(scope.expression, exp.Union):
87 for scope in scope.union_scopes:
88 node = to_node(
89 column_name,
90 scope=scope,
91 scope_name=scope_name,
92 upstream=upstream,
93 alias=aliases.get(scope_name),
94 )
95 return node
96
97 # Find the specific select clause that is the source of the column we want.
98 # This can either be a specific, named select or a generic `*` clause.
99 select = next(
100 (select for select in scope.selects if select.alias_or_name == column_name),
101 exp.Star() if scope.expression.is_star else None,
102 )
103
104 if not select:
105 raise ValueError(f"Could not find {column_name} in {scope.expression}")
106
107 if isinstance(scope.expression, exp.Select):
108 # For better ergonomics in our node labels, replace the full select with
109 # a version that has only the column we care about.
110 # "x", SELECT x, y FROM foo
111 # => "x", SELECT x FROM foo
112 source = optimize(
113 scope.expression.select(select, append=False), schema=schema, rules=rules
114 )
115 select = source.selects[0]
116 else:
117 source = scope.expression
118
119 # Create the node for this step in the lineage chain, and attach it to the previous one.
120 node = Node(
121 name=f"{scope_name}.{column_name}" if scope_name else column_name,
122 source=source,
123 expression=select,
124 alias=alias or "",
125 )
126 if upstream:
127 upstream.downstream.append(node)
128
129 # Find all columns that went into creating this one to list their lineage nodes.
130 for c in set(select.find_all(exp.Column)):
131 table = c.table
132 source = scope.sources.get(table)
133
134 if isinstance(source, Scope):
135 # The table itself came from a more specific scope. Recurse into that one using the unaliased column name.
136 to_node(
137 c.name, scope=source, scope_name=table, upstream=node, alias=aliases.get(table)
138 )
139 else:
140 # The source is not a scope - we've reached the end of the line. At this point, if a source is not found
141 # it means this column's lineage is unknown. This can happen if the definition of a source used in a query
142 # is not passed into the `sources` map.
143 source = source or exp.Placeholder()
144 node.downstream.append(Node(name=c.sql(), source=source, expression=source))
145
146 return node
147
148 return to_node(column if isinstance(column, str) else column.name, scope)
149
150
151 class LineageHTML:
152 """Node to HTML generator using vis.js.
153
154 https://visjs.github.io/vis-network/docs/network/
155 """
156
157 def __init__(
158 self,
159 node: Node,
160 dialect: DialectType = None,
161 imports: bool = True,
162 **opts: t.Any,
163 ):
164 self.node = node
165 self.imports = imports
166
167 self.options = {
168 "height": "500px",
169 "width": "100%",
170 "layout": {
171 "hierarchical": {
172 "enabled": True,
173 "nodeSpacing": 200,
174 "sortMethod": "directed",
175 },
176 },
177 "interaction": {
178 "dragNodes": False,
179 "selectable": False,
180 },
181 "physics": {
182 "enabled": False,
183 },
184 "edges": {
185 "arrows": "to",
186 },
187 "nodes": {
188 "font": "20px monaco",
189 "shape": "box",
190 "widthConstraint": {
191 "maximum": 300,
192 },
193 },
194 **opts,
195 }
196
197 self.nodes = {}
198 self.edges = []
199
200 for node in node.walk():
201 if isinstance(node.expression, exp.Table):
202 label = f"FROM {node.expression.this}"
203 title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
204 group = 1
205 else:
206 label = node.expression.sql(pretty=True, dialect=dialect)
207 source = node.source.transform(
208 lambda n: exp.Tag(this=n, prefix="<b>", postfix="</b>")
209 if n is node.expression
210 else n,
211 copy=False,
212 ).sql(pretty=True, dialect=dialect)
213 title = f"<pre>{source}</pre>"
214 group = 0
215
216 node_id = id(node)
217
218 self.nodes[node_id] = {
219 "id": node_id,
220 "label": label,
221 "title": title,
222 "group": group,
223 }
224
225 for d in node.downstream:
226 self.edges.append({"from": node_id, "to": id(d)})
227
228 def __str__(self):
229 nodes = json.dumps(list(self.nodes.values()))
230 edges = json.dumps(self.edges)
231 options = json.dumps(self.options)
232 imports = (
233 """<script type="text/javascript" src="https://unpkg.com/vis-data@latest/peer/umd/vis-data.min.js"></script>
234 <script type="text/javascript" src="https://unpkg.com/vis-network@latest/peer/umd/vis-network.min.js"></script>
235 <link rel="stylesheet" type="text/css" href="https://unpkg.com/vis-network/styles/vis-network.min.css" />"""
236 if self.imports
237 else ""
238 )
239
240 return f"""<div>
241 <div id="sqlglot-lineage"></div>
242 {imports}
243 <script type="text/javascript">
244 var nodes = new vis.DataSet({nodes})
245 nodes.forEach(row => row["title"] = new DOMParser().parseFromString(row["title"], "text/html").body.childNodes[0])
246
247 new vis.Network(
248 document.getElementById("sqlglot-lineage"),
249 {{
250 nodes: nodes,
251 edges: new vis.DataSet({edges})
252 }},
253 {options},
254 )
255 </script>
256 </div>"""
257
258 def _repr_html_(self) -> str:
259 return self.__str__()
260
[end of sqlglot/lineage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/lineage.py b/sqlglot/lineage.py
--- a/sqlglot/lineage.py
+++ b/sqlglot/lineage.py
@@ -109,10 +109,7 @@
# a version that has only the column we care about.
# "x", SELECT x, y FROM foo
# => "x", SELECT x FROM foo
- source = optimize(
- scope.expression.select(select, append=False), schema=schema, rules=rules
- )
- select = source.selects[0]
+ source = t.cast(exp.Expression, scope.expression.select(select, append=False))
else:
source = scope.expression
| {"golden_diff": "diff --git a/sqlglot/lineage.py b/sqlglot/lineage.py\n--- a/sqlglot/lineage.py\n+++ b/sqlglot/lineage.py\n@@ -109,10 +109,7 @@\n # a version that has only the column we care about.\n # \"x\", SELECT x, y FROM foo\n # => \"x\", SELECT x FROM foo\n- source = optimize(\n- scope.expression.select(select, append=False), schema=schema, rules=rules\n- )\n- select = source.selects[0]\n+ source = t.cast(exp.Expression, scope.expression.select(select, append=False))\n else:\n source = scope.expression\n", "issue": "Issue when calculating lineage when a CTE has the same name as a table from the schema\n**Fully reproducible code snippet**\r\n\r\nThe following code \r\n```sql\r\nfrom sqlglot.lineage import lineage\r\n\r\nselected_column = \"col_a\"\r\nsql = \"\"\"\r\nwith \r\n\r\nmy_cte_name_also_a_table_name as (\r\n select * from raw.schema.my_table\r\n)\r\n\r\n, inter as (\r\n select * from my_cte_name_also_a_table_name\r\n)\r\n\r\nselect * from inter\r\n\"\"\"\r\n\r\nschema = {\"raw\": {\"schema\" : {\"my_cte_name_also_a_table_name\": {\"col_1\": \"int\"}, \"my_table\": {\"col_a\": \"int\"}}}}\r\nschema_without_table = {\"my_table\": {\"col_a\": \"int\"}}\r\n\r\nl = lineage(column=selected_column, sql=sql, schema=schema, dialect=\"snowflake\")\r\n```\r\n\r\nreturns the error\r\n```\r\nTraceback (most recent call last):\r\n File \"/xxx/short_issue_lineage.py\", line 21, in <module>\r\n l = lineage(column=selected_column, sql=sql, schema=schema, dialect=\"snowflake\")\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/lineage.py\", line 148, in lineage\r\n return to_node(column if isinstance(column, str) else column.name, scope)\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/lineage.py\", line 136, in to_node\r\n to_node(\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/lineage.py\", line 112, in to_node\r\n source = optimize(\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/optimizer/optimizer.py\", line 89, in optimize\r\n expression = rule(expression, **rule_kwargs)\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py\", line 49, in qualify_columns\r\n _qualify_columns(scope, resolver)\r\n File \"/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py\", line 250, in _qualify_columns\r\n raise OptimizeError(f\"Unknown column: {column_name}\")\r\nsqlglot.errors.OptimizeError: Unknown column: col_a\r\n```\r\n\r\n\r\nIt looks like there is an issue with the logic being confused between `my_cte_name_also_a_table_name` the CTE (with a column called `col_a`) and `my_cte_name_also_a_table_name` the table from the schema with a column called `col_1`.\r\n\r\nWith the example above, if I remove the table from the schema and calculate the lineage, the error goes away.\r\n```\r\nl = lineage(column=selected_column, sql=sql, schema=schema_without_table, dialect=\"snowflake\")\r\n```\r\n\r\n\r\nInterestingly, the code without the intermediate cte `inter` calculates the lineage correctly when the table with the CTE name is provided in the schema.\r\n```\r\nsql = \"\"\"\r\nwith \r\n\r\nmy_cte_name_also_a_table_name as (\r\n select * from raw.schema.my_table\r\n)\r\n\r\nselect * from my_cte_name_also_a_table_name\r\n\"\"\"\r\nl = lineage(column=selected_column, sql=sql, schema=schema, dialect=\"snowflake\")\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport typing as t\nfrom dataclasses import dataclass, field\n\nfrom sqlglot import Schema, exp, maybe_parse\nfrom sqlglot.optimizer import Scope, 
build_scope, optimize\nfrom sqlglot.optimizer.lower_identities import lower_identities\nfrom sqlglot.optimizer.qualify_columns import qualify_columns\nfrom sqlglot.optimizer.qualify_tables import qualify_tables\n\nif t.TYPE_CHECKING:\n from sqlglot.dialects.dialect import DialectType\n\n\n@dataclass(frozen=True)\nclass Node:\n name: str\n expression: exp.Expression\n source: exp.Expression\n downstream: t.List[Node] = field(default_factory=list)\n alias: str = \"\"\n\n def walk(self) -> t.Iterator[Node]:\n yield self\n\n for d in self.downstream:\n if isinstance(d, Node):\n yield from d.walk()\n else:\n yield d\n\n def to_html(self, **opts) -> LineageHTML:\n return LineageHTML(self, **opts)\n\n\ndef lineage(\n column: str | exp.Column,\n sql: str | exp.Expression,\n schema: t.Optional[t.Dict | Schema] = None,\n sources: t.Optional[t.Dict[str, str | exp.Subqueryable]] = None,\n rules: t.Sequence[t.Callable] = (lower_identities, qualify_tables, qualify_columns),\n dialect: DialectType = None,\n) -> Node:\n \"\"\"Build the lineage graph for a column of a SQL query.\n\n Args:\n column: The column to build the lineage for.\n sql: The SQL string or expression.\n schema: The schema of tables.\n sources: A mapping of queries which will be used to continue building lineage.\n rules: Optimizer rules to apply, by default only qualifying tables and columns.\n dialect: The dialect of input SQL.\n\n Returns:\n A lineage node.\n \"\"\"\n\n expression = maybe_parse(sql, dialect=dialect)\n\n if sources:\n expression = exp.expand(\n expression,\n {\n k: t.cast(exp.Subqueryable, maybe_parse(v, dialect=dialect))\n for k, v in sources.items()\n },\n )\n\n optimized = optimize(expression, schema=schema, rules=rules)\n scope = build_scope(optimized)\n\n def to_node(\n column_name: str,\n scope: Scope,\n scope_name: t.Optional[str] = None,\n upstream: t.Optional[Node] = None,\n alias: t.Optional[str] = None,\n ) -> Node:\n aliases = {\n dt.alias: dt.comments[0].split()[1]\n for dt in scope.derived_tables\n if dt.comments and dt.comments[0].startswith(\"source: \")\n }\n if isinstance(scope.expression, exp.Union):\n for scope in scope.union_scopes:\n node = to_node(\n column_name,\n scope=scope,\n scope_name=scope_name,\n upstream=upstream,\n alias=aliases.get(scope_name),\n )\n return node\n\n # Find the specific select clause that is the source of the column we want.\n # This can either be a specific, named select or a generic `*` clause.\n select = next(\n (select for select in scope.selects if select.alias_or_name == column_name),\n exp.Star() if scope.expression.is_star else None,\n )\n\n if not select:\n raise ValueError(f\"Could not find {column_name} in {scope.expression}\")\n\n if isinstance(scope.expression, exp.Select):\n # For better ergonomics in our node labels, replace the full select with\n # a version that has only the column we care about.\n # \"x\", SELECT x, y FROM foo\n # => \"x\", SELECT x FROM foo\n source = optimize(\n scope.expression.select(select, append=False), schema=schema, rules=rules\n )\n select = source.selects[0]\n else:\n source = scope.expression\n\n # Create the node for this step in the lineage chain, and attach it to the previous one.\n node = Node(\n name=f\"{scope_name}.{column_name}\" if scope_name else column_name,\n source=source,\n expression=select,\n alias=alias or \"\",\n )\n if upstream:\n upstream.downstream.append(node)\n\n # Find all columns that went into creating this one to list their lineage nodes.\n for c in set(select.find_all(exp.Column)):\n table = 
c.table\n source = scope.sources.get(table)\n\n if isinstance(source, Scope):\n # The table itself came from a more specific scope. Recurse into that one using the unaliased column name.\n to_node(\n c.name, scope=source, scope_name=table, upstream=node, alias=aliases.get(table)\n )\n else:\n # The source is not a scope - we've reached the end of the line. At this point, if a source is not found\n # it means this column's lineage is unknown. This can happen if the definition of a source used in a query\n # is not passed into the `sources` map.\n source = source or exp.Placeholder()\n node.downstream.append(Node(name=c.sql(), source=source, expression=source))\n\n return node\n\n return to_node(column if isinstance(column, str) else column.name, scope)\n\n\nclass LineageHTML:\n \"\"\"Node to HTML generator using vis.js.\n\n https://visjs.github.io/vis-network/docs/network/\n \"\"\"\n\n def __init__(\n self,\n node: Node,\n dialect: DialectType = None,\n imports: bool = True,\n **opts: t.Any,\n ):\n self.node = node\n self.imports = imports\n\n self.options = {\n \"height\": \"500px\",\n \"width\": \"100%\",\n \"layout\": {\n \"hierarchical\": {\n \"enabled\": True,\n \"nodeSpacing\": 200,\n \"sortMethod\": \"directed\",\n },\n },\n \"interaction\": {\n \"dragNodes\": False,\n \"selectable\": False,\n },\n \"physics\": {\n \"enabled\": False,\n },\n \"edges\": {\n \"arrows\": \"to\",\n },\n \"nodes\": {\n \"font\": \"20px monaco\",\n \"shape\": \"box\",\n \"widthConstraint\": {\n \"maximum\": 300,\n },\n },\n **opts,\n }\n\n self.nodes = {}\n self.edges = []\n\n for node in node.walk():\n if isinstance(node.expression, exp.Table):\n label = f\"FROM {node.expression.this}\"\n title = f\"<pre>SELECT {node.name} FROM {node.expression.this}</pre>\"\n group = 1\n else:\n label = node.expression.sql(pretty=True, dialect=dialect)\n source = node.source.transform(\n lambda n: exp.Tag(this=n, prefix=\"<b>\", postfix=\"</b>\")\n if n is node.expression\n else n,\n copy=False,\n ).sql(pretty=True, dialect=dialect)\n title = f\"<pre>{source}</pre>\"\n group = 0\n\n node_id = id(node)\n\n self.nodes[node_id] = {\n \"id\": node_id,\n \"label\": label,\n \"title\": title,\n \"group\": group,\n }\n\n for d in node.downstream:\n self.edges.append({\"from\": node_id, \"to\": id(d)})\n\n def __str__(self):\n nodes = json.dumps(list(self.nodes.values()))\n edges = json.dumps(self.edges)\n options = json.dumps(self.options)\n imports = (\n \"\"\"<script type=\"text/javascript\" src=\"https://unpkg.com/vis-data@latest/peer/umd/vis-data.min.js\"></script>\n <script type=\"text/javascript\" src=\"https://unpkg.com/vis-network@latest/peer/umd/vis-network.min.js\"></script>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://unpkg.com/vis-network/styles/vis-network.min.css\" />\"\"\"\n if self.imports\n else \"\"\n )\n\n return f\"\"\"<div>\n <div id=\"sqlglot-lineage\"></div>\n {imports}\n <script type=\"text/javascript\">\n var nodes = new vis.DataSet({nodes})\n nodes.forEach(row => row[\"title\"] = new DOMParser().parseFromString(row[\"title\"], \"text/html\").body.childNodes[0])\n\n new vis.Network(\n document.getElementById(\"sqlglot-lineage\"),\n {{\n nodes: nodes,\n edges: new vis.DataSet({edges})\n }},\n {options},\n )\n </script>\n</div>\"\"\"\n\n def _repr_html_(self) -> str:\n return self.__str__()\n", "path": "sqlglot/lineage.py"}]} | 3,817 | 156 |
gh_patches_debug_28026 | rasdani/github-patches | git_diff | bridgecrewio__checkov-961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys
**Describe the bug**
**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.
**To Reproduce**
The following terraform code will cause a test failure, which appears to be against the spirit of the rule:
```terraform
resource "aws_api_gateway_method" "POST" {
...
authorization = "NONE"
api_key_required = true
...
}
```
**Expected behavior**
I would expect this configuration to be considered secure.
**Desktop (please complete the following information):**
- OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833
- Checkov Version 1.0.833
</issue>
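One way to express the expected behaviour is to flag a method only when it is protected neither by an authorizer nor by an API key. A minimal sketch of that predicate (hypothetical, not the project's actual fix), using the single-element-list shape that `scan_resource_conf` receives:

```python
def is_open_method(conf):
    http_method = conf["http_method"][0]
    authorization = conf["authorization"][0]
    api_key_required = conf.get("api_key_required", [False])[0]
    # Only methods with no authorizer and no API key requirement are truly open.
    return http_method != "OPTIONS" and authorization == "NONE" and not api_key_required
```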
<code>
[start of checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
3
4 class APIGatewayAuthorization(BaseResourceCheck):
5
6 def __init__(self):
7 name = "Ensure there is no open access to back-end resources through API"
8 id = "CKV_AWS_59"
9 supported_resources = ['AWS::ApiGateway::Method']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'Properties' in conf.keys():
15 if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
16 if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20 check = APIGatewayAuthorization()
21
[end of checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py]
[start of checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class APIGatewayAuthorization(BaseResourceCheck):
6
7 def __init__(self):
8 name = "Ensure there is no open access to back-end resources through API"
9 id = "CKV_AWS_59"
10 supported_resources = ['aws_api_gateway_method']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 self.evaluated_keys = ['http_method', 'authorization']
16 if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20
21 check = APIGatewayAuthorization()
22
[end of checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
@@ -14,7 +14,8 @@
if 'Properties' in conf.keys():
if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
- return CheckResult.FAILED
+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:
+ return CheckResult.FAILED
return CheckResult.PASSED
check = APIGatewayAuthorization()
diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
@@ -12,8 +12,8 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
- self.evaluated_keys = ['http_method', 'authorization']
- if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']
+ if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):
return CheckResult.FAILED
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n@@ -14,7 +14,8 @@\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n- return CheckResult.FAILED\n+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n \n check = APIGatewayAuthorization()\ndiff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n@@ -12,8 +12,8 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n- self.evaluated_keys = ['http_method', 'authorization']\n- if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']\n+ if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys\n**Describe the bug**\r\n**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.\r\n\r\n**To Reproduce**\r\nThe following terraform code will cause a test failure, which appears to be against the spirit of the rule:\r\n```terraform\r\nresource \"aws_api_gateway_method\" \"POST\" {\r\n...\r\nauthorization = NONE\r\napi_key_required = true\r\n...\r\n}\r\n````\r\n\r\n**Expected behavior**\r\nI would expect this configuration to be considered secure. 
\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833\r\n - Checkov Version 1.0.833\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['AWS::ApiGateway::Method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['aws_api_gateway_method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = ['http_method', 'authorization']\n if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py"}]} | 1,255 | 412 |
gh_patches_debug_6386 | rasdani/github-patches | git_diff | huggingface__huggingface_hub-757 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chore: Updated the pillow version specifier
Hello there :wave:
Following up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the "tests" extra build!
Any feedback is welcome!
cc @osanseviero @Narsil
</issue>
<code>
[start of api-inference-community/setup.py]
1 from setuptools import setup
2
3
4 setup(
5 name="api_inference_community",
6 version="0.0.21",
7 description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
8 url="http://github.com/huggingface/api-inference-community",
9 author="Nicolas Patry",
10 author_email="[email protected]",
11 license="MIT",
12 packages=["api_inference_community"],
13 python_requires=">=3.6.0",
14 zip_safe=False,
15 install_requires=list(line for line in open("requirements.txt", "r")),
16 extras_require={
17 "test": [
18 "httpx>=0.18",
19 "Pillow>=8.2",
20 "httpx>=0.18",
21 "torch>=1.9.0",
22 "pytest>=6.2",
23 ]
24 },
25 )
26
[end of api-inference-community/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py
--- a/api-inference-community/setup.py
+++ b/api-inference-community/setup.py
@@ -3,7 +3,7 @@
setup(
name="api_inference_community",
- version="0.0.21",
+ version="0.0.23",
description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
url="http://github.com/huggingface/api-inference-community",
author="Nicolas Patry",
| {"golden_diff": "diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py\n--- a/api-inference-community/setup.py\n+++ b/api-inference-community/setup.py\n@@ -3,7 +3,7 @@\n \n setup(\n name=\"api_inference_community\",\n- version=\"0.0.21\",\n+ version=\"0.0.23\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n", "issue": "chore: Updated the pillow version specifier\nHello there :wave: \r\n\r\nFollowing up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the \"tests\" extra build!\r\n\r\nAny feedback is welcome!\r\n\r\ncc @osanseviero @Narsil \n", "before_files": [{"content": "from setuptools import setup\n\n\nsetup(\n name=\"api_inference_community\",\n version=\"0.0.21\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=[\"api_inference_community\"],\n python_requires=\">=3.6.0\",\n zip_safe=False,\n install_requires=list(line for line in open(\"requirements.txt\", \"r\")),\n extras_require={\n \"test\": [\n \"httpx>=0.18\",\n \"Pillow>=8.2\",\n \"httpx>=0.18\",\n \"torch>=1.9.0\",\n \"pytest>=6.2\",\n ]\n },\n)\n", "path": "api-inference-community/setup.py"}]} | 851 | 130 |
gh_patches_debug_23307 | rasdani/github-patches | git_diff | pypa__virtualenv-1730 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken activation in Windows for python3
virtualenv activation through `activate_this.py` is broken on Windows for Python 3. Check this very simple piece of code:
```
import os
import subprocess
import sys
from distutils.spawn import find_executable
venv = find_executable("virtualenv")
testdir = os.path.join(os.path.curdir, 'testenv')
subprocess.check_output((venv, testdir, "-p", sys.executable))
bin_path = os.path.join(testdir, "Scripts") if sys.platform in ("win32", "cygwin") else os.path.join(testdir, "bin")
path = os.path.join(bin_path, "activate_this.py")
with open(path) as f:
exec(f.read(), {"__file__": path})
```
This generates an `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:
```
for lib in "..\Lib\site-packages".split(os.pathsep):
path = os.path.realpath(os.path.join(bin_dir, lib))
site.addsitedir(path.decode("utf-8") if "yes" else path)
```
it's indeed expected that we get the error, because `decode()` is always called on a `str`: the literal `"yes"` used as the condition is a non-empty string and therefore always truthy. The question is where the `__DECODE_PATH__` placeholder from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 gets substituted with `"yes"`.
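To make the failure mode concrete, here is a minimal sketch (not the project's actual fix; `path` simply mirrors the variable in the snippet above) of why the rendered conditional always decodes, and what a type-guarded version would look like:
```
# "yes" is a non-empty string literal, so the condition is always truthy and the
# decode() branch is always taken, even on Python 3 where path is already a str.
path = "C:\\testenv\\Lib\\site-packages"
always_decodes = bool("yes")                                          # True on every interpreter
guarded = path.decode("utf-8") if isinstance(path, bytes) else path   # only decodes real bytes
```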
</issue>
<code>
[start of src/virtualenv/activation/python/__init__.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 from collections import OrderedDict
5
6 from virtualenv.info import WIN_CPYTHON_2
7 from virtualenv.util.path import Path
8 from virtualenv.util.six import ensure_text
9
10 from ..via_template import ViaTemplateActivator
11
12
13 class PythonActivator(ViaTemplateActivator):
14 def templates(self):
15 yield Path("activate_this.py")
16
17 def replacements(self, creator, dest_folder):
18 replacements = super(PythonActivator, self).replacements(creator, dest_folder)
19 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
20 replacements.update(
21 {
22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
23 "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
24 }
25 )
26 return replacements
27
28 @staticmethod
29 def _repr_unicode(creator, value):
30 py2 = creator.interpreter.version_info.major == 2
31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
32 value = ensure_text(repr(value.encode("utf-8"))[1:-1])
33 return value
34
[end of src/virtualenv/activation/python/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
--- a/src/virtualenv/activation/python/__init__.py
+++ b/src/virtualenv/activation/python/__init__.py
@@ -3,7 +3,6 @@
import os
from collections import OrderedDict
-from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_text
@@ -17,10 +16,11 @@
def replacements(self, creator, dest_folder):
replacements = super(PythonActivator, self).replacements(creator, dest_folder)
lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
+ win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2
replacements.update(
{
"__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
- "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
+ "__DECODE_PATH__": ("yes" if win_py2 else ""),
}
)
return replacements
| {"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -3,7 +3,6 @@\n import os\n from collections import OrderedDict\n \n-from virtualenv.info import WIN_CPYTHON_2\n from virtualenv.util.path import Path\n from virtualenv.util.six import ensure_text\n \n@@ -17,10 +16,11 @@\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n+ win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n- \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n+ \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n", "issue": "Broken activation in Windows for python3\nvirtualenv activation through `activate_this.py` is broken in Windows for python3. Check this very simple piece of code\r\n\r\n```\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom distutils.spawn import find_executable\r\n\r\nvenv = find_executable(\"virtualenv\")\r\ntestdir = os.path.join(os.path.curdir, 'testenv')\r\n\r\nsubprocess.check_output((venv, testdir, \"-p\", sys.executable))\r\n\r\nbin_path = os.path.join(testdir, \"Scripts\") if sys.platform in (\"win32\", \"cygwin\") else os.path.join(testdir, \"bin\")\r\n\r\npath = os.path.join(bin_path, \"activate_this.py\")\r\nwith open(path) as f:\r\n exec(f.read(), {\"__file__\": path})\r\n```\r\n\r\nThis generates a `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:\r\n\r\n```\r\nfor lib in \"..\\Lib\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"yes\" else path)\r\n```\r\n\r\nit's indeed normal that we get the error because we're always calling `decode()` over a str. 
Question is where the `__DECODE_PATH__` from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 is assigned to `\"yes\"`?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.info import WIN_CPYTHON_2\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}]} | 1,193 | 266 |
gh_patches_debug_13038 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BLD: change pypi name from `dtoolkit` to `my-data-toolkit`
Uploading the package fails with `400 The name 'dtoolkit' isn't allowed.`
https://github.com/Zeroto521/my-data-toolkit/runs/4673018493?check_suite_focus=true
</issue>
<code>
[start of doc/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options.
4 # For a full list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7
8 # -- Project information -----------------------------------------------------
9
10 # The version info for the project you're documenting, acts as replacement for
11 # |version| and |release|, also used in various other places throughout the
12 # built documents.
13 from __future__ import annotations
14
15 import inspect
16 import os
17 import sys
18 from datetime import datetime
19
20 import dtoolkit
21
22 version = release = dtoolkit.__version__
23 version = version.replace(".dev0", "")
24 version = version.replace(".post0", "")
25
26
27 project = "DToolKit"
28 author = "Zero <@Zeroto521>"
29 copyright = f"2021-{datetime.now().year}, {author}" # pylint: disable=redefined-builtin
30 github_url = "https://github.com/Zeroto521/my-data-toolkit"
31
32
33 # -- General configuration ---------------------------------------------------
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 "myst_parser",
40 "numpydoc",
41 "sphinx.ext.autodoc",
42 "sphinx.ext.autosummary",
43 "sphinx.ext.autosectionlabel",
44 "sphinx.ext.extlinks",
45 "sphinx.ext.intersphinx",
46 "sphinx.ext.linkcode",
47 "sphinx_toggleprompt",
48 "IPython.sphinxext.ipython_console_highlighting",
49 "IPython.sphinxext.ipython_directive",
50 "nbsphinx",
51 ]
52
53 # The suffix of source filenames.
54 source_suffix = [".rst", ".md"]
55
56 # Add any paths that contain templates here, relative to this directory.
57 templates_path = ["_templates"]
58
59 # List of patterns, relative to source directory, that match files and
60 # directories to ignore when looking for source files.
61 # This pattern also affects html_static_path and html_extra_path.
62 exclude_patterns = []
63
64
65 # -- Options for HTML output -------------------------------------------------
66
67 # The theme to use for HTML and HTML Help pages. See the documentation for
68 # a list of builtin themes.
69 html_theme = "pydata_sphinx_theme"
70
71 # Theme options are theme-specific and customize the look and feel of a theme
72 # further. For a list of options available for each theme, see the
73 # documentation.
74 html_theme_options = {
75 "search_bar_position": "sidebar",
76 "github_url": github_url,
77 "icon_links": [
78 {
79 "name": "PyPI",
80 "url": "https://pypi.org/project/my-data-toolkit",
81 "icon": "fas fa-box",
82 },
83 ],
84 }
85
86 # Add any paths that contain custom static files (such as style sheets) here,
87 # relative to this directory. They are copied after the builtin static files,
88 # so a file named "default.css" will overwrite the builtin "default.css".
89 html_static_path = ["_static"]
90
91
92 # Add redirect for previously existing pages, each item is like `(from_old, to_new)`
93
94 moved_pages = [
95 ("py-modindex", "reference"),
96 ]
97
98 html_additional_pages = {page[0]: "redirect.html" for page in moved_pages}
99
100 html_context = {"redirects": dict(moved_pages)}
101
102
103 nbsphinx_prolog = r"""
104 {% set docname = env.doc2path(env.docname, base=None) %}
105
106 .. tip::
107
108 This page was generated from `{{ docname }}`__.
109
110 __ https://github.com/zeroto521/my-data-toolkit/blob/main/doc/source/{{ docname }}
111 """
112
113
114 # --Options for sphinx extensions -----------------------------------------------
115
116 # connect docs in other projects
117 intersphinx_mapping = {
118 "python": ("http://docs.python.org/", None),
119 "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
120 "numpy": ("https://numpy.org/doc/stable/", None),
121 "sklearn": ("https://scikit-learn.org/stable/", None),
122 "geopandas": ("https://geopandas.readthedocs.io/en/stable/", None),
123 "shapely": ("https://shapely.readthedocs.io/en/stable/", None),
124 "pyproj": ("https://pyproj4.github.io/pyproj/stable/", None),
125 "pygeos": ("https://pygeos.readthedocs.io/en/stable/", None),
126 }
127
128 # extlinks alias
129 extlinks = {
130 "issue": (f"{github_url}/issues/%s", "issue#"),
131 "pr": (f"{github_url}/issues/%s", "pr#"),
132 "user": (r"https://github.com/%s", "@"),
133 }
134
135 myst_enable_extensions = [
136 "colon_fence",
137 ]
138
139 autosummary_generate = True
140
141 nbsphinx_execute = "always"
142 nbsphinx_allow_errors = True
143
144
145 # based on pandas doc/source/conf.py
146 def linkcode_resolve(domain: str, info: dict[str, str]) -> str | None:
147 """
148 Determine the URL corresponding to Python object
149 """
150
151 if domain != "py":
152 return None
153
154 modname = info["module"]
155 fullname = info["fullname"]
156
157 submod = sys.modules.get(modname)
158 if submod is None:
159 return None
160
161 obj = submod
162 for part in fullname.split("."):
163 try:
164 obj = getattr(obj, part)
165 except AttributeError:
166 return None
167
168 try:
169 fn = inspect.getsourcefile(inspect.unwrap(obj))
170 except TypeError:
171 fn = None
172
173 if not fn:
174 return None
175
176 # to fix these doc doesn't exist in dtoolkit
177 if project.lower() not in fn:
178 return None
179
180 try:
181 source, lineno = inspect.getsourcelines(obj)
182 except OSError:
183 lineno = None
184
185 linespec = f"#L{lineno}-L{lineno + len(source) - 1}" if lineno else ""
186 fn = os.path.relpath(fn, start=os.path.dirname(dtoolkit.__file__))
187
188 base_link = f"{github_url}/blob/" + "{branch}" + f"/dtoolkit/{fn}{linespec}"
189 if "post" in version:
190 return base_link.format(branch="main")
191
192 return base_link.format(branch=f"v{version}")
193
[end of doc/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -24,7 +24,7 @@
version = version.replace(".post0", "")
-project = "DToolKit"
+project = "My Data Toolkit"
author = "Zero <@Zeroto521>"
copyright = f"2021-{datetime.now().year}, {author}" # pylint: disable=redefined-builtin
github_url = "https://github.com/Zeroto521/my-data-toolkit"
@@ -174,7 +174,7 @@
return None
# to fix these doc doesn't exist in dtoolkit
- if project.lower() not in fn:
+ if "dtoolkit" not in fn:
return None
try:
| {"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -24,7 +24,7 @@\n version = version.replace(\".post0\", \"\")\n \n \n-project = \"DToolKit\"\n+project = \"My Data Toolkit\"\n author = \"Zero <@Zeroto521>\"\n copyright = f\"2021-{datetime.now().year}, {author}\" # pylint: disable=redefined-builtin\n github_url = \"https://github.com/Zeroto521/my-data-toolkit\"\n@@ -174,7 +174,7 @@\n return None\n \n # to fix these doc doesn't exist in dtoolkit\n- if project.lower() not in fn:\n+ if \"dtoolkit\" not in fn:\n return None\n \n try:\n", "issue": "BLD: change pypi name from `dtoolkit` to `my-data-toolkit`\n400 The name 'dtoolkit' isn't allowed.\r\n\r\nhttps://github.com/Zeroto521/my-data-toolkit/runs/4673018493?check_suite_focus=true\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options.\n# For a full list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n\n# -- Project information -----------------------------------------------------\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nfrom __future__ import annotations\n\nimport inspect\nimport os\nimport sys\nfrom datetime import datetime\n\nimport dtoolkit\n\nversion = release = dtoolkit.__version__\nversion = version.replace(\".dev0\", \"\")\nversion = version.replace(\".post0\", \"\")\n\n\nproject = \"DToolKit\"\nauthor = \"Zero <@Zeroto521>\"\ncopyright = f\"2021-{datetime.now().year}, {author}\" # pylint: disable=redefined-builtin\ngithub_url = \"https://github.com/Zeroto521/my-data-toolkit\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n \"numpydoc\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.linkcode\",\n \"sphinx_toggleprompt\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"IPython.sphinxext.ipython_directive\",\n \"nbsphinx\",\n]\n\n# The suffix of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pydata_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"search_bar_position\": \"sidebar\",\n \"github_url\": github_url,\n \"icon_links\": [\n {\n \"name\": \"PyPI\",\n \"url\": \"https://pypi.org/project/my-data-toolkit\",\n \"icon\": \"fas fa-box\",\n },\n ],\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# Add redirect for previously existing pages, each item is like `(from_old, to_new)`\n\nmoved_pages = [\n (\"py-modindex\", \"reference\"),\n]\n\nhtml_additional_pages = {page[0]: \"redirect.html\" for page in moved_pages}\n\nhtml_context = {\"redirects\": dict(moved_pages)}\n\n\nnbsphinx_prolog = r\"\"\"\n{% set docname = env.doc2path(env.docname, base=None) %}\n\n.. tip::\n\n This page was generated from `{{ docname }}`__.\n\n __ https://github.com/zeroto521/my-data-toolkit/blob/main/doc/source/{{ docname }}\n\"\"\"\n\n\n# --Options for sphinx extensions -----------------------------------------------\n\n# connect docs in other projects\nintersphinx_mapping = {\n \"python\": (\"http://docs.python.org/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"sklearn\": (\"https://scikit-learn.org/stable/\", None),\n \"geopandas\": (\"https://geopandas.readthedocs.io/en/stable/\", None),\n \"shapely\": (\"https://shapely.readthedocs.io/en/stable/\", None),\n \"pyproj\": (\"https://pyproj4.github.io/pyproj/stable/\", None),\n \"pygeos\": (\"https://pygeos.readthedocs.io/en/stable/\", None),\n}\n\n# extlinks alias\nextlinks = {\n \"issue\": (f\"{github_url}/issues/%s\", \"issue#\"),\n \"pr\": (f\"{github_url}/issues/%s\", \"pr#\"),\n \"user\": (r\"https://github.com/%s\", \"@\"),\n}\n\nmyst_enable_extensions = [\n \"colon_fence\",\n]\n\nautosummary_generate = True\n\nnbsphinx_execute = \"always\"\nnbsphinx_allow_errors = True\n\n\n# based on pandas doc/source/conf.py\ndef linkcode_resolve(domain: str, info: dict[str, str]) -> str | None:\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n\n if domain != \"py\":\n return None\n\n modname = info[\"module\"]\n fullname = info[\"fullname\"]\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split(\".\"):\n try:\n obj = getattr(obj, part)\n except AttributeError:\n return None\n\n try:\n fn = inspect.getsourcefile(inspect.unwrap(obj))\n except TypeError:\n fn = None\n\n if not fn:\n return None\n\n # to fix these doc doesn't exist in dtoolkit\n if project.lower() not in fn:\n return None\n\n try:\n source, lineno = inspect.getsourcelines(obj)\n except OSError:\n lineno = None\n\n linespec = f\"#L{lineno}-L{lineno + len(source) - 1}\" if lineno else \"\"\n fn = os.path.relpath(fn, start=os.path.dirname(dtoolkit.__file__))\n\n base_link = f\"{github_url}/blob/\" + \"{branch}\" + f\"/dtoolkit/{fn}{linespec}\"\n if \"post\" in version:\n return base_link.format(branch=\"main\")\n\n return base_link.format(branch=f\"v{version}\")\n", "path": "doc/source/conf.py"}]} | 2,476 | 185 |
gh_patches_debug_34369 | rasdani/github-patches | git_diff | ckan__ckan-5754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translation missing for "Data explorer" in reclineview
**CKAN version=2.8.6**
**Describe the bug**
When switching to the French language, the tab name "Data Explorer" on a resource preview page is not translated.
**Steps to reproduce**
1. Install CKAN from source
2. Set up a dataset with a previewable format (e.g. CSV)
3. Preview the data
4. Select the French language by changing the URL to http://XXX.X.X.X:XXXXfr/XXXXXXX
**Expected behavior**
The tab name should be translated to something like "Explorateur de données".
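For illustration, a rough sketch of the kind of change that usually resolves this in a CKAN view plugin: wrapping the user-facing title in the gettext helper so it can be extracted into the translation catalogues (the plugin already uses `p.toolkit._` for its `default_title` values, so the same helper applies; the exact patch may differ):
```
from ckan.plugins.toolkit import _  # gettext helper used for translatable strings

def info(self):
    return {'name': 'recline_view',
            # _() marks the string for extraction into the .po files, so the tab
            # label can be rendered as "Explorateur de données" in French.
            'title': _('Data Explorer'),
            'default_title': _('Data Explorer')}
```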
</issue>
<code>
[start of ckanext/reclineview/plugin.py]
1 # encoding: utf-8
2
3 from logging import getLogger
4
5 import six
6
7 from ckan.common import json, config
8 import ckan.plugins as p
9 import ckan.plugins.toolkit as toolkit
10
11 log = getLogger(__name__)
12 ignore_empty = p.toolkit.get_validator('ignore_empty')
13 natural_number_validator = p.toolkit.get_validator('natural_number_validator')
14 Invalid = p.toolkit.Invalid
15
16
17 def get_mapview_config():
18 '''
19 Extracts and returns map view configuration of the reclineview extension.
20 '''
21 namespace = 'ckanext.spatial.common_map.'
22 return {k.replace(namespace, ''): v
23 for k, v in six.iteritems(config)
24 if k.startswith(namespace)}
25
26
27 def get_dataproxy_url():
28 '''
29 Returns the value of the ckan.recline.dataproxy_url config option
30 '''
31 return config.get(
32 'ckan.recline.dataproxy_url', '//jsonpdataproxy.appspot.com')
33
34
35 def in_list(list_possible_values):
36 '''
37 Validator that checks that the input value is one of the given
38 possible values.
39
40 :param list_possible_values: function that returns list of possible values
41 for validated field
42 :type possible_values: function
43 '''
44 def validate(key, data, errors, context):
45 if not data[key] in list_possible_values():
46 raise Invalid('"{0}" is not a valid parameter'.format(data[key]))
47 return validate
48
49
50 def datastore_fields(resource, valid_field_types):
51 '''
52 Return a list of all datastore fields for a given resource, as long as
53 the datastore field type is in valid_field_types.
54
55 :param resource: resource dict
56 :type resource: dict
57 :param valid_field_types: field types to include in returned list
58 :type valid_field_types: list of strings
59 '''
60 data = {'resource_id': resource['id'], 'limit': 0}
61 fields = toolkit.get_action('datastore_search')({}, data)['fields']
62 return [{'value': f['id'], 'text': f['id']} for f in fields
63 if f['type'] in valid_field_types]
64
65
66 class ReclineViewBase(p.SingletonPlugin):
67 '''
68 This base class for the Recline view extensions.
69 '''
70 p.implements(p.IConfigurer, inherit=True)
71 p.implements(p.IResourceView, inherit=True)
72 p.implements(p.ITemplateHelpers, inherit=True)
73
74 def update_config(self, config):
75 '''
76 Set up the resource library, public directory and
77 template directory for the view
78 '''
79 toolkit.add_public_directory(config, 'theme/public')
80 toolkit.add_template_directory(config, 'theme/templates')
81 toolkit.add_resource('theme/public', 'ckanext-reclineview')
82
83 def can_view(self, data_dict):
84 resource = data_dict['resource']
85 return (resource.get('datastore_active') or
86 '_datastore_only_resource' in resource.get('url', ''))
87
88 def setup_template_variables(self, context, data_dict):
89 return {'resource_json': json.dumps(data_dict['resource']),
90 'resource_view_json': json.dumps(data_dict['resource_view'])}
91
92 def view_template(self, context, data_dict):
93 return 'recline_view.html'
94
95 def get_helpers(self):
96 return {
97 'get_map_config': get_mapview_config,
98 'get_dataproxy_url': get_dataproxy_url,
99 }
100
101
102 class ReclineView(ReclineViewBase):
103 '''
104 This extension views resources using a Recline MultiView.
105 '''
106
107 def info(self):
108 return {'name': 'recline_view',
109 'title': 'Data Explorer',
110 'filterable': True,
111 'icon': 'table',
112 'requires_datastore': False,
113 'default_title': p.toolkit._('Data Explorer'),
114 }
115
116 def can_view(self, data_dict):
117 resource = data_dict['resource']
118
119 if (resource.get('datastore_active') or
120 '_datastore_only_resource' in resource.get('url', '')):
121 return True
122 resource_format = resource.get('format', None)
123 if resource_format:
124 return resource_format.lower() in [
125 'csv', 'xls', 'xlsx', 'ods', 'tsv'
126 ]
127 else:
128 return False
129
130
131 class ReclineGridView(ReclineViewBase):
132 '''
133 This extension views resources using a Recline grid.
134 '''
135
136 def info(self):
137 return {'name': 'recline_grid_view',
138 'title': 'Grid',
139 'filterable': True,
140 'icon': 'table',
141 'requires_datastore': True,
142 'default_title': p.toolkit._('Table'),
143 }
144
145
146 class ReclineGraphView(ReclineViewBase):
147 '''
148 This extension views resources using a Recline graph.
149 '''
150
151 graph_types = [{'value': 'lines-and-points',
152 'text': 'Lines and points'},
153 {'value': 'lines', 'text': 'Lines'},
154 {'value': 'points', 'text': 'Points'},
155 {'value': 'bars', 'text': 'Bars'},
156 {'value': 'columns', 'text': 'Columns'}]
157
158 datastore_fields = []
159
160 datastore_field_types = ['numeric', 'int4', 'timestamp']
161
162 def list_graph_types(self):
163 return [t['value'] for t in self.graph_types]
164
165 def list_datastore_fields(self):
166 return [t['value'] for t in self.datastore_fields]
167
168 def info(self):
169 # in_list validator here is passed functions because this
170 # method does not know what the possible values of the
171 # datastore fields are (requires a datastore search)
172 schema = {
173 'offset': [ignore_empty, natural_number_validator],
174 'limit': [ignore_empty, natural_number_validator],
175 'graph_type': [ignore_empty, in_list(self.list_graph_types)],
176 'group': [ignore_empty, in_list(self.list_datastore_fields)],
177 'series': [ignore_empty, in_list(self.list_datastore_fields)]
178 }
179 return {'name': 'recline_graph_view',
180 'title': 'Graph',
181 'filterable': True,
182 'icon': 'bar-chart-o',
183 'requires_datastore': True,
184 'schema': schema,
185 'default_title': p.toolkit._('Graph'),
186 }
187
188 def setup_template_variables(self, context, data_dict):
189 self.datastore_fields = datastore_fields(data_dict['resource'],
190 self.datastore_field_types)
191 vars = ReclineViewBase.setup_template_variables(self, context,
192 data_dict)
193 vars.update({'graph_types': self.graph_types,
194 'graph_fields': self.datastore_fields})
195 return vars
196
197 def form_template(self, context, data_dict):
198 return 'recline_graph_form.html'
199
200
201 class ReclineMapView(ReclineViewBase):
202 '''
203 This extension views resources using a Recline map.
204 '''
205
206 map_field_types = [{'value': 'lat_long',
207 'text': 'Latitude / Longitude fields'},
208 {'value': 'geojson', 'text': 'GeoJSON'}]
209
210 datastore_fields = []
211
212 datastore_field_latlon_types = ['numeric']
213
214 datastore_field_geojson_types = ['text']
215
216 def list_map_field_types(self):
217 return [t['value'] for t in self.map_field_types]
218
219 def list_datastore_fields(self):
220 return [t['value'] for t in self.datastore_fields]
221
222 def info(self):
223 # in_list validator here is passed functions because this
224 # method does not know what the possible values of the
225 # datastore fields are (requires a datastore search)
226 schema = {
227 'offset': [ignore_empty, natural_number_validator],
228 'limit': [ignore_empty, natural_number_validator],
229 'map_field_type': [ignore_empty,
230 in_list(self.list_map_field_types)],
231 'latitude_field': [ignore_empty,
232 in_list(self.list_datastore_fields)],
233 'longitude_field': [ignore_empty,
234 in_list(self.list_datastore_fields)],
235 'geojson_field': [ignore_empty,
236 in_list(self.list_datastore_fields)],
237 'auto_zoom': [ignore_empty],
238 'cluster_markers': [ignore_empty]
239 }
240 return {'name': 'recline_map_view',
241 'title': 'Map',
242 'schema': schema,
243 'filterable': True,
244 'icon': 'map-marker',
245 'default_title': p.toolkit._('Map'),
246 }
247
248 def setup_template_variables(self, context, data_dict):
249 map_latlon_fields = datastore_fields(
250 data_dict['resource'], self.datastore_field_latlon_types)
251 map_geojson_fields = datastore_fields(
252 data_dict['resource'], self.datastore_field_geojson_types)
253
254 self.datastore_fields = map_latlon_fields + map_geojson_fields
255
256 vars = ReclineViewBase.setup_template_variables(self, context,
257 data_dict)
258 vars.update({'map_field_types': self.map_field_types,
259 'map_latlon_fields': map_latlon_fields,
260 'map_geojson_fields': map_geojson_fields
261 })
262 return vars
263
264 def form_template(self, context, data_dict):
265 return 'recline_map_form.html'
266
[end of ckanext/reclineview/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext/reclineview/plugin.py b/ckanext/reclineview/plugin.py
--- a/ckanext/reclineview/plugin.py
+++ b/ckanext/reclineview/plugin.py
@@ -7,6 +7,7 @@
from ckan.common import json, config
import ckan.plugins as p
import ckan.plugins.toolkit as toolkit
+from ckan.plugins.toolkit import _
log = getLogger(__name__)
ignore_empty = p.toolkit.get_validator('ignore_empty')
@@ -106,7 +107,7 @@
def info(self):
return {'name': 'recline_view',
- 'title': 'Data Explorer',
+ 'title': _('Data Explorer'),
'filterable': True,
'icon': 'table',
'requires_datastore': False,
@@ -135,7 +136,7 @@
def info(self):
return {'name': 'recline_grid_view',
- 'title': 'Grid',
+ 'title': _('Grid'),
'filterable': True,
'icon': 'table',
'requires_datastore': True,
@@ -177,7 +178,7 @@
'series': [ignore_empty, in_list(self.list_datastore_fields)]
}
return {'name': 'recline_graph_view',
- 'title': 'Graph',
+ 'title': _('Graph'),
'filterable': True,
'icon': 'bar-chart-o',
'requires_datastore': True,
@@ -238,7 +239,7 @@
'cluster_markers': [ignore_empty]
}
return {'name': 'recline_map_view',
- 'title': 'Map',
+ 'title': _('Map'),
'schema': schema,
'filterable': True,
'icon': 'map-marker',
| {"golden_diff": "diff --git a/ckanext/reclineview/plugin.py b/ckanext/reclineview/plugin.py\n--- a/ckanext/reclineview/plugin.py\n+++ b/ckanext/reclineview/plugin.py\n@@ -7,6 +7,7 @@\n from ckan.common import json, config\n import ckan.plugins as p\n import ckan.plugins.toolkit as toolkit\n+from ckan.plugins.toolkit import _\n \n log = getLogger(__name__)\n ignore_empty = p.toolkit.get_validator('ignore_empty')\n@@ -106,7 +107,7 @@\n \n def info(self):\n return {'name': 'recline_view',\n- 'title': 'Data Explorer',\n+ 'title': _('Data Explorer'),\n 'filterable': True,\n 'icon': 'table',\n 'requires_datastore': False,\n@@ -135,7 +136,7 @@\n \n def info(self):\n return {'name': 'recline_grid_view',\n- 'title': 'Grid',\n+ 'title': _('Grid'),\n 'filterable': True,\n 'icon': 'table',\n 'requires_datastore': True,\n@@ -177,7 +178,7 @@\n 'series': [ignore_empty, in_list(self.list_datastore_fields)]\n }\n return {'name': 'recline_graph_view',\n- 'title': 'Graph',\n+ 'title': _('Graph'),\n 'filterable': True,\n 'icon': 'bar-chart-o',\n 'requires_datastore': True,\n@@ -238,7 +239,7 @@\n 'cluster_markers': [ignore_empty]\n }\n return {'name': 'recline_map_view',\n- 'title': 'Map',\n+ 'title': _('Map'),\n 'schema': schema,\n 'filterable': True,\n 'icon': 'map-marker',\n", "issue": "Translation missing for \"Data explorer\" in reclineview\n**CKAN version=2.8.6**\r\n\r\n**Describe the bug**\r\nWhen switching to the french language, the tab name \"data explorer on a resource preview page is not translated\r\n\r\n**Steps to reproduce**\r\n1. Installing ckan from source\r\n2. Set up a dataset with a previewable format (e.g csv)\r\n3. preview the data \r\n4. Select the french language by changing the url t0 http://XXX.X.X.X:XXXXfr/XXXXXXX\r\n\r\n\r\n**Expected behavior**\r\nThe tab name shoud be translated to something like \"Explorateur de donn\u00e9es\"\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom logging import getLogger\n\nimport six\n\nfrom ckan.common import json, config\nimport ckan.plugins as p\nimport ckan.plugins.toolkit as toolkit\n\nlog = getLogger(__name__)\nignore_empty = p.toolkit.get_validator('ignore_empty')\nnatural_number_validator = p.toolkit.get_validator('natural_number_validator')\nInvalid = p.toolkit.Invalid\n\n\ndef get_mapview_config():\n '''\n Extracts and returns map view configuration of the reclineview extension.\n '''\n namespace = 'ckanext.spatial.common_map.'\n return {k.replace(namespace, ''): v\n for k, v in six.iteritems(config)\n if k.startswith(namespace)}\n\n\ndef get_dataproxy_url():\n '''\n Returns the value of the ckan.recline.dataproxy_url config option\n '''\n return config.get(\n 'ckan.recline.dataproxy_url', '//jsonpdataproxy.appspot.com')\n\n\ndef in_list(list_possible_values):\n '''\n Validator that checks that the input value is one of the given\n possible values.\n\n :param list_possible_values: function that returns list of possible values\n for validated field\n :type possible_values: function\n '''\n def validate(key, data, errors, context):\n if not data[key] in list_possible_values():\n raise Invalid('\"{0}\" is not a valid parameter'.format(data[key]))\n return validate\n\n\ndef datastore_fields(resource, valid_field_types):\n '''\n Return a list of all datastore fields for a given resource, as long as\n the datastore field type is in valid_field_types.\n\n :param resource: resource dict\n :type resource: dict\n :param valid_field_types: field types to include in returned list\n :type valid_field_types: list of strings\n '''\n data 
= {'resource_id': resource['id'], 'limit': 0}\n fields = toolkit.get_action('datastore_search')({}, data)['fields']\n return [{'value': f['id'], 'text': f['id']} for f in fields\n if f['type'] in valid_field_types]\n\n\nclass ReclineViewBase(p.SingletonPlugin):\n '''\n This base class for the Recline view extensions.\n '''\n p.implements(p.IConfigurer, inherit=True)\n p.implements(p.IResourceView, inherit=True)\n p.implements(p.ITemplateHelpers, inherit=True)\n\n def update_config(self, config):\n '''\n Set up the resource library, public directory and\n template directory for the view\n '''\n toolkit.add_public_directory(config, 'theme/public')\n toolkit.add_template_directory(config, 'theme/templates')\n toolkit.add_resource('theme/public', 'ckanext-reclineview')\n\n def can_view(self, data_dict):\n resource = data_dict['resource']\n return (resource.get('datastore_active') or\n '_datastore_only_resource' in resource.get('url', ''))\n\n def setup_template_variables(self, context, data_dict):\n return {'resource_json': json.dumps(data_dict['resource']),\n 'resource_view_json': json.dumps(data_dict['resource_view'])}\n\n def view_template(self, context, data_dict):\n return 'recline_view.html'\n\n def get_helpers(self):\n return {\n 'get_map_config': get_mapview_config,\n 'get_dataproxy_url': get_dataproxy_url,\n }\n\n\nclass ReclineView(ReclineViewBase):\n '''\n This extension views resources using a Recline MultiView.\n '''\n\n def info(self):\n return {'name': 'recline_view',\n 'title': 'Data Explorer',\n 'filterable': True,\n 'icon': 'table',\n 'requires_datastore': False,\n 'default_title': p.toolkit._('Data Explorer'),\n }\n\n def can_view(self, data_dict):\n resource = data_dict['resource']\n\n if (resource.get('datastore_active') or\n '_datastore_only_resource' in resource.get('url', '')):\n return True\n resource_format = resource.get('format', None)\n if resource_format:\n return resource_format.lower() in [\n 'csv', 'xls', 'xlsx', 'ods', 'tsv'\n ]\n else:\n return False\n\n\nclass ReclineGridView(ReclineViewBase):\n '''\n This extension views resources using a Recline grid.\n '''\n\n def info(self):\n return {'name': 'recline_grid_view',\n 'title': 'Grid',\n 'filterable': True,\n 'icon': 'table',\n 'requires_datastore': True,\n 'default_title': p.toolkit._('Table'),\n }\n\n\nclass ReclineGraphView(ReclineViewBase):\n '''\n This extension views resources using a Recline graph.\n '''\n\n graph_types = [{'value': 'lines-and-points',\n 'text': 'Lines and points'},\n {'value': 'lines', 'text': 'Lines'},\n {'value': 'points', 'text': 'Points'},\n {'value': 'bars', 'text': 'Bars'},\n {'value': 'columns', 'text': 'Columns'}]\n\n datastore_fields = []\n\n datastore_field_types = ['numeric', 'int4', 'timestamp']\n\n def list_graph_types(self):\n return [t['value'] for t in self.graph_types]\n\n def list_datastore_fields(self):\n return [t['value'] for t in self.datastore_fields]\n\n def info(self):\n # in_list validator here is passed functions because this\n # method does not know what the possible values of the\n # datastore fields are (requires a datastore search)\n schema = {\n 'offset': [ignore_empty, natural_number_validator],\n 'limit': [ignore_empty, natural_number_validator],\n 'graph_type': [ignore_empty, in_list(self.list_graph_types)],\n 'group': [ignore_empty, in_list(self.list_datastore_fields)],\n 'series': [ignore_empty, in_list(self.list_datastore_fields)]\n }\n return {'name': 'recline_graph_view',\n 'title': 'Graph',\n 'filterable': True,\n 'icon': 'bar-chart-o',\n 
'requires_datastore': True,\n 'schema': schema,\n 'default_title': p.toolkit._('Graph'),\n }\n\n def setup_template_variables(self, context, data_dict):\n self.datastore_fields = datastore_fields(data_dict['resource'],\n self.datastore_field_types)\n vars = ReclineViewBase.setup_template_variables(self, context,\n data_dict)\n vars.update({'graph_types': self.graph_types,\n 'graph_fields': self.datastore_fields})\n return vars\n\n def form_template(self, context, data_dict):\n return 'recline_graph_form.html'\n\n\nclass ReclineMapView(ReclineViewBase):\n '''\n This extension views resources using a Recline map.\n '''\n\n map_field_types = [{'value': 'lat_long',\n 'text': 'Latitude / Longitude fields'},\n {'value': 'geojson', 'text': 'GeoJSON'}]\n\n datastore_fields = []\n\n datastore_field_latlon_types = ['numeric']\n\n datastore_field_geojson_types = ['text']\n\n def list_map_field_types(self):\n return [t['value'] for t in self.map_field_types]\n\n def list_datastore_fields(self):\n return [t['value'] for t in self.datastore_fields]\n\n def info(self):\n # in_list validator here is passed functions because this\n # method does not know what the possible values of the\n # datastore fields are (requires a datastore search)\n schema = {\n 'offset': [ignore_empty, natural_number_validator],\n 'limit': [ignore_empty, natural_number_validator],\n 'map_field_type': [ignore_empty,\n in_list(self.list_map_field_types)],\n 'latitude_field': [ignore_empty,\n in_list(self.list_datastore_fields)],\n 'longitude_field': [ignore_empty,\n in_list(self.list_datastore_fields)],\n 'geojson_field': [ignore_empty,\n in_list(self.list_datastore_fields)],\n 'auto_zoom': [ignore_empty],\n 'cluster_markers': [ignore_empty]\n }\n return {'name': 'recline_map_view',\n 'title': 'Map',\n 'schema': schema,\n 'filterable': True,\n 'icon': 'map-marker',\n 'default_title': p.toolkit._('Map'),\n }\n\n def setup_template_variables(self, context, data_dict):\n map_latlon_fields = datastore_fields(\n data_dict['resource'], self.datastore_field_latlon_types)\n map_geojson_fields = datastore_fields(\n data_dict['resource'], self.datastore_field_geojson_types)\n\n self.datastore_fields = map_latlon_fields + map_geojson_fields\n\n vars = ReclineViewBase.setup_template_variables(self, context,\n data_dict)\n vars.update({'map_field_types': self.map_field_types,\n 'map_latlon_fields': map_latlon_fields,\n 'map_geojson_fields': map_geojson_fields\n })\n return vars\n\n def form_template(self, context, data_dict):\n return 'recline_map_form.html'\n", "path": "ckanext/reclineview/plugin.py"}]} | 3,337 | 406 |
gh_patches_debug_16221 | rasdani/github-patches | git_diff | pyca__cryptography-5462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow bytearray in padding and unpadding
It looks like the padding code requires the data passed to padding and unpadding to be bytes, and does not allow bytearrays. I saw in #4409 that bytes-like objects were allowed as key material and such; does it make sense to allow them to be given to padders and unpadders as well? If so, I think it's just two instances of `_check_bytes` that would need to be changed to `_check_byteslike` in https://github.com/pyca/cryptography/blob/master/src/cryptography/hazmat/primitives/padding.py, lines 43 and 67.
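For illustration only, a minimal sketch of the difference between a strict bytes check and a bytes-like check (hypothetical helper bodies, not the library's actual implementations):
```
def _check_bytes(name, value):
    # strict: rejects bytearray and memoryview
    if not isinstance(value, bytes):
        raise TypeError("{0} must be bytes".format(name))


def _check_byteslike(name, value):
    # lenient: accepts anything exposing the buffer protocol (bytes, bytearray, memoryview, ...)
    try:
        memoryview(value)
    except TypeError:
        raise TypeError("{0} must be bytes-like".format(name))


_check_byteslike("data", bytearray(b"payload to pad"))  # passes
_check_bytes("data", bytearray(b"payload to pad"))      # raises TypeError
```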
</issue>
<code>
[start of src/cryptography/hazmat/primitives/padding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8
9 import six
10
11 from cryptography import utils
12 from cryptography.exceptions import AlreadyFinalized
13 from cryptography.hazmat.bindings._padding import lib
14
15
16 @six.add_metaclass(abc.ABCMeta)
17 class PaddingContext(object):
18 @abc.abstractmethod
19 def update(self, data):
20 """
21 Pads the provided bytes and returns any available data as bytes.
22 """
23
24 @abc.abstractmethod
25 def finalize(self):
26 """
27 Finalize the padding, returns bytes.
28 """
29
30
31 def _byte_padding_check(block_size):
32 if not (0 <= block_size <= 2040):
33 raise ValueError("block_size must be in range(0, 2041).")
34
35 if block_size % 8 != 0:
36 raise ValueError("block_size must be a multiple of 8.")
37
38
39 def _byte_padding_update(buffer_, data, block_size):
40 if buffer_ is None:
41 raise AlreadyFinalized("Context was already finalized.")
42
43 utils._check_bytes("data", data)
44
45 buffer_ += data
46
47 finished_blocks = len(buffer_) // (block_size // 8)
48
49 result = buffer_[: finished_blocks * (block_size // 8)]
50 buffer_ = buffer_[finished_blocks * (block_size // 8) :]
51
52 return buffer_, result
53
54
55 def _byte_padding_pad(buffer_, block_size, paddingfn):
56 if buffer_ is None:
57 raise AlreadyFinalized("Context was already finalized.")
58
59 pad_size = block_size // 8 - len(buffer_)
60 return buffer_ + paddingfn(pad_size)
61
62
63 def _byte_unpadding_update(buffer_, data, block_size):
64 if buffer_ is None:
65 raise AlreadyFinalized("Context was already finalized.")
66
67 utils._check_bytes("data", data)
68
69 buffer_ += data
70
71 finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0)
72
73 result = buffer_[: finished_blocks * (block_size // 8)]
74 buffer_ = buffer_[finished_blocks * (block_size // 8) :]
75
76 return buffer_, result
77
78
79 def _byte_unpadding_check(buffer_, block_size, checkfn):
80 if buffer_ is None:
81 raise AlreadyFinalized("Context was already finalized.")
82
83 if len(buffer_) != block_size // 8:
84 raise ValueError("Invalid padding bytes.")
85
86 valid = checkfn(buffer_, block_size // 8)
87
88 if not valid:
89 raise ValueError("Invalid padding bytes.")
90
91 pad_size = six.indexbytes(buffer_, -1)
92 return buffer_[:-pad_size]
93
94
95 class PKCS7(object):
96 def __init__(self, block_size):
97 _byte_padding_check(block_size)
98 self.block_size = block_size
99
100 def padder(self):
101 return _PKCS7PaddingContext(self.block_size)
102
103 def unpadder(self):
104 return _PKCS7UnpaddingContext(self.block_size)
105
106
107 @utils.register_interface(PaddingContext)
108 class _PKCS7PaddingContext(object):
109 def __init__(self, block_size):
110 self.block_size = block_size
111 # TODO: more copies than necessary, we should use zero-buffer (#193)
112 self._buffer = b""
113
114 def update(self, data):
115 self._buffer, result = _byte_padding_update(
116 self._buffer, data, self.block_size
117 )
118 return result
119
120 def _padding(self, size):
121 return six.int2byte(size) * size
122
123 def finalize(self):
124 result = _byte_padding_pad(
125 self._buffer, self.block_size, self._padding
126 )
127 self._buffer = None
128 return result
129
130
131 @utils.register_interface(PaddingContext)
132 class _PKCS7UnpaddingContext(object):
133 def __init__(self, block_size):
134 self.block_size = block_size
135 # TODO: more copies than necessary, we should use zero-buffer (#193)
136 self._buffer = b""
137
138 def update(self, data):
139 self._buffer, result = _byte_unpadding_update(
140 self._buffer, data, self.block_size
141 )
142 return result
143
144 def finalize(self):
145 result = _byte_unpadding_check(
146 self._buffer, self.block_size, lib.Cryptography_check_pkcs7_padding
147 )
148 self._buffer = None
149 return result
150
151
152 class ANSIX923(object):
153 def __init__(self, block_size):
154 _byte_padding_check(block_size)
155 self.block_size = block_size
156
157 def padder(self):
158 return _ANSIX923PaddingContext(self.block_size)
159
160 def unpadder(self):
161 return _ANSIX923UnpaddingContext(self.block_size)
162
163
164 @utils.register_interface(PaddingContext)
165 class _ANSIX923PaddingContext(object):
166 def __init__(self, block_size):
167 self.block_size = block_size
168 # TODO: more copies than necessary, we should use zero-buffer (#193)
169 self._buffer = b""
170
171 def update(self, data):
172 self._buffer, result = _byte_padding_update(
173 self._buffer, data, self.block_size
174 )
175 return result
176
177 def _padding(self, size):
178 return six.int2byte(0) * (size - 1) + six.int2byte(size)
179
180 def finalize(self):
181 result = _byte_padding_pad(
182 self._buffer, self.block_size, self._padding
183 )
184 self._buffer = None
185 return result
186
187
188 @utils.register_interface(PaddingContext)
189 class _ANSIX923UnpaddingContext(object):
190 def __init__(self, block_size):
191 self.block_size = block_size
192 # TODO: more copies than necessary, we should use zero-buffer (#193)
193 self._buffer = b""
194
195 def update(self, data):
196 self._buffer, result = _byte_unpadding_update(
197 self._buffer, data, self.block_size
198 )
199 return result
200
201 def finalize(self):
202 result = _byte_unpadding_check(
203 self._buffer,
204 self.block_size,
205 lib.Cryptography_check_ansix923_padding,
206 )
207 self._buffer = None
208 return result
209
[end of src/cryptography/hazmat/primitives/padding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/padding.py b/src/cryptography/hazmat/primitives/padding.py
--- a/src/cryptography/hazmat/primitives/padding.py
+++ b/src/cryptography/hazmat/primitives/padding.py
@@ -40,9 +40,9 @@
if buffer_ is None:
raise AlreadyFinalized("Context was already finalized.")
- utils._check_bytes("data", data)
+ utils._check_byteslike("data", data)
- buffer_ += data
+ buffer_ += bytes(data)
finished_blocks = len(buffer_) // (block_size // 8)
@@ -64,9 +64,9 @@
if buffer_ is None:
raise AlreadyFinalized("Context was already finalized.")
- utils._check_bytes("data", data)
+ utils._check_byteslike("data", data)
- buffer_ += data
+ buffer_ += bytes(data)
finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0)
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/padding.py b/src/cryptography/hazmat/primitives/padding.py\n--- a/src/cryptography/hazmat/primitives/padding.py\n+++ b/src/cryptography/hazmat/primitives/padding.py\n@@ -40,9 +40,9 @@\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n \n- utils._check_bytes(\"data\", data)\n+ utils._check_byteslike(\"data\", data)\n \n- buffer_ += data\n+ buffer_ += bytes(data)\n \n finished_blocks = len(buffer_) // (block_size // 8)\n \n@@ -64,9 +64,9 @@\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n \n- utils._check_bytes(\"data\", data)\n+ utils._check_byteslike(\"data\", data)\n \n- buffer_ += data\n+ buffer_ += bytes(data)\n \n finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0)\n", "issue": "Allow bytearray in padding and unpadding\nIt looks like in the padding code, it requires the data passed to padding and unpadding to be bytes, and does not allow bytearray's. I saw in #4409 that bytes like objects were allowed as key material and such, does it make sense to allow them to be given to padders and unpadders? If so, I think it's just two instances of `_check_bytes` that would need to changed to `_check_byteslike` in https://github.com/pyca/cryptography/blob/master/src/cryptography/hazmat/primitives/padding.py, line 43 and 67.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import AlreadyFinalized\nfrom cryptography.hazmat.bindings._padding import lib\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass PaddingContext(object):\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Pads the provided bytes and returns any available data as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalize the padding, returns bytes.\n \"\"\"\n\n\ndef _byte_padding_check(block_size):\n if not (0 <= block_size <= 2040):\n raise ValueError(\"block_size must be in range(0, 2041).\")\n\n if block_size % 8 != 0:\n raise ValueError(\"block_size must be a multiple of 8.\")\n\n\ndef _byte_padding_update(buffer_, data, block_size):\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n\n utils._check_bytes(\"data\", data)\n\n buffer_ += data\n\n finished_blocks = len(buffer_) // (block_size // 8)\n\n result = buffer_[: finished_blocks * (block_size // 8)]\n buffer_ = buffer_[finished_blocks * (block_size // 8) :]\n\n return buffer_, result\n\n\ndef _byte_padding_pad(buffer_, block_size, paddingfn):\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n\n pad_size = block_size // 8 - len(buffer_)\n return buffer_ + paddingfn(pad_size)\n\n\ndef _byte_unpadding_update(buffer_, data, block_size):\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n\n utils._check_bytes(\"data\", data)\n\n buffer_ += data\n\n finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0)\n\n result = buffer_[: finished_blocks * (block_size // 8)]\n buffer_ = buffer_[finished_blocks * (block_size // 8) :]\n\n return buffer_, result\n\n\ndef _byte_unpadding_check(buffer_, block_size, checkfn):\n if buffer_ is None:\n raise AlreadyFinalized(\"Context was 
already finalized.\")\n\n if len(buffer_) != block_size // 8:\n raise ValueError(\"Invalid padding bytes.\")\n\n valid = checkfn(buffer_, block_size // 8)\n\n if not valid:\n raise ValueError(\"Invalid padding bytes.\")\n\n pad_size = six.indexbytes(buffer_, -1)\n return buffer_[:-pad_size]\n\n\nclass PKCS7(object):\n def __init__(self, block_size):\n _byte_padding_check(block_size)\n self.block_size = block_size\n\n def padder(self):\n return _PKCS7PaddingContext(self.block_size)\n\n def unpadder(self):\n return _PKCS7UnpaddingContext(self.block_size)\n\n\[email protected]_interface(PaddingContext)\nclass _PKCS7PaddingContext(object):\n def __init__(self, block_size):\n self.block_size = block_size\n # TODO: more copies than necessary, we should use zero-buffer (#193)\n self._buffer = b\"\"\n\n def update(self, data):\n self._buffer, result = _byte_padding_update(\n self._buffer, data, self.block_size\n )\n return result\n\n def _padding(self, size):\n return six.int2byte(size) * size\n\n def finalize(self):\n result = _byte_padding_pad(\n self._buffer, self.block_size, self._padding\n )\n self._buffer = None\n return result\n\n\[email protected]_interface(PaddingContext)\nclass _PKCS7UnpaddingContext(object):\n def __init__(self, block_size):\n self.block_size = block_size\n # TODO: more copies than necessary, we should use zero-buffer (#193)\n self._buffer = b\"\"\n\n def update(self, data):\n self._buffer, result = _byte_unpadding_update(\n self._buffer, data, self.block_size\n )\n return result\n\n def finalize(self):\n result = _byte_unpadding_check(\n self._buffer, self.block_size, lib.Cryptography_check_pkcs7_padding\n )\n self._buffer = None\n return result\n\n\nclass ANSIX923(object):\n def __init__(self, block_size):\n _byte_padding_check(block_size)\n self.block_size = block_size\n\n def padder(self):\n return _ANSIX923PaddingContext(self.block_size)\n\n def unpadder(self):\n return _ANSIX923UnpaddingContext(self.block_size)\n\n\[email protected]_interface(PaddingContext)\nclass _ANSIX923PaddingContext(object):\n def __init__(self, block_size):\n self.block_size = block_size\n # TODO: more copies than necessary, we should use zero-buffer (#193)\n self._buffer = b\"\"\n\n def update(self, data):\n self._buffer, result = _byte_padding_update(\n self._buffer, data, self.block_size\n )\n return result\n\n def _padding(self, size):\n return six.int2byte(0) * (size - 1) + six.int2byte(size)\n\n def finalize(self):\n result = _byte_padding_pad(\n self._buffer, self.block_size, self._padding\n )\n self._buffer = None\n return result\n\n\[email protected]_interface(PaddingContext)\nclass _ANSIX923UnpaddingContext(object):\n def __init__(self, block_size):\n self.block_size = block_size\n # TODO: more copies than necessary, we should use zero-buffer (#193)\n self._buffer = b\"\"\n\n def update(self, data):\n self._buffer, result = _byte_unpadding_update(\n self._buffer, data, self.block_size\n )\n return result\n\n def finalize(self):\n result = _byte_unpadding_check(\n self._buffer,\n self.block_size,\n lib.Cryptography_check_ansix923_padding,\n )\n self._buffer = None\n return result\n", "path": "src/cryptography/hazmat/primitives/padding.py"}]} | 2,605 | 237 |
gh_patches_debug_22192 | rasdani/github-patches | git_diff | python-pillow__Pillow-1985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changed int conversion in ImagePalette to ord for Python 2
Error reported in #1592
</issue>
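The issue title is terse, so here is a small illustrative sketch of the underlying Python 2/3 difference (this is not Pillow code; the variable names are invented for the example). Iterating a `bytes` object yields one-character strings on Python 2 but ints on Python 3, which is why a bare `int(x)` or `ord(x)` conversion breaks on one interpreter or the other, while `bytearray` yields ints on both and stays mutable:

```python
# Illustrative only: why bytearray is a portable, mutable palette container.
# On Python 2, iterating bytes gives 1-char strings (int(x) raises ValueError);
# on Python 3 it gives ints (ord(x) raises TypeError). bytearray yields ints
# on both interpreters, so no per-element conversion is needed.
raw = bytes(bytearray(range(256))) * 3   # raw RGB palette data, 768 bytes

palette = bytearray(raw)                 # same behaviour on Python 2 and 3
assert all(isinstance(v, int) for v in palette)

palette[0] = 255                         # mutable in place, unlike bytes
```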
<code>
[start of PIL/ImagePalette.py]
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # image palette object
6 #
7 # History:
8 # 1996-03-11 fl Rewritten.
9 # 1997-01-03 fl Up and running.
10 # 1997-08-23 fl Added load hack
11 # 2001-04-16 fl Fixed randint shadow bug in random()
12 #
13 # Copyright (c) 1997-2001 by Secret Labs AB
14 # Copyright (c) 1996-1997 by Fredrik Lundh
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 import array
20 from PIL import ImageColor
21
22
23 class ImagePalette(object):
24 """
25 Color palette for palette mapped images
26
27 :param mode: The mode to use for the Palette. See:
28 :ref:`concept-modes`. Defaults to "RGB"
29 :param palette: An optional palette. If given, it must be a bytearray,
30 an array or a list of ints between 0-255 and of length ``size``
31 times the number of colors in ``mode``. The list must be aligned
32 by channel (All R values must be contiguous in the list before G
33 and B values.) Defaults to 0 through 255 per channel.
34 :param size: An optional palette size. If given, it cannot be equal to
35 or greater than 256. Defaults to 0.
36 """
37
38 def __init__(self, mode="RGB", palette=None, size=0):
39 self.mode = mode
40 self.rawmode = None # if set, palette contains raw data
41 self.palette = palette or list(range(256))*len(self.mode)
42 self.colors = {}
43 self.dirty = None
44 if ((size == 0 and len(self.mode)*256 != len(self.palette)) or
45 (size != 0 and size != len(self.palette))):
46 raise ValueError("wrong palette size")
47
48 def copy(self):
49 new = ImagePalette()
50
51 new.mode = self.mode
52 new.rawmode = self.rawmode
53 if self.palette is not None:
54 new.palette = self.palette[:]
55 new.colors = self.colors.copy()
56 new.dirty = self.dirty
57
58 return new
59
60 def getdata(self):
61 """
62 Get palette contents in format suitable # for the low-level
63 ``im.putpalette`` primitive.
64
65 .. warning:: This method is experimental.
66 """
67 if self.rawmode:
68 return self.rawmode, self.palette
69 return self.mode + ";L", self.tobytes()
70
71 def tobytes(self):
72 """Convert palette to bytes.
73
74 .. warning:: This method is experimental.
75 """
76 if self.rawmode:
77 raise ValueError("palette contains raw palette data")
78 if isinstance(self.palette, bytes):
79 return self.palette
80 arr = array.array("B", self.palette)
81 if hasattr(arr, 'tobytes'):
82 return arr.tobytes()
83 return arr.tostring()
84
85 # Declare tostring as an alias for tobytes
86 tostring = tobytes
87
88 def getcolor(self, color):
89 """Given an rgb tuple, allocate palette entry.
90
91 .. warning:: This method is experimental.
92 """
93 if self.rawmode:
94 raise ValueError("palette contains raw palette data")
95 if isinstance(color, tuple):
96 try:
97 return self.colors[color]
98 except KeyError:
99 # allocate new color slot
100 if isinstance(self.palette, bytes):
101 self.palette = [int(x) for x in self.palette]
102 index = len(self.colors)
103 if index >= 256:
104 raise ValueError("cannot allocate more than 256 colors")
105 self.colors[color] = index
106 self.palette[index] = color[0]
107 self.palette[index+256] = color[1]
108 self.palette[index+512] = color[2]
109 self.dirty = 1
110 return index
111 else:
112 raise ValueError("unknown color specifier: %r" % color)
113
114 def save(self, fp):
115 """Save palette to text file.
116
117 .. warning:: This method is experimental.
118 """
119 if self.rawmode:
120 raise ValueError("palette contains raw palette data")
121 if isinstance(fp, str):
122 fp = open(fp, "w")
123 fp.write("# Palette\n")
124 fp.write("# Mode: %s\n" % self.mode)
125 for i in range(256):
126 fp.write("%d" % i)
127 for j in range(i*len(self.mode), (i+1)*len(self.mode)):
128 try:
129 fp.write(" %d" % self.palette[j])
130 except IndexError:
131 fp.write(" 0")
132 fp.write("\n")
133 fp.close()
134
135
136 # --------------------------------------------------------------------
137 # Internal
138
139 def raw(rawmode, data):
140 palette = ImagePalette()
141 palette.rawmode = rawmode
142 palette.palette = data
143 palette.dirty = 1
144 return palette
145
146
147 # --------------------------------------------------------------------
148 # Factories
149
150 def make_linear_lut(black, white):
151 lut = []
152 if black == 0:
153 for i in range(256):
154 lut.append(white*i//255)
155 else:
156 raise NotImplementedError # FIXME
157 return lut
158
159
160 def make_gamma_lut(exp):
161 lut = []
162 for i in range(256):
163 lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))
164 return lut
165
166
167 def negative(mode="RGB"):
168 palette = list(range(256))
169 palette.reverse()
170 return ImagePalette(mode, palette * len(mode))
171
172
173 def random(mode="RGB"):
174 from random import randint
175 palette = []
176 for i in range(256*len(mode)):
177 palette.append(randint(0, 255))
178 return ImagePalette(mode, palette)
179
180
181 def sepia(white="#fff0c0"):
182 r, g, b = ImageColor.getrgb(white)
183 r = make_linear_lut(0, r)
184 g = make_linear_lut(0, g)
185 b = make_linear_lut(0, b)
186 return ImagePalette("RGB", r + g + b)
187
188
189 def wedge(mode="RGB"):
190 return ImagePalette(mode, list(range(256)) * len(mode))
191
192
193 def load(filename):
194
195 # FIXME: supports GIMP gradients only
196
197 fp = open(filename, "rb")
198
199 lut = None
200
201 if not lut:
202 try:
203 from PIL import GimpPaletteFile
204 fp.seek(0)
205 p = GimpPaletteFile.GimpPaletteFile(fp)
206 lut = p.getpalette()
207 except (SyntaxError, ValueError):
208 # import traceback
209 # traceback.print_exc()
210 pass
211
212 if not lut:
213 try:
214 from PIL import GimpGradientFile
215 fp.seek(0)
216 p = GimpGradientFile.GimpGradientFile(fp)
217 lut = p.getpalette()
218 except (SyntaxError, ValueError):
219 # import traceback
220 # traceback.print_exc()
221 pass
222
223 if not lut:
224 try:
225 from PIL import PaletteFile
226 fp.seek(0)
227 p = PaletteFile.PaletteFile(fp)
228 lut = p.getpalette()
229 except (SyntaxError, ValueError):
230 # import traceback
231 # traceback.print_exc()
232 pass
233
234 if not lut:
235 raise IOError("cannot load palette")
236
237 return lut # data, rawmode
238
[end of PIL/ImagePalette.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PIL/ImagePalette.py b/PIL/ImagePalette.py
--- a/PIL/ImagePalette.py
+++ b/PIL/ImagePalette.py
@@ -38,7 +38,7 @@
def __init__(self, mode="RGB", palette=None, size=0):
self.mode = mode
self.rawmode = None # if set, palette contains raw data
- self.palette = palette or list(range(256))*len(self.mode)
+ self.palette = palette or bytearray(range(256))*len(self.mode)
self.colors = {}
self.dirty = None
if ((size == 0 and len(self.mode)*256 != len(self.palette)) or
@@ -98,7 +98,7 @@
except KeyError:
# allocate new color slot
if isinstance(self.palette, bytes):
- self.palette = [int(x) for x in self.palette]
+ self.palette = bytearray(self.palette)
index = len(self.colors)
if index >= 256:
raise ValueError("cannot allocate more than 256 colors")
| {"golden_diff": "diff --git a/PIL/ImagePalette.py b/PIL/ImagePalette.py\n--- a/PIL/ImagePalette.py\n+++ b/PIL/ImagePalette.py\n@@ -38,7 +38,7 @@\n def __init__(self, mode=\"RGB\", palette=None, size=0):\n self.mode = mode\n self.rawmode = None # if set, palette contains raw data\n- self.palette = palette or list(range(256))*len(self.mode)\n+ self.palette = palette or bytearray(range(256))*len(self.mode)\n self.colors = {}\n self.dirty = None\n if ((size == 0 and len(self.mode)*256 != len(self.palette)) or\n@@ -98,7 +98,7 @@\n except KeyError:\n # allocate new color slot\n if isinstance(self.palette, bytes):\n- self.palette = [int(x) for x in self.palette]\n+ self.palette = bytearray(self.palette)\n index = len(self.colors)\n if index >= 256:\n raise ValueError(\"cannot allocate more than 256 colors\")\n", "issue": "Changed int conversion in ImagePalette to ord for Python 2\nError reported in #1592\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# image palette object\n#\n# History:\n# 1996-03-11 fl Rewritten.\n# 1997-01-03 fl Up and running.\n# 1997-08-23 fl Added load hack\n# 2001-04-16 fl Fixed randint shadow bug in random()\n#\n# Copyright (c) 1997-2001 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport array\nfrom PIL import ImageColor\n\n\nclass ImagePalette(object):\n \"\"\"\n Color palette for palette mapped images\n\n :param mode: The mode to use for the Palette. See:\n :ref:`concept-modes`. Defaults to \"RGB\"\n :param palette: An optional palette. If given, it must be a bytearray,\n an array or a list of ints between 0-255 and of length ``size``\n times the number of colors in ``mode``. The list must be aligned\n by channel (All R values must be contiguous in the list before G\n and B values.) Defaults to 0 through 255 per channel.\n :param size: An optional palette size. If given, it cannot be equal to\n or greater than 256. Defaults to 0.\n \"\"\"\n\n def __init__(self, mode=\"RGB\", palette=None, size=0):\n self.mode = mode\n self.rawmode = None # if set, palette contains raw data\n self.palette = palette or list(range(256))*len(self.mode)\n self.colors = {}\n self.dirty = None\n if ((size == 0 and len(self.mode)*256 != len(self.palette)) or\n (size != 0 and size != len(self.palette))):\n raise ValueError(\"wrong palette size\")\n\n def copy(self):\n new = ImagePalette()\n\n new.mode = self.mode\n new.rawmode = self.rawmode\n if self.palette is not None:\n new.palette = self.palette[:]\n new.colors = self.colors.copy()\n new.dirty = self.dirty\n\n return new\n\n def getdata(self):\n \"\"\"\n Get palette contents in format suitable # for the low-level\n ``im.putpalette`` primitive.\n\n .. warning:: This method is experimental.\n \"\"\"\n if self.rawmode:\n return self.rawmode, self.palette\n return self.mode + \";L\", self.tobytes()\n\n def tobytes(self):\n \"\"\"Convert palette to bytes.\n\n .. warning:: This method is experimental.\n \"\"\"\n if self.rawmode:\n raise ValueError(\"palette contains raw palette data\")\n if isinstance(self.palette, bytes):\n return self.palette\n arr = array.array(\"B\", self.palette)\n if hasattr(arr, 'tobytes'):\n return arr.tobytes()\n return arr.tostring()\n\n # Declare tostring as an alias for tobytes\n tostring = tobytes\n\n def getcolor(self, color):\n \"\"\"Given an rgb tuple, allocate palette entry.\n\n .. 
warning:: This method is experimental.\n \"\"\"\n if self.rawmode:\n raise ValueError(\"palette contains raw palette data\")\n if isinstance(color, tuple):\n try:\n return self.colors[color]\n except KeyError:\n # allocate new color slot\n if isinstance(self.palette, bytes):\n self.palette = [int(x) for x in self.palette]\n index = len(self.colors)\n if index >= 256:\n raise ValueError(\"cannot allocate more than 256 colors\")\n self.colors[color] = index\n self.palette[index] = color[0]\n self.palette[index+256] = color[1]\n self.palette[index+512] = color[2]\n self.dirty = 1\n return index\n else:\n raise ValueError(\"unknown color specifier: %r\" % color)\n\n def save(self, fp):\n \"\"\"Save palette to text file.\n\n .. warning:: This method is experimental.\n \"\"\"\n if self.rawmode:\n raise ValueError(\"palette contains raw palette data\")\n if isinstance(fp, str):\n fp = open(fp, \"w\")\n fp.write(\"# Palette\\n\")\n fp.write(\"# Mode: %s\\n\" % self.mode)\n for i in range(256):\n fp.write(\"%d\" % i)\n for j in range(i*len(self.mode), (i+1)*len(self.mode)):\n try:\n fp.write(\" %d\" % self.palette[j])\n except IndexError:\n fp.write(\" 0\")\n fp.write(\"\\n\")\n fp.close()\n\n\n# --------------------------------------------------------------------\n# Internal\n\ndef raw(rawmode, data):\n palette = ImagePalette()\n palette.rawmode = rawmode\n palette.palette = data\n palette.dirty = 1\n return palette\n\n\n# --------------------------------------------------------------------\n# Factories\n\ndef make_linear_lut(black, white):\n lut = []\n if black == 0:\n for i in range(256):\n lut.append(white*i//255)\n else:\n raise NotImplementedError # FIXME\n return lut\n\n\ndef make_gamma_lut(exp):\n lut = []\n for i in range(256):\n lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))\n return lut\n\n\ndef negative(mode=\"RGB\"):\n palette = list(range(256))\n palette.reverse()\n return ImagePalette(mode, palette * len(mode))\n\n\ndef random(mode=\"RGB\"):\n from random import randint\n palette = []\n for i in range(256*len(mode)):\n palette.append(randint(0, 255))\n return ImagePalette(mode, palette)\n\n\ndef sepia(white=\"#fff0c0\"):\n r, g, b = ImageColor.getrgb(white)\n r = make_linear_lut(0, r)\n g = make_linear_lut(0, g)\n b = make_linear_lut(0, b)\n return ImagePalette(\"RGB\", r + g + b)\n\n\ndef wedge(mode=\"RGB\"):\n return ImagePalette(mode, list(range(256)) * len(mode))\n\n\ndef load(filename):\n\n # FIXME: supports GIMP gradients only\n\n fp = open(filename, \"rb\")\n\n lut = None\n\n if not lut:\n try:\n from PIL import GimpPaletteFile\n fp.seek(0)\n p = GimpPaletteFile.GimpPaletteFile(fp)\n lut = p.getpalette()\n except (SyntaxError, ValueError):\n # import traceback\n # traceback.print_exc()\n pass\n\n if not lut:\n try:\n from PIL import GimpGradientFile\n fp.seek(0)\n p = GimpGradientFile.GimpGradientFile(fp)\n lut = p.getpalette()\n except (SyntaxError, ValueError):\n # import traceback\n # traceback.print_exc()\n pass\n\n if not lut:\n try:\n from PIL import PaletteFile\n fp.seek(0)\n p = PaletteFile.PaletteFile(fp)\n lut = p.getpalette()\n except (SyntaxError, ValueError):\n # import traceback\n # traceback.print_exc()\n pass\n\n if not lut:\n raise IOError(\"cannot load palette\")\n\n return lut # data, rawmode\n", "path": "PIL/ImagePalette.py"}]} | 2,809 | 238 |
gh_patches_debug_25720 | rasdani/github-patches | git_diff | cal-itp__benefits-1343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configure a Sentry denylist
Looks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/
Another more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/
_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_
</issue>
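For reference, a minimal sketch of the first option (a custom denylist passed to `EventScrubber`) might look like the following; the DSN is a placeholder and the extra field names are only examples, not values required by Sentry:

```python
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST

# Start from Sentry's defaults and add app-specific sensitive field names (examples).
denylist = DEFAULT_DENYLIST + ["sub", "name"]

sentry_sdk.init(
    dsn="https://[email protected]/0",  # placeholder DSN
    integrations=[DjangoIntegration()],
    # A custom EventScrubber only applies when send_default_pii is False.
    send_default_pii=False,
    event_scrubber=EventScrubber(denylist=denylist),
)
```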
<code>
[start of benefits/sentry.py]
1 from benefits import VERSION
2 import sentry_sdk
3 from sentry_sdk.integrations.django import DjangoIntegration
4 import shutil
5 import os
6 import subprocess
7
8
9 SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
10
11
12 def git_available():
13 return bool(shutil.which("git"))
14
15
16 # https://stackoverflow.com/a/24584384/358804
17 def is_git_directory(path="."):
18 dev_null = open(os.devnull, "w")
19 return subprocess.call(["git", "-C", path, "status"], stderr=dev_null, stdout=dev_null) == 0
20
21
22 # https://stackoverflow.com/a/21901260/358804
23 def get_git_revision_hash():
24 return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
25
26
27 def get_sha_file_path():
28 current_file = os.path.dirname(os.path.abspath(__file__))
29 return os.path.join(current_file, "..", "static", "sha.txt")
30
31
32 def get_sha_from_file():
33 sha_path = get_sha_file_path()
34 if os.path.isfile(sha_path):
35 with open(sha_path) as f:
36 return f.read().strip()
37 else:
38 return None
39
40
41 def get_release() -> str:
42 """Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION."""
43
44 if git_available() and is_git_directory():
45 return get_git_revision_hash()
46 else:
47 sha = get_sha_from_file()
48 if sha:
49 return sha
50 else:
51 # one of the above *should* always be available, but including this just in case
52 return VERSION
53
54
55 def configure():
56 SENTRY_DSN = os.environ.get("SENTRY_DSN")
57 if SENTRY_DSN:
58 release = get_release()
59 print(f"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...")
60
61 # https://docs.sentry.io/platforms/python/configuration/
62 sentry_sdk.init(
63 dsn=SENTRY_DSN,
64 integrations=[
65 DjangoIntegration(),
66 ],
67 traces_sample_rate=1.0,
68 environment=SENTRY_ENVIRONMENT,
69 release=release,
70 in_app_include=["benefits"],
71 )
72 else:
73 print("SENTRY_DSN not set, so won't send events")
74
[end of benefits/sentry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/sentry.py b/benefits/sentry.py
--- a/benefits/sentry.py
+++ b/benefits/sentry.py
@@ -1,10 +1,13 @@
-from benefits import VERSION
-import sentry_sdk
-from sentry_sdk.integrations.django import DjangoIntegration
import shutil
import os
import subprocess
+import sentry_sdk
+from sentry_sdk.integrations.django import DjangoIntegration
+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST
+
+from benefits import VERSION
+
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
@@ -52,6 +55,12 @@
return VERSION
+def get_denylist():
+ # custom denylist
+ denylist = DEFAULT_DENYLIST + ["sub", "name"]
+ return denylist
+
+
def configure():
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
@@ -68,6 +77,10 @@
environment=SENTRY_ENVIRONMENT,
release=release,
in_app_include=["benefits"],
+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist
+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber
+ send_default_pii=False,
+ event_scrubber=EventScrubber(denylist=get_denylist()),
)
else:
print("SENTRY_DSN not set, so won't send events")
| {"golden_diff": "diff --git a/benefits/sentry.py b/benefits/sentry.py\n--- a/benefits/sentry.py\n+++ b/benefits/sentry.py\n@@ -1,10 +1,13 @@\n-from benefits import VERSION\n-import sentry_sdk\n-from sentry_sdk.integrations.django import DjangoIntegration\n import shutil\n import os\n import subprocess\n \n+import sentry_sdk\n+from sentry_sdk.integrations.django import DjangoIntegration\n+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST\n+\n+from benefits import VERSION\n+\n \n SENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n \n@@ -52,6 +55,12 @@\n return VERSION\n \n \n+def get_denylist():\n+ # custom denylist\n+ denylist = DEFAULT_DENYLIST + [\"sub\", \"name\"]\n+ return denylist\n+\n+\n def configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n@@ -68,6 +77,10 @@\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist\n+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber\n+ send_default_pii=False,\n+ event_scrubber=EventScrubber(denylist=get_denylist()),\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "issue": "Configure a Sentry denylist\nLooks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/\r\n\r\nAnother more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/\r\n\r\n_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_\r\n \n", "before_files": [{"content": "from benefits import VERSION\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nimport shutil\nimport os\nimport subprocess\n\n\nSENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n\n\ndef git_available():\n return bool(shutil.which(\"git\"))\n\n\n# https://stackoverflow.com/a/24584384/358804\ndef is_git_directory(path=\".\"):\n dev_null = open(os.devnull, \"w\")\n return subprocess.call([\"git\", \"-C\", path, \"status\"], stderr=dev_null, stdout=dev_null) == 0\n\n\n# https://stackoverflow.com/a/21901260/358804\ndef get_git_revision_hash():\n return subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).decode(\"ascii\").strip()\n\n\ndef get_sha_file_path():\n current_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_file, \"..\", \"static\", \"sha.txt\")\n\n\ndef get_sha_from_file():\n sha_path = get_sha_file_path()\n if os.path.isfile(sha_path):\n with open(sha_path) as f:\n return f.read().strip()\n else:\n return None\n\n\ndef get_release() -> str:\n \"\"\"Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION.\"\"\"\n\n if git_available() and is_git_directory():\n return get_git_revision_hash()\n else:\n sha = get_sha_from_file()\n if sha:\n return sha\n else:\n # one of the above *should* always be available, but including this just in case\n return VERSION\n\n\ndef configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n release = get_release()\n print(f\"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...\")\n\n # https://docs.sentry.io/platforms/python/configuration/\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n DjangoIntegration(),\n ],\n 
traces_sample_rate=1.0,\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "path": "benefits/sentry.py"}]} | 1,307 | 355 |
gh_patches_debug_14180 | rasdani/github-patches | git_diff | pre-commit__pre-commit-622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unstaged files + never ran pre-commit => "No such file or directory: .../.cache/pre-commit/patch..."
```
$ pre-commit run
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.
An unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
Check the log at /home/asottile/.cache/pre-commit/pre-commit.log
```
Stacktrace:
```python
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 44, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 231, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 249, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py", line 46, in staged_files_only
with io.open(patch_filename, 'wb') as patch_file:
IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
```
</issue>
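The traceback above points at the patch directory not existing yet on a machine that has never run pre-commit. As a rough sketch of the fix direction (pre-commit's own code uses its `mkdirp()` helper; `save_patch` below is a made-up name for illustration), the directory just needs to be created before the patch file is opened:

```python
import io
import os
import time


def save_patch(patch_dir, diff_bytes):
    """Write unstaged changes to a patch file, creating the directory first."""
    # The cache directory may not exist yet, so create it (and any parents) first.
    os.makedirs(patch_dir, exist_ok=True)  # Python 3 form of pre-commit's mkdirp()
    patch_filename = os.path.join(patch_dir, 'patch{}'.format(int(time.time())))
    with io.open(patch_filename, 'wb') as patch_file:
        patch_file.write(diff_bytes)
    return patch_filename
```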
<code>
[start of pre_commit/staged_files_only.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import time
8
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11
12
13 logger = logging.getLogger('pre_commit')
14
15
16 def _git_apply(patch):
17 args = ('apply', '--whitespace=nowarn', patch)
18 try:
19 cmd_output('git', *args, encoding=None)
20 except CalledProcessError:
21 # Retry with autocrlf=false -- see #570
22 cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)
23
24
25 @contextlib.contextmanager
26 def staged_files_only(patch_dir):
27 """Clear any unstaged changes from the git working directory inside this
28 context.
29 """
30 # Determine if there are unstaged files
31 tree = cmd_output('git', 'write-tree')[1].strip()
32 retcode, diff_stdout_binary, _ = cmd_output(
33 'git', 'diff-index', '--ignore-submodules', '--binary',
34 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
35 retcode=None,
36 encoding=None,
37 )
38 if retcode and diff_stdout_binary.strip():
39 patch_filename = 'patch{}'.format(int(time.time()))
40 patch_filename = os.path.join(patch_dir, patch_filename)
41 logger.warning('Unstaged files detected.')
42 logger.info(
43 'Stashing unstaged files to {}.'.format(patch_filename),
44 )
45 # Save the current unstaged changes as a patch
46 with io.open(patch_filename, 'wb') as patch_file:
47 patch_file.write(diff_stdout_binary)
48
49 # Clear the working directory of unstaged changes
50 cmd_output('git', 'checkout', '--', '.')
51 try:
52 yield
53 finally:
54 # Try to apply the patch we saved
55 try:
56 _git_apply(patch_filename)
57 except CalledProcessError:
58 logger.warning(
59 'Stashed changes conflicted with hook auto-fixes... '
60 'Rolling back fixes...',
61 )
62 # We failed to apply the patch, presumably due to fixes made
63 # by hooks.
64 # Roll back the changes made by hooks.
65 cmd_output('git', 'checkout', '--', '.')
66 _git_apply(patch_filename)
67 logger.info('Restored changes from {}.'.format(patch_filename))
68 else:
69 # There weren't any staged files so we don't need to do anything
70 # special
71 yield
72
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -8,6 +8,7 @@
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
+from pre_commit.util import mkdirp
logger = logging.getLogger('pre_commit')
@@ -43,6 +44,7 @@
'Stashing unstaged files to {}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
+ mkdirp(patch_dir)
with io.open(patch_filename, 'wb') as patch_file:
patch_file.write(diff_stdout_binary)
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -8,6 +8,7 @@\n \n from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n+from pre_commit.util import mkdirp\n \n \n logger = logging.getLogger('pre_commit')\n@@ -43,6 +44,7 @@\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n+ mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n", "issue": "Unstaged files + never ran pre-commit => \"No such file or directory: .../.cache/pre-commit/patch...\"\n```\r\n$ pre-commit run\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.\r\nAn unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\nCheck the log at /home/asottile/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\nStacktrace:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/error_handler.py\", line 44, in error_handler\r\n yield\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/main.py\", line 231, in main\r\n return run(runner, args)\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/commands/run.py\", line 249, in run\r\n with ctx:\r\n File \"/usr/lib/python2.7/contextlib.py\", line 17, in __enter__\r\n return self.gen.next()\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py\", line 46, in staged_files_only\r\n with io.open(patch_filename, 'wb') as patch_file:\r\nIOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed 
changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,553 | 155 |
gh_patches_debug_22044 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New canny implementation silently fails with integer images.
### Description:
The new `skimage.feature.canny` implementation silently fails if given an integer image. This worked on `scikit-image<=0.19`, and no longer works with `scikit-image=0.20`. The documentation says that any dtype should work:
```
image : 2D array
Grayscale input image to detect edges on; can be of any dtype.
```
### Way to reproduce:
```
from skimage.feature import canny
import numpy as np
im = np.zeros((100, 100))
im[0: 50, 0: 50] = 1.0
print("Edge pixels with float input: ", canny(im, low_threshold=0, high_threshold=1).sum())
print("Edge pixels with int input: ", canny(im.astype(np.int64), low_threshold=0, high_threshold=1).sum())
```
This prints on new skimage (0.20):
```
Edge pixels with float input: 182
Edge pixels with int input: 0
```
And on old skimage (0.19):
```
Edge pixels with float input: 144
Edge pixels with int input: 144
```
As I write this test case I also need to ask ... why did the number of pixels change?
### Version information:
```Shell
3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]
Linux-3.10.0-1160.88.1.el7.x86_64-x86_64-with-glibc2.17
scikit-image version: 0.20.0
numpy version: 1.23.5
```
</issue>
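Until the behaviour is settled, one possible workaround (not an official scikit-image recommendation) is to cast integer images to a float dtype before calling `canny`, which reproduces the float-input result from the script above:

```python
import numpy as np
from skimage.feature import canny

im = np.zeros((100, 100), dtype=np.int64)
im[0:50, 0:50] = 1

# Casting to float sidesteps the integer-input failure seen on scikit-image 0.20.
edges = canny(im.astype(np.float64), low_threshold=0, high_threshold=1)
print("Edge pixels after casting to float:", edges.sum())
```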
<code>
[start of skimage/feature/_canny.py]
1 """
2 canny.py - Canny Edge detector
3
4 Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
5 Pattern Analysis and Machine Intelligence, 8:679-714, 1986
6 """
7
8 import numpy as np
9 import scipy.ndimage as ndi
10
11 from ..util.dtype import dtype_limits
12 from .._shared.filters import gaussian
13 from .._shared.utils import _supported_float_type, check_nD
14 from ._canny_cy import _nonmaximum_suppression_bilinear
15
16
17 def _preprocess(image, mask, sigma, mode, cval):
18 """Generate a smoothed image and an eroded mask.
19
20 The image is smoothed using a gaussian filter ignoring masked
21 pixels and the mask is eroded.
22
23 Parameters
24 ----------
25 image : array
26 Image to be smoothed.
27 mask : array
28 Mask with 1's for significant pixels, 0's for masked pixels.
29 sigma : scalar or sequence of scalars
30 Standard deviation for Gaussian kernel. The standard
31 deviations of the Gaussian filter are given for each axis as a
32 sequence, or as a single number, in which case it is equal for
33 all axes.
34 mode : str, {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}
35 The ``mode`` parameter determines how the array borders are
36 handled, where ``cval`` is the value when mode is equal to
37 'constant'.
38 cval : float, optional
39 Value to fill past edges of input if `mode` is 'constant'.
40
41 Returns
42 -------
43 smoothed_image : ndarray
44 The smoothed array
45 eroded_mask : ndarray
46 The eroded mask.
47
48 Notes
49 -----
50 This function calculates the fractional contribution of masked pixels
51 by applying the function to the mask (which gets you the fraction of
52 the pixel data that's due to significant points). We then mask the image
53 and apply the function. The resulting values will be lower by the
54 bleed-over fraction, so you can recalibrate by dividing by the function
55 on the mask to recover the effect of smoothing from just the significant
56 pixels.
57 """
58 gaussian_kwargs = dict(sigma=sigma, mode=mode, cval=cval,
59 preserve_range=False)
60 compute_bleedover = (mode == 'constant' or mask is not None)
61 float_type = _supported_float_type(image.dtype)
62 if mask is None:
63 if compute_bleedover:
64 mask = np.ones(image.shape, dtype=float_type)
65 masked_image = image
66
67 eroded_mask = np.ones(image.shape, dtype=bool)
68 eroded_mask[:1, :] = 0
69 eroded_mask[-1:, :] = 0
70 eroded_mask[:, :1] = 0
71 eroded_mask[:, -1:] = 0
72
73 else:
74 mask = mask.astype(bool, copy=False)
75 masked_image = np.zeros_like(image)
76 masked_image[mask] = image[mask]
77
78 # Make the eroded mask. Setting the border value to zero will wipe
79 # out the image edges for us.
80 s = ndi.generate_binary_structure(2, 2)
81 eroded_mask = ndi.binary_erosion(mask, s, border_value=0)
82
83 if compute_bleedover:
84 # Compute the fractional contribution of masked pixels by applying
85 # the function to the mask (which gets you the fraction of the
86 # pixel data that's due to significant points)
87 bleed_over = gaussian(mask.astype(float_type, copy=False),
88 **gaussian_kwargs) + np.finfo(float_type).eps
89
90 # Smooth the masked image
91 smoothed_image = gaussian(masked_image, **gaussian_kwargs)
92
93 # Lower the result by the bleed-over fraction, so you can
94 # recalibrate by dividing by the function on the mask to recover
95 # the effect of smoothing from just the significant pixels.
96 if compute_bleedover:
97 smoothed_image /= bleed_over
98
99 return smoothed_image, eroded_mask
100
101
102 def canny(image, sigma=1., low_threshold=None, high_threshold=None,
103 mask=None, use_quantiles=False, *, mode='constant', cval=0.0):
104 """Edge filter an image using the Canny algorithm.
105
106 Parameters
107 ----------
108 image : 2D array
109 Grayscale input image to detect edges on; can be of any dtype.
110 sigma : float, optional
111 Standard deviation of the Gaussian filter.
112 low_threshold : float, optional
113 Lower bound for hysteresis thresholding (linking edges).
114 If None, low_threshold is set to 10% of dtype's max.
115 high_threshold : float, optional
116 Upper bound for hysteresis thresholding (linking edges).
117 If None, high_threshold is set to 20% of dtype's max.
118 mask : array, dtype=bool, optional
119 Mask to limit the application of Canny to a certain area.
120 use_quantiles : bool, optional
121 If ``True`` then treat low_threshold and high_threshold as
122 quantiles of the edge magnitude image, rather than absolute
123 edge magnitude values. If ``True`` then the thresholds must be
124 in the range [0, 1].
125 mode : str, {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}
126 The ``mode`` parameter determines how the array borders are
127 handled during Gaussian filtering, where ``cval`` is the value when
128 mode is equal to 'constant'.
129 cval : float, optional
130 Value to fill past edges of input if `mode` is 'constant'.
131
132 Returns
133 -------
134 output : 2D array (image)
135 The binary edge map.
136
137 See also
138 --------
139 skimage.filters.sobel
140
141 Notes
142 -----
143 The steps of the algorithm are as follows:
144
145 * Smooth the image using a Gaussian with ``sigma`` width.
146
147 * Apply the horizontal and vertical Sobel operators to get the gradients
148 within the image. The edge strength is the norm of the gradient.
149
150 * Thin potential edges to 1-pixel wide curves. First, find the normal
151 to the edge at each point. This is done by looking at the
152 signs and the relative magnitude of the X-Sobel and Y-Sobel
153 to sort the points into 4 categories: horizontal, vertical,
154 diagonal and antidiagonal. Then look in the normal and reverse
155 directions to see if the values in either of those directions are
156 greater than the point in question. Use interpolation to get a mix of
157 points instead of picking the one that's the closest to the normal.
158
159 * Perform a hysteresis thresholding: first label all points above the
160 high threshold as edges. Then recursively label any point above the
161 low threshold that is 8-connected to a labeled point as an edge.
162
163 References
164 ----------
165 .. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
166 Pattern Analysis and Machine Intelligence, 8:679-714, 1986
167 :DOI:`10.1109/TPAMI.1986.4767851`
168 .. [2] William Green's Canny tutorial
169 https://en.wikipedia.org/wiki/Canny_edge_detector
170
171 Examples
172 --------
173 >>> from skimage import feature
174 >>> rng = np.random.default_rng()
175 >>> # Generate noisy image of a square
176 >>> im = np.zeros((256, 256))
177 >>> im[64:-64, 64:-64] = 1
178 >>> im += 0.2 * rng.random(im.shape)
179 >>> # First trial with the Canny filter, with the default smoothing
180 >>> edges1 = feature.canny(im)
181 >>> # Increase the smoothing for better results
182 >>> edges2 = feature.canny(im, sigma=3)
183
184 """
185
186 # Regarding masks, any point touching a masked point will have a gradient
187 # that is "infected" by the masked point, so it's enough to erode the
188 # mask by one and then mask the output. We also mask out the border points
189 # because who knows what lies beyond the edge of the image?
190
191 check_nD(image, 2)
192 dtype_max = dtype_limits(image, clip_negative=False)[1]
193
194 if low_threshold is None:
195 low_threshold = 0.1
196 elif use_quantiles:
197 if not(0.0 <= low_threshold <= 1.0):
198 raise ValueError("Quantile thresholds must be between 0 and 1.")
199 else:
200 low_threshold /= dtype_max
201
202 if high_threshold is None:
203 high_threshold = 0.2
204 elif use_quantiles:
205 if not(0.0 <= high_threshold <= 1.0):
206 raise ValueError("Quantile thresholds must be between 0 and 1.")
207 else:
208 high_threshold /= dtype_max
209
210 if high_threshold < low_threshold:
211 raise ValueError("low_threshold should be lower then high_threshold")
212
213 # Image filtering
214 smoothed, eroded_mask = _preprocess(image, mask, sigma, mode, cval)
215
216 # Gradient magnitude estimation
217 jsobel = ndi.sobel(smoothed, axis=1)
218 isobel = ndi.sobel(smoothed, axis=0)
219 magnitude = isobel * isobel
220 magnitude += jsobel * jsobel
221 np.sqrt(magnitude, out=magnitude)
222
223 if use_quantiles:
224 low_threshold, high_threshold = np.percentile(magnitude,
225 [100.0 * low_threshold,
226 100.0 * high_threshold])
227
228 # Non-maximum suppression
229 low_masked = _nonmaximum_suppression_bilinear(
230 isobel, jsobel, magnitude, eroded_mask, low_threshold
231 )
232
233 # Double thresholding and edge tracking
234 #
235 # Segment the low-mask, then only keep low-segments that have
236 # some high_mask component in them
237 #
238 low_mask = low_masked > 0
239 strel = np.ones((3, 3), bool)
240 labels, count = ndi.label(low_mask, strel)
241 if count == 0:
242 return low_mask
243
244 high_mask = low_mask & (low_masked >= high_threshold)
245 nonzero_sums = np.unique(labels[high_mask])
246 good_label = np.zeros((count + 1,), bool)
247 good_label[nonzero_sums] = True
248 output_mask = good_label[labels]
249 return output_mask
250
[end of skimage/feature/_canny.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/feature/_canny.py b/skimage/feature/_canny.py
--- a/skimage/feature/_canny.py
+++ b/skimage/feature/_canny.py
@@ -55,8 +55,12 @@
on the mask to recover the effect of smoothing from just the significant
pixels.
"""
- gaussian_kwargs = dict(sigma=sigma, mode=mode, cval=cval,
- preserve_range=False)
+ gaussian_kwargs = dict(
+ sigma=sigma,
+ mode=mode,
+ cval=cval,
+ preserve_range=False
+ )
compute_bleedover = (mode == 'constant' or mask is not None)
float_type = _supported_float_type(image.dtype)
if mask is None:
@@ -188,6 +192,9 @@
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
+ if np.issubdtype(image.dtype, np.int64) or np.issubdtype(image.dtype, np.uint64):
+ raise ValueError("64-bit integer images are not supported")
+
check_nD(image, 2)
dtype_max = dtype_limits(image, clip_negative=False)[1]
| {"golden_diff": "diff --git a/skimage/feature/_canny.py b/skimage/feature/_canny.py\n--- a/skimage/feature/_canny.py\n+++ b/skimage/feature/_canny.py\n@@ -55,8 +55,12 @@\n on the mask to recover the effect of smoothing from just the significant\n pixels.\n \"\"\"\n- gaussian_kwargs = dict(sigma=sigma, mode=mode, cval=cval,\n- preserve_range=False)\n+ gaussian_kwargs = dict(\n+ sigma=sigma,\n+ mode=mode,\n+ cval=cval,\n+ preserve_range=False\n+ )\n compute_bleedover = (mode == 'constant' or mask is not None)\n float_type = _supported_float_type(image.dtype)\n if mask is None:\n@@ -188,6 +192,9 @@\n # mask by one and then mask the output. We also mask out the border points\n # because who knows what lies beyond the edge of the image?\n \n+ if np.issubdtype(image.dtype, np.int64) or np.issubdtype(image.dtype, np.uint64):\n+ raise ValueError(\"64-bit integer images are not supported\")\n+\n check_nD(image, 2)\n dtype_max = dtype_limits(image, clip_negative=False)[1]\n", "issue": "New canny implementation silently fails with integer images.\n### Description:\n\nThe new `skimage.feature.canny` implementation silently fails if given an integer image. This worked on `scikit-image<=0.19`, and no longer works with `scikit-image=0.20`. The documentation says that any dtype should work:\r\n```\r\n image : 2D array\r\n Grayscale input image to detect edges on; can be of any dtype.\r\n```\r\n\n\n### Way to reproduce:\n\n```\r\nfrom skimage.feature import canny\r\nimport numpy as np\r\n\r\nim = np.zeros((100, 100))\r\nim[0: 50, 0: 50] = 1.0\r\nprint(\"Edge pixels with float input: \", canny(im, low_threshold=0, high_threshold=1).sum())\r\nprint(\"Edge pixels with int input: \", canny(im.astype(np.int64), low_threshold=0, high_threshold=1).sum())\r\n```\r\nThis prints on new skimage (0.20):\r\n```\r\nEdge pixels with float input: 182\r\nEdge pixels with int input: 0\r\n```\r\nAnd on old skimage (0.19):\r\n```\r\nEdge pixels with float input: 144\r\nEdge pixels with int input: 144\r\n```\r\n\r\nAs I write this test case I also need to ask ... why did the number of pixels change?\n\n### Version information:\n\n```Shell\n3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]\r\nLinux-3.10.0-1160.88.1.el7.x86_64-x86_64-with-glibc2.17\r\nscikit-image version: 0.20.0\r\nnumpy version: 1.23.5\n```\n\n", "before_files": [{"content": "\"\"\"\ncanny.py - Canny Edge detector\n\nReference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.\n Pattern Analysis and Machine Intelligence, 8:679-714, 1986\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom ..util.dtype import dtype_limits\nfrom .._shared.filters import gaussian\nfrom .._shared.utils import _supported_float_type, check_nD\nfrom ._canny_cy import _nonmaximum_suppression_bilinear\n\n\ndef _preprocess(image, mask, sigma, mode, cval):\n \"\"\"Generate a smoothed image and an eroded mask.\n\n The image is smoothed using a gaussian filter ignoring masked\n pixels and the mask is eroded.\n\n Parameters\n ----------\n image : array\n Image to be smoothed.\n mask : array\n Mask with 1's for significant pixels, 0's for masked pixels.\n sigma : scalar or sequence of scalars\n Standard deviation for Gaussian kernel. 
The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for\n all axes.\n mode : str, {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}\n The ``mode`` parameter determines how the array borders are\n handled, where ``cval`` is the value when mode is equal to\n 'constant'.\n cval : float, optional\n Value to fill past edges of input if `mode` is 'constant'.\n\n Returns\n -------\n smoothed_image : ndarray\n The smoothed array\n eroded_mask : ndarray\n The eroded mask.\n\n Notes\n -----\n This function calculates the fractional contribution of masked pixels\n by applying the function to the mask (which gets you the fraction of\n the pixel data that's due to significant points). We then mask the image\n and apply the function. The resulting values will be lower by the\n bleed-over fraction, so you can recalibrate by dividing by the function\n on the mask to recover the effect of smoothing from just the significant\n pixels.\n \"\"\"\n gaussian_kwargs = dict(sigma=sigma, mode=mode, cval=cval,\n preserve_range=False)\n compute_bleedover = (mode == 'constant' or mask is not None)\n float_type = _supported_float_type(image.dtype)\n if mask is None:\n if compute_bleedover:\n mask = np.ones(image.shape, dtype=float_type)\n masked_image = image\n\n eroded_mask = np.ones(image.shape, dtype=bool)\n eroded_mask[:1, :] = 0\n eroded_mask[-1:, :] = 0\n eroded_mask[:, :1] = 0\n eroded_mask[:, -1:] = 0\n\n else:\n mask = mask.astype(bool, copy=False)\n masked_image = np.zeros_like(image)\n masked_image[mask] = image[mask]\n\n # Make the eroded mask. Setting the border value to zero will wipe\n # out the image edges for us.\n s = ndi.generate_binary_structure(2, 2)\n eroded_mask = ndi.binary_erosion(mask, s, border_value=0)\n\n if compute_bleedover:\n # Compute the fractional contribution of masked pixels by applying\n # the function to the mask (which gets you the fraction of the\n # pixel data that's due to significant points)\n bleed_over = gaussian(mask.astype(float_type, copy=False),\n **gaussian_kwargs) + np.finfo(float_type).eps\n\n # Smooth the masked image\n smoothed_image = gaussian(masked_image, **gaussian_kwargs)\n\n # Lower the result by the bleed-over fraction, so you can\n # recalibrate by dividing by the function on the mask to recover\n # the effect of smoothing from just the significant pixels.\n if compute_bleedover:\n smoothed_image /= bleed_over\n\n return smoothed_image, eroded_mask\n\n\ndef canny(image, sigma=1., low_threshold=None, high_threshold=None,\n mask=None, use_quantiles=False, *, mode='constant', cval=0.0):\n \"\"\"Edge filter an image using the Canny algorithm.\n\n Parameters\n ----------\n image : 2D array\n Grayscale input image to detect edges on; can be of any dtype.\n sigma : float, optional\n Standard deviation of the Gaussian filter.\n low_threshold : float, optional\n Lower bound for hysteresis thresholding (linking edges).\n If None, low_threshold is set to 10% of dtype's max.\n high_threshold : float, optional\n Upper bound for hysteresis thresholding (linking edges).\n If None, high_threshold is set to 20% of dtype's max.\n mask : array, dtype=bool, optional\n Mask to limit the application of Canny to a certain area.\n use_quantiles : bool, optional\n If ``True`` then treat low_threshold and high_threshold as\n quantiles of the edge magnitude image, rather than absolute\n edge magnitude values. 
If ``True`` then the thresholds must be\n in the range [0, 1].\n mode : str, {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}\n The ``mode`` parameter determines how the array borders are\n handled during Gaussian filtering, where ``cval`` is the value when\n mode is equal to 'constant'.\n cval : float, optional\n Value to fill past edges of input if `mode` is 'constant'.\n\n Returns\n -------\n output : 2D array (image)\n The binary edge map.\n\n See also\n --------\n skimage.filters.sobel\n\n Notes\n -----\n The steps of the algorithm are as follows:\n\n * Smooth the image using a Gaussian with ``sigma`` width.\n\n * Apply the horizontal and vertical Sobel operators to get the gradients\n within the image. The edge strength is the norm of the gradient.\n\n * Thin potential edges to 1-pixel wide curves. First, find the normal\n to the edge at each point. This is done by looking at the\n signs and the relative magnitude of the X-Sobel and Y-Sobel\n to sort the points into 4 categories: horizontal, vertical,\n diagonal and antidiagonal. Then look in the normal and reverse\n directions to see if the values in either of those directions are\n greater than the point in question. Use interpolation to get a mix of\n points instead of picking the one that's the closest to the normal.\n\n * Perform a hysteresis thresholding: first label all points above the\n high threshold as edges. Then recursively label any point above the\n low threshold that is 8-connected to a labeled point as an edge.\n\n References\n ----------\n .. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.\n Pattern Analysis and Machine Intelligence, 8:679-714, 1986\n :DOI:`10.1109/TPAMI.1986.4767851`\n .. [2] William Green's Canny tutorial\n https://en.wikipedia.org/wiki/Canny_edge_detector\n\n Examples\n --------\n >>> from skimage import feature\n >>> rng = np.random.default_rng()\n >>> # Generate noisy image of a square\n >>> im = np.zeros((256, 256))\n >>> im[64:-64, 64:-64] = 1\n >>> im += 0.2 * rng.random(im.shape)\n >>> # First trial with the Canny filter, with the default smoothing\n >>> edges1 = feature.canny(im)\n >>> # Increase the smoothing for better results\n >>> edges2 = feature.canny(im, sigma=3)\n\n \"\"\"\n\n # Regarding masks, any point touching a masked point will have a gradient\n # that is \"infected\" by the masked point, so it's enough to erode the\n # mask by one and then mask the output. 
We also mask out the border points\n # because who knows what lies beyond the edge of the image?\n\n check_nD(image, 2)\n dtype_max = dtype_limits(image, clip_negative=False)[1]\n\n if low_threshold is None:\n low_threshold = 0.1\n elif use_quantiles:\n if not(0.0 <= low_threshold <= 1.0):\n raise ValueError(\"Quantile thresholds must be between 0 and 1.\")\n else:\n low_threshold /= dtype_max\n\n if high_threshold is None:\n high_threshold = 0.2\n elif use_quantiles:\n if not(0.0 <= high_threshold <= 1.0):\n raise ValueError(\"Quantile thresholds must be between 0 and 1.\")\n else:\n high_threshold /= dtype_max\n\n if high_threshold < low_threshold:\n raise ValueError(\"low_threshold should be lower then high_threshold\")\n\n # Image filtering\n smoothed, eroded_mask = _preprocess(image, mask, sigma, mode, cval)\n\n # Gradient magnitude estimation\n jsobel = ndi.sobel(smoothed, axis=1)\n isobel = ndi.sobel(smoothed, axis=0)\n magnitude = isobel * isobel\n magnitude += jsobel * jsobel\n np.sqrt(magnitude, out=magnitude)\n\n if use_quantiles:\n low_threshold, high_threshold = np.percentile(magnitude,\n [100.0 * low_threshold,\n 100.0 * high_threshold])\n\n # Non-maximum suppression\n low_masked = _nonmaximum_suppression_bilinear(\n isobel, jsobel, magnitude, eroded_mask, low_threshold\n )\n\n # Double thresholding and edge tracking\n #\n # Segment the low-mask, then only keep low-segments that have\n # some high_mask component in them\n #\n low_mask = low_masked > 0\n strel = np.ones((3, 3), bool)\n labels, count = ndi.label(low_mask, strel)\n if count == 0:\n return low_mask\n\n high_mask = low_mask & (low_masked >= high_threshold)\n nonzero_sums = np.unique(labels[high_mask])\n good_label = np.zeros((count + 1,), bool)\n good_label[nonzero_sums] = True\n output_mask = good_label[labels]\n return output_mask\n", "path": "skimage/feature/_canny.py"}]} | 3,913 | 292 |
gh_patches_debug_36746 | rasdani/github-patches | git_diff | DataDog__dd-agent-2965 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support ECDSA for ssh_check
ssh_check.py does not support ECDSA ssh keys.
paramiko supports ECDSA ssh keys:
http://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey
I changed ssh_check.py, but it's not working.
```
2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 746, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/agent/checks.d/ssh_check.py", line 70, in check
password=conf.password, pkey=private_key)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 307, in connect
look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 519, in _auth
raise saved_exception
AuthenticationException: Authentication failed.
```
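For illustration, a minimal sketch of how paramiko could load an ECDSA key through the same interface used for RSA keys; the `key_type` parameter and helper are assumptions for the example, not part of the existing check:

```python
import paramiko

def load_private_key(path, key_type="rsa"):
    # ECDSAKey exposes the same from_private_key_file() constructor as RSAKey,
    # so the check only needs to pick the right class for the configured type.
    key_class = paramiko.ECDSAKey if key_type == "ecdsa" else paramiko.RSAKey
    return key_class.from_private_key_file(path)
```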
</issue>
<code>
[start of checks.d/ssh_check.py]
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 from collections import namedtuple
7 import time
8
9 # 3p
10 import paramiko
11
12 # project
13 from checks import AgentCheck
14
15
16 class CheckSSH(AgentCheck):
17
18 OPTIONS = [
19 ('host', True, None, str),
20 ('port', False, 22, int),
21 ('username', True, None, str),
22 ('password', False, None, str),
23 ('private_key_file', False, None, str),
24 ('sftp_check', False, True, bool),
25 ('add_missing_keys', False, False, bool),
26 ]
27
28 Config = namedtuple('Config', [
29 'host',
30 'port',
31 'username',
32 'password',
33 'private_key_file',
34 'sftp_check',
35 'add_missing_keys',
36 ])
37
38 def _load_conf(self, instance):
39 params = []
40 for option, required, default, expected_type in self.OPTIONS:
41 value = instance.get(option)
42 if required and (not value or type(value)) != expected_type :
43 raise Exception("Please specify a valid {0}".format(option))
44
45 if value is None or type(value) != expected_type:
46 self.log.debug("Bad or missing value for {0} parameter. Using default".format(option))
47 value = default
48
49 params.append(value)
50 return self.Config._make(params)
51
52 def check(self, instance):
53 conf = self._load_conf(instance)
54 tags = ["instance:{0}-{1}".format(conf.host, conf.port)]
55
56 private_key = None
57 try:
58 private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
59 except IOError:
60 self.warning("Unable to find private key file: {}".format(conf.private_key_file))
61 except paramiko.ssh_exception.PasswordRequiredException:
62 self.warning("Private key file is encrypted but no password was given")
63 except paramiko.ssh_exception.SSHException:
64 self.warning("Private key file is invalid")
65
66 client = paramiko.SSHClient()
67 if conf.add_missing_keys:
68 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
69 client.load_system_host_keys()
70
71 exception_message = None
72 #Service Availability to check status of SSH
73 try:
74 client.connect(conf.host, port=conf.port, username=conf.username,
75 password=conf.password, pkey=private_key)
76 self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
77 message=exception_message)
78
79 except Exception as e:
80 exception_message = str(e)
81 status = AgentCheck.CRITICAL
82 self.service_check('ssh.can_connect', status, tags=tags,
83 message=exception_message)
84 if conf.sftp_check:
85 self.service_check('sftp.can_connect', status, tags=tags,
86 message=exception_message)
87 raise
88
89 #Service Availability to check status of SFTP
90 if conf.sftp_check:
91 try:
92 sftp = client.open_sftp()
93 #Check response time of SFTP
94 start_time = time.time()
95 sftp.listdir('.')
96 status = AgentCheck.OK
97 end_time = time.time()
98 time_taken = end_time - start_time
99 self.gauge('sftp.response_time', time_taken, tags=tags)
100
101 except Exception as e:
102 exception_message = str(e)
103 status = AgentCheck.CRITICAL
104
105 if exception_message is None:
106 exception_message = "No errors occured"
107
108 self.service_check('sftp.can_connect', status, tags=tags,
109 message=exception_message)
110
[end of checks.d/ssh_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py
--- a/checks.d/ssh_check.py
+++ b/checks.d/ssh_check.py
@@ -21,6 +21,7 @@
('username', True, None, str),
('password', False, None, str),
('private_key_file', False, None, str),
+ ('private_key_type', False, 'rsa', str),
('sftp_check', False, True, bool),
('add_missing_keys', False, False, bool),
]
@@ -31,6 +32,7 @@
'username',
'password',
'private_key_file',
+ 'private_key_type',
'sftp_check',
'add_missing_keys',
])
@@ -55,7 +57,10 @@
private_key = None
try:
- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
+ if conf.private_key_type == 'ecdsa':
+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)
+ else:
+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
except IOError:
self.warning("Unable to find private key file: {}".format(conf.private_key_file))
except paramiko.ssh_exception.PasswordRequiredException:
@@ -69,11 +74,11 @@
client.load_system_host_keys()
exception_message = None
- #Service Availability to check status of SSH
+ # Service Availability to check status of SSH
try:
client.connect(conf.host, port=conf.port, username=conf.username,
password=conf.password, pkey=private_key)
- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
message=exception_message)
except Exception as e:
@@ -86,7 +91,7 @@
message=exception_message)
raise
- #Service Availability to check status of SFTP
+ # Service Availability to check status of SFTP
if conf.sftp_check:
try:
sftp = client.open_sftp()
| {"golden_diff": "diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py\n--- a/checks.d/ssh_check.py\n+++ b/checks.d/ssh_check.py\n@@ -21,6 +21,7 @@\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n+ ('private_key_type', False, 'rsa', str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n@@ -31,6 +32,7 @@\n 'username',\n 'password',\n 'private_key_file',\n+ 'private_key_type',\n 'sftp_check',\n 'add_missing_keys',\n ])\n@@ -55,7 +57,10 @@\n \n private_key = None\n try:\n- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n+ if conf.private_key_type == 'ecdsa':\n+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)\n+ else:\n+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n@@ -69,11 +74,11 @@\n client.load_system_host_keys()\n \n exception_message = None\n- #Service Availability to check status of SSH\n+ # Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n \n except Exception as e:\n@@ -86,7 +91,7 @@\n message=exception_message)\n raise\n \n- #Service Availability to check status of SFTP\n+ # Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n", "issue": "Support ECDSA for ssh_check\nssh_check.py is not support ECDSA ssh key.\nparamiko is support ECDSA ssh key.\nhttp://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey\n\nI changes ssh_key.py, but It's not working.\n\n```\n2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed\nTraceback (most recent call last):\n File \"/opt/datadog-agent/agent/checks/__init__.py\", line 746, in run\n self.check(copy.deepcopy(instance))\n File \"/opt/datadog-agent/agent/checks.d/ssh_check.py\", line 70, in check\n password=conf.password, pkey=private_key)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 307, in connect\n look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 519, in _auth\n raise saved_exception\nAuthenticationException: Authentication failed.\n```\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom collections import namedtuple\nimport time\n\n# 3p\nimport paramiko\n\n# project\nfrom checks import AgentCheck\n\n\nclass CheckSSH(AgentCheck):\n\n OPTIONS = [\n ('host', True, None, str),\n ('port', False, 22, int),\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n\n Config = namedtuple('Config', [\n 'host',\n 'port',\n 'username',\n 'password',\n 'private_key_file',\n 'sftp_check',\n 'add_missing_keys',\n ])\n\n def _load_conf(self, instance):\n params = []\n for option, required, default, expected_type in self.OPTIONS:\n value = instance.get(option)\n if required and (not value or type(value)) != expected_type :\n raise Exception(\"Please specify a valid {0}\".format(option))\n\n if value is None or type(value) != expected_type:\n self.log.debug(\"Bad or missing value for {0} parameter. Using default\".format(option))\n value = default\n\n params.append(value)\n return self.Config._make(params)\n\n def check(self, instance):\n conf = self._load_conf(instance)\n tags = [\"instance:{0}-{1}\".format(conf.host, conf.port)]\n\n private_key = None\n try:\n private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n self.warning(\"Private key file is encrypted but no password was given\")\n except paramiko.ssh_exception.SSHException:\n self.warning(\"Private key file is invalid\")\n\n client = paramiko.SSHClient()\n if conf.add_missing_keys:\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.load_system_host_keys()\n\n exception_message = None\n #Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n self.service_check('ssh.can_connect', status, tags=tags,\n message=exception_message)\n if conf.sftp_check:\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n raise\n\n #Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n #Check response time of SFTP\n start_time = time.time()\n sftp.listdir('.')\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n self.gauge('sftp.response_time', time_taken, tags=tags)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n\n if exception_message is None:\n exception_message = \"No errors occured\"\n\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n", "path": "checks.d/ssh_check.py"}]} | 1,858 | 496 |
gh_patches_debug_2494 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Projectcontainer active projects count broken
https://mein.berlin.de/projects/stadtforum-berlin-wohnen/
shows `7 of 4` active projects.
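A plausible explanation, sketched below: filtering through the reverse `module__phase` relation joins one row per matching phase, so the same project can be counted several times unless the queryset is de-duplicated.

```python
# Hedged sketch -- names follow the ProjectContainer model shown in the code below.
now = timezone.now()
active = self.projects.filter(
    module__phase__start_date__lte=now,
    module__phase__end_date__gt=now,
)                                          # one row per matching phase -> possible duplicates
active_count = active.distinct().count()   # collapse duplicates before counting
```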
</issue>
<code>
[start of meinberlin/apps/projectcontainers/models.py]
1 from django.db import models
2 from django.utils import timezone
3 from django.utils.translation import ugettext_lazy as _
4
5 from adhocracy4.projects import models as project_models
6
7
8 class ProjectContainer(project_models.Project):
9 projects = models.ManyToManyField(
10 project_models.Project,
11 related_name='containers',
12 verbose_name=_('Projects')
13 )
14
15 @property
16 def not_archived_projects(self):
17 return self.projects.filter(is_archived=False)
18
19 @property
20 def active_projects(self):
21 now = timezone.now()
22 return self.projects.filter(
23 module__phase__start_date__lte=now,
24 module__phase__end_date__gt=now)
25
26 @property
27 def phases(self):
28 from adhocracy4.phases import models as phase_models
29 return phase_models.Phase.objects\
30 .filter(module__project__containers__id=self.id)
31
[end of meinberlin/apps/projectcontainers/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py
--- a/meinberlin/apps/projectcontainers/models.py
+++ b/meinberlin/apps/projectcontainers/models.py
@@ -21,7 +21,7 @@
now = timezone.now()
return self.projects.filter(
module__phase__start_date__lte=now,
- module__phase__end_date__gt=now)
+ module__phase__end_date__gt=now).distinct()
@property
def phases(self):
| {"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py\n--- a/meinberlin/apps/projectcontainers/models.py\n+++ b/meinberlin/apps/projectcontainers/models.py\n@@ -21,7 +21,7 @@\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n- module__phase__end_date__gt=now)\n+ module__phase__end_date__gt=now).distinct()\n \n @property\n def phases(self):\n", "issue": "Projectcontainer active projects count broken\nhttps://mein.berlin.de/projects/stadtforum-berlin-wohnen/\r\n\r\nshows `7 of 4` active projects.\n", "before_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models as project_models\n\n\nclass ProjectContainer(project_models.Project):\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='containers',\n verbose_name=_('Projects')\n )\n\n @property\n def not_archived_projects(self):\n return self.projects.filter(is_archived=False)\n\n @property\n def active_projects(self):\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n module__phase__end_date__gt=now)\n\n @property\n def phases(self):\n from adhocracy4.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project__containers__id=self.id)\n", "path": "meinberlin/apps/projectcontainers/models.py"}]} | 815 | 122 |
gh_patches_debug_20973 | rasdani/github-patches | git_diff | cal-itp__benefits-1793 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SBMTD Mobility Pass Enrollment: Small adjustments to application copy
Incorporating suggested feedback from the engineering team to improve application copy for the SBMTD Mobility Pass enrollment pathway.
A couple notes:
- We will leave "Mobility Pass" untranslated and capitalized in the Spanish copy, as this is the proper name of the product in English at SBMTD.
- In some places, we used "four-digit" rather than "4-digit" when referring to the Mobility Pass number. We will standardize on "4-digit" everywhere. This style is more concise and consistent with the implementation for MST's Courtesy Card.
## Acceptance Criteria
- [x] English Copy and Spanish Copy in the Mobility Pass pathway in the Benefits app matches the copy in the SBMTD tab of the [copy source spreadsheet](https://docs.google.com/spreadsheets/d/1_Gi_YbJr4ZuXCOsnOWaewvHqUO1nC1nKqiVDHvw0118/edit?usp=sharing).
</issue>
<code>
[start of benefits/eligibility/forms.py]
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4 import logging
5
6 from django import forms
7 from django.utils.translation import gettext_lazy as _
8
9 from benefits.core import models, recaptcha, widgets
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerifierSelectionForm(forms.Form):
16 """Form to capture eligibility verifier selection."""
17
18 action_url = "eligibility:index"
19 id = "form-verifier-selection"
20 method = "POST"
21
22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
23 # sets label to empty string so the radio_select template can override the label style
24 submit_value = _("Choose this Benefit")
25
26 def __init__(self, agency: models.TransitAgency, *args, **kwargs):
27 super().__init__(*args, **kwargs)
28 verifiers = agency.eligibility_verifiers.filter(active=True)
29
30 self.classes = "col-lg-8"
31 # second element is not used since we render the whole label using selection_label_template,
32 # therefore set to None
33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
35
36 def clean(self):
37 if not recaptcha.verify(self.data):
38 raise forms.ValidationError("reCAPTCHA failed")
39
40
41 class EligibilityVerificationForm(forms.Form):
42 """Form to collect eligibility verification details."""
43
44 action_url = "eligibility:confirm"
45 id = "form-eligibility-verification"
46 method = "POST"
47
48 submit_value = _("Check eligibility")
49 submitting_value = _("Checking")
50
51 _error_messages = {
52 "invalid": _("Check your input. The format looks wrong."),
53 "missing": _("This field is required."),
54 }
55
56 def __init__(
57 self,
58 title,
59 headline,
60 blurb,
61 name_label,
62 name_placeholder,
63 name_help_text,
64 sub_label,
65 sub_placeholder,
66 sub_help_text,
67 name_max_length=None,
68 sub_input_mode=None,
69 sub_max_length=None,
70 sub_pattern=None,
71 *args,
72 **kwargs,
73 ):
74 """Initialize a new EligibilityVerifier form.
75
76 Args:
77 title (str): The page (i.e. tab) title for the form's page.
78
79 headline (str): The <h1> on the form's page.
80
81 blurb (str): Intro <p> on the form's page.
82
83 name_label (str): Label for the name form field.
84
85 name_placeholder (str): Field placeholder for the name form field.
86
87 name_help_text (str): Extra help text for the name form field.
88
89 sub_label (str): Label for the sub form field.
90
91 sub_placeholder (str): Field placeholder for the sub form field.
92
93 sub_help_text (str): Extra help text for the sub form field.
94
95 name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier
96
97 sub_input_mode (str): Input mode can be "numeric", "tel", "search", etc. to override default "text" keyboard on
98 mobile devices
99
100 sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier
101
102 sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier
103
104 Extra args and kwargs are passed through to the underlying django.forms.Form.
105 """
106 super().__init__(auto_id=True, label_suffix="", *args, **kwargs)
107
108 self.title = title
109 self.headline = headline
110 self.blurb = blurb
111
112 self.classes = "col-lg-6"
113 sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)
114 if sub_pattern:
115 sub_widget.attrs.update({"pattern": sub_pattern})
116 if sub_input_mode:
117 sub_widget.attrs.update({"inputmode": sub_input_mode})
118 if sub_max_length:
119 sub_widget.attrs.update({"maxlength": sub_max_length})
120
121 self.fields["sub"] = forms.CharField(
122 label=sub_label,
123 widget=sub_widget,
124 help_text=sub_help_text,
125 )
126
127 name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)
128 if name_max_length:
129 name_widget.attrs.update({"maxlength": name_max_length})
130
131 self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)
132
133 def clean(self):
134 if not recaptcha.verify(self.data):
135 raise forms.ValidationError("reCAPTCHA failed")
136
137
138 class MSTCourtesyCard(EligibilityVerificationForm):
139 """EligibilityVerification form for the MST Courtesy Card."""
140
141 def __init__(self, *args, **kwargs):
142 super().__init__(
143 title=_("Agency card information"),
144 headline=_("Let’s see if we can confirm your eligibility."),
145 blurb=_("Please input your Courtesy Card number and last name below to confirm your eligibility."),
146 name_label=_("Last name (as it appears on Courtesy Card)"),
147 name_placeholder="Garcia",
148 name_help_text=_("We use this to help confirm your Courtesy Card."),
149 sub_label=_("MST Courtesy Card number"),
150 sub_help_text=_("This is a 5-digit number on the front and back of your card."),
151 sub_placeholder="12345",
152 name_max_length=255,
153 sub_input_mode="numeric",
154 sub_max_length=5,
155 sub_pattern=r"\d{5}",
156 *args,
157 **kwargs,
158 )
159
160
161 class SBMTDMobilityPass(EligibilityVerificationForm):
162 """EligibilityVerification form for the SBMTD Mobility Pass."""
163
164 def __init__(self, *args, **kwargs):
165 super().__init__(
166 title=_("Agency card information"),
167 headline=_("Let’s see if we can confirm your eligibility."),
168 blurb=_("Please input your Mobility Pass number and last name below to confirm your eligibility."),
169 name_label=_("Last name (as it appears on Mobility Pass)"),
170 name_placeholder="Garcia",
171 name_help_text=_("We use this to help confirm your Mobility Pass."),
172 sub_label=_("SBMTD Mobility Pass number"),
173 sub_help_text=_("This is a 4-digit number on the front and back of your card."),
174 sub_placeholder="1234",
175 name_max_length=255,
176 sub_input_mode="numeric",
177 sub_max_length=4,
178 sub_pattern=r"\d{4}",
179 *args,
180 **kwargs,
181 )
182
[end of benefits/eligibility/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -166,11 +166,11 @@
title=_("Agency card information"),
headline=_("Let’s see if we can confirm your eligibility."),
blurb=_("Please input your Mobility Pass number and last name below to confirm your eligibility."),
- name_label=_("Last name (as it appears on Mobility Pass)"),
+ name_label=_("Last name (as it appears on Mobility Pass card)"),
name_placeholder="Garcia",
name_help_text=_("We use this to help confirm your Mobility Pass."),
sub_label=_("SBMTD Mobility Pass number"),
- sub_help_text=_("This is a 4-digit number on the front and back of your card."),
+ sub_help_text=_("This is a 4-digit number on the back of your card."),
sub_placeholder="1234",
name_max_length=255,
sub_input_mode="numeric",
| {"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -166,11 +166,11 @@\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Mobility Pass number and last name below to confirm your eligibility.\"),\n- name_label=_(\"Last name (as it appears on Mobility Pass)\"),\n+ name_label=_(\"Last name (as it appears on Mobility Pass card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Mobility Pass.\"),\n sub_label=_(\"SBMTD Mobility Pass number\"),\n- sub_help_text=_(\"This is a 4-digit number on the front and back of your card.\"),\n+ sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n", "issue": "SBMTD Mobility Pass Enrollment: Small adjustments to application copy\nIncorporating suggested feedback from the engineering team to improve application copy for the SBMTD Mobility Pass enrollment pathway.\r\n\r\nA couple notes:\r\n- We will leave \"Mobility Pass\" untranslated and capitalized in the Spanish copy, as this is the proper name of the product in English at SBMTD. \r\n- In some places, we used \"four-digit\" rather than \"4-digit\" when referring to the Mobility Pass number. We will standardize on \"4-digit\" everywhere. This style is more concise and consistent with the implementation for MST's Courtesy Card.\r\n\r\n## Acceptance Criteria\r\n\r\n- [x] English Copy and Spanish Copy in the Mobility Pass pathway in the Benefits app matches the copy in the SBMTD tab of the [copy source spreadsheet](https://docs.google.com/spreadsheets/d/1_Gi_YbJr4ZuXCOsnOWaewvHqUO1nC1nKqiVDHvw0118/edit?usp=sharing). 
\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this Benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.eligibility_verifiers.filter(active=True)\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Check eligibility\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. 
to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n sub_label=_(\"MST Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Mobility Pass.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Mobility Pass number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Mobility Pass)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Mobility Pass.\"),\n sub_label=_(\"SBMTD Mobility Pass number\"),\n sub_help_text=_(\"This is a 4-digit number on the front and back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}]} | 2,597 | 233 |
gh_patches_debug_589 | rasdani/github-patches | git_diff | pex-tool__pex-1377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.43
On the docket:
+ [x] Support more verbose output for interpreter info. (#1347)
+ [x] Fix Pex emitting warnings about its Pip PEX venv. (#1351)
+ [x] Fix execution modes. (#1353)
+ [x] Warn for PEX env vars unsupported by venv. (#1354)
+ [x] Do not suppress pex output in bidst_pex (#1358)
+ [x] Using --platform manylinux2010 includes pyarrow wheel for manylinux2014 #1355
+ [x] Fix --no-manylinux. #1365
+ [x] Environment markers are incorrectly evaluated for --platform resolves. #1366
+ [x] Pex probes wheel metadata incorrectly. #1375
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.42"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.42"
+__version__ = "2.1.43"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.42\"\n+__version__ = \"2.1.43\"\n", "issue": "Release 2.1.43\nOn the docket:\r\n+ [x] Support more verbose output for interpreter info. (#1347) \r\n+ [x] Fix Pex emitting warnings about its Pip PEX venv. (#1351)\r\n+ [x] Fix execution modes. (#1353) \r\n+ [x] Warn for PEX env vars unsupported by venv. (#1354)\r\n+ [x] Do not suppress pex output in bidst_pex (#1358)\r\n+ [x] Using --platform manylinux2010 includes pyarrow wheel for manylinux2014 #1355\r\n+ [x] Fix --no-manylinux. #1365\r\n+ [x] Environment markers are incorrectly evaluated for --platform resolves. #1366\r\n+ [x] Pex probes wheel metadata incorrectly. #1375\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.42\"\n", "path": "pex/version.py"}]} | 778 | 96 |
gh_patches_debug_38386 | rasdani/github-patches | git_diff | pymeasure__pymeasure-782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How to test adapter.connection calls
Some instruments call the adapter's `connection` property, like #697, which makes the ProtocolAdapter fail.
The problem is that you are not normally supposed to use the adapter's connection directly, because the `connection` depends on the specific adapter type. If an instrument calls the connection, the ProtocolAdapter fails. Instruments should not be adjusted to make the ProtocolAdapter work.
Two solutions:
- A generic mock connection seems good, because that would cover many use cases; otherwise we would have to add several methods (clear, close, timeout property, get_stb...) individually. A sketch of this option is shown below.
- Or we could add timeout as a property to the `Adapter`, which would call its connection depending on the Adapter type (the ProtocolAdapter stores it as a variable). Similarly for `read_stb`, `clear`, etc.
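A rough sketch of the first option, using `unittest.mock.MagicMock`; the attribute and method names are illustrative assumptions:

```python
from unittest.mock import MagicMock

connection = MagicMock()
connection.timeout = 1000               # attribute reads simply return the stored value
connection.read_stb.return_value = 0    # method calls return whatever the test configures
connection.clear()                      # unconfigured calls succeed silently
assert connection.clear.called is True  # and can still be asserted on afterwards
```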
</issue>
<code>
[start of pymeasure/adapters/protocol.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2022 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import logging
26
27 from .adapter import Adapter
28
29 log = logging.getLogger(__name__)
30 log.addHandler(logging.NullHandler())
31
32
33 def to_bytes(command):
34 """Change `command` to a bytes object"""
35 if isinstance(command, (bytes, bytearray)):
36 return command
37 elif command is None:
38 return b""
39 elif isinstance(command, str):
40 return command.encode("utf-8")
41 elif isinstance(command, (list, tuple)):
42 return bytes(command)
43 elif isinstance(command, (int, float)):
44 return str(command).encode("utf-8")
45 raise TypeError(f"Invalid input of type {type(command).__name__}.")
46
47
48 class ProtocolAdapter(Adapter):
49 """ Adapter class for testing the command exchange protocol without instrument hardware.
50
51 This adapter is primarily meant for use within :func:`pymeasure.test.expected_protocol()`.
52
53 :param list comm_pairs: List of "reference" message pair tuples. The first element is
54 what is sent to the instrument, the second one is the returned message.
55 'None' indicates that a pair member (write or read) does not exist.
56 The messages do **not** include the termination characters.
57 """
58
59 def __init__(self, comm_pairs=[], preprocess_reply=None, **kwargs):
60 """Generate the adapter and initialize internal buffers."""
61 super().__init__(preprocess_reply=preprocess_reply, **kwargs)
62 assert isinstance(comm_pairs, (list, tuple)), (
63 "Parameter comm_pairs has to be a list or tuple.")
64 for pair in comm_pairs:
65 if len(pair) != 2:
66 raise ValueError(f'Comm_pairs element {pair} does not have two elements!')
67 self._read_buffer = b""
68 self._write_buffer = b""
69 self.comm_pairs = comm_pairs
70 self._index = 0
71
72 def _write(self, command, **kwargs):
73 """Compare the command with the expected one and fill the read."""
74 self._write_bytes(to_bytes(command))
75 assert self._write_buffer == b"", (
76 f"Written bytes '{self._write_buffer}' do not match expected "
77 f"'{self.comm_pairs[self._index][0]}'.")
78
79 def _write_bytes(self, content, **kwargs):
80 """Write the bytes `content`. If a command is full, fill the read."""
81 self._write_buffer += content
82 try:
83 p_write, p_read = self.comm_pairs[self._index]
84 except IndexError:
85 raise ValueError(f"No communication pair left to write {content}.")
86 if self._write_buffer == to_bytes(p_write):
87 assert self._read_buffer == b"", (
88 f"Unread response '{self._read_buffer}' present when writing. "
89 "Maybe a property's 'check_set_errors' is not accounted for, "
90 "a read() call is missing in a method, or the defined protocol is incorrect?"
91 )
92 # Clear the write buffer
93 self._write_buffer = b""
94 self._read_buffer = to_bytes(p_read)
95 self._index += 1
96 # If _write_buffer does _not_ agree with p_write, this is not cause for
97 # concern, because you can in principle compose a message over several writes.
98 # It's not clear how relevant this is in real-world use, but it's analogous
99 # to the possibility to fetch a (binary) message over several reads.
100
101 def _read(self, **kwargs):
102 """Return an already present or freshly fetched read buffer as a string."""
103 return self._read_bytes(-1).decode("utf-8")
104
105 def _read_bytes(self, count, **kwargs):
106 """Read `count` number of bytes from the buffer.
107
108 :param int count: Number of bytes to read. If -1, return the buffer.
109 """
110 if self._read_buffer:
111 if count == -1:
112 read = self._read_buffer
113 self._read_buffer = b""
114 else:
115 read = self._read_buffer[:count]
116 self._read_buffer = self._read_buffer[count:]
117 return read
118 else:
119 try:
120 p_write, p_read = self.comm_pairs[self._index]
121 except IndexError:
122 raise ValueError("No communication pair left for reading.")
123 assert p_write is None, (
124 f"Written {self._write_buffer} do not match expected {p_write} prior to read."
125 if self._write_buffer
126 else "Unexpected read without prior write.")
127 self._index += 1
128 if count == -1:
129 # _read_buffer is already empty, no action required.
130 return to_bytes(p_read)
131 else:
132 self._read_buffer = to_bytes(p_read)[count:]
133 return to_bytes(p_read)[:count]
134
[end of pymeasure/adapters/protocol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pymeasure/adapters/protocol.py b/pymeasure/adapters/protocol.py
--- a/pymeasure/adapters/protocol.py
+++ b/pymeasure/adapters/protocol.py
@@ -23,6 +23,7 @@
#
import logging
+from unittest.mock import MagicMock
from .adapter import Adapter
@@ -50,15 +51,29 @@
This adapter is primarily meant for use within :func:`pymeasure.test.expected_protocol()`.
+ The :attr:`connection` attribute is a :class:`unittest.mock.MagicMock` such
+ that every call returns. If you want to set a return value, you can use
+ :code:`adapter.connection.some_method.return_value = 7`,
+ such that a call to :code:`adapter.connection.some_method()` will return `7`.
+ Similarly, you can verify that this call to the connection method happened
+ with :code:`assert adapter.connection.some_method.called is True`.
+ You can specify dictionaries with return values of attributes and methods.
+
:param list comm_pairs: List of "reference" message pair tuples. The first element is
what is sent to the instrument, the second one is the returned message.
'None' indicates that a pair member (write or read) does not exist.
The messages do **not** include the termination characters.
+ :param connection_attributes: Dictionary of connection attributes and their values.
+ :param connection_methods: Dictionary of method names of the connection and their return values.
"""
- def __init__(self, comm_pairs=[], preprocess_reply=None, **kwargs):
+ def __init__(self, comm_pairs=[], preprocess_reply=None,
+ connection_attributes={},
+ connection_methods={},
+ **kwargs):
"""Generate the adapter and initialize internal buffers."""
super().__init__(preprocess_reply=preprocess_reply, **kwargs)
+ # Setup communication
assert isinstance(comm_pairs, (list, tuple)), (
"Parameter comm_pairs has to be a list or tuple.")
for pair in comm_pairs:
@@ -68,6 +83,15 @@
self._write_buffer = b""
self.comm_pairs = comm_pairs
self._index = 0
+ # Setup attributes
+ self._setup_connection(connection_attributes, connection_methods)
+
+ def _setup_connection(self, connection_attributes, connection_methods):
+ self.connection = MagicMock()
+ for key, value in connection_attributes.items():
+ setattr(self.connection, key, value)
+ for key, value in connection_methods.items():
+ getattr(self.connection, key).return_value = value
def _write(self, command, **kwargs):
"""Compare the command with the expected one and fill the read."""
| {"golden_diff": "diff --git a/pymeasure/adapters/protocol.py b/pymeasure/adapters/protocol.py\n--- a/pymeasure/adapters/protocol.py\n+++ b/pymeasure/adapters/protocol.py\n@@ -23,6 +23,7 @@\n #\n \n import logging\n+from unittest.mock import MagicMock\n \n from .adapter import Adapter\n \n@@ -50,15 +51,29 @@\n \n This adapter is primarily meant for use within :func:`pymeasure.test.expected_protocol()`.\n \n+ The :attr:`connection` attribute is a :class:`unittest.mock.MagicMock` such\n+ that every call returns. If you want to set a return value, you can use\n+ :code:`adapter.connection.some_method.return_value = 7`,\n+ such that a call to :code:`adapter.connection.some_method()` will return `7`.\n+ Similarly, you can verify that this call to the connection method happened\n+ with :code:`assert adapter.connection.some_method.called is True`.\n+ You can specify dictionaries with return values of attributes and methods.\n+\n :param list comm_pairs: List of \"reference\" message pair tuples. The first element is\n what is sent to the instrument, the second one is the returned message.\n 'None' indicates that a pair member (write or read) does not exist.\n The messages do **not** include the termination characters.\n+ :param connection_attributes: Dictionary of connection attributes and their values.\n+ :param connection_methods: Dictionary of method names of the connection and their return values.\n \"\"\"\n \n- def __init__(self, comm_pairs=[], preprocess_reply=None, **kwargs):\n+ def __init__(self, comm_pairs=[], preprocess_reply=None,\n+ connection_attributes={},\n+ connection_methods={},\n+ **kwargs):\n \"\"\"Generate the adapter and initialize internal buffers.\"\"\"\n super().__init__(preprocess_reply=preprocess_reply, **kwargs)\n+ # Setup communication\n assert isinstance(comm_pairs, (list, tuple)), (\n \"Parameter comm_pairs has to be a list or tuple.\")\n for pair in comm_pairs:\n@@ -68,6 +83,15 @@\n self._write_buffer = b\"\"\n self.comm_pairs = comm_pairs\n self._index = 0\n+ # Setup attributes\n+ self._setup_connection(connection_attributes, connection_methods)\n+\n+ def _setup_connection(self, connection_attributes, connection_methods):\n+ self.connection = MagicMock()\n+ for key, value in connection_attributes.items():\n+ setattr(self.connection, key, value)\n+ for key, value in connection_methods.items():\n+ getattr(self.connection, key).return_value = value\n \n def _write(self, command, **kwargs):\n \"\"\"Compare the command with the expected one and fill the read.\"\"\"\n", "issue": "How to test adapter.connection calls\nSome instruments call the adapter's `connection`property, like #697 which makes ProtocolAdapter fail. Instruments should not be adjusted to make the ProtocolAdapter work.\r\n\r\nThe problem is, that you are not supposed to use normally the adapter's connection directly, because the `connection` depends on the specific adapter type. If an instrument calls the connection, the ProtocolAdapter fails. Instruments should not be adjusted to make the ProtocolAdapter work.\r\n\r\nTwo solutions:\r\n\r\n- A generic mock connection seems good, because that would care for many use cases. Otherwise we had to add several methods (clear, close, timeout property, get_stb...) individually.\r\n- Or we could add timeout as a property to the `Adapter`, which will call its connection depending on the Adapter type (ProtocolAdapter stores it as a variable). 
Similarly the \"read_stb\" and `clear` etc.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2022 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nfrom .adapter import Adapter\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\ndef to_bytes(command):\n \"\"\"Change `command` to a bytes object\"\"\"\n if isinstance(command, (bytes, bytearray)):\n return command\n elif command is None:\n return b\"\"\n elif isinstance(command, str):\n return command.encode(\"utf-8\")\n elif isinstance(command, (list, tuple)):\n return bytes(command)\n elif isinstance(command, (int, float)):\n return str(command).encode(\"utf-8\")\n raise TypeError(f\"Invalid input of type {type(command).__name__}.\")\n\n\nclass ProtocolAdapter(Adapter):\n \"\"\" Adapter class for testing the command exchange protocol without instrument hardware.\n\n This adapter is primarily meant for use within :func:`pymeasure.test.expected_protocol()`.\n\n :param list comm_pairs: List of \"reference\" message pair tuples. The first element is\n what is sent to the instrument, the second one is the returned message.\n 'None' indicates that a pair member (write or read) does not exist.\n The messages do **not** include the termination characters.\n \"\"\"\n\n def __init__(self, comm_pairs=[], preprocess_reply=None, **kwargs):\n \"\"\"Generate the adapter and initialize internal buffers.\"\"\"\n super().__init__(preprocess_reply=preprocess_reply, **kwargs)\n assert isinstance(comm_pairs, (list, tuple)), (\n \"Parameter comm_pairs has to be a list or tuple.\")\n for pair in comm_pairs:\n if len(pair) != 2:\n raise ValueError(f'Comm_pairs element {pair} does not have two elements!')\n self._read_buffer = b\"\"\n self._write_buffer = b\"\"\n self.comm_pairs = comm_pairs\n self._index = 0\n\n def _write(self, command, **kwargs):\n \"\"\"Compare the command with the expected one and fill the read.\"\"\"\n self._write_bytes(to_bytes(command))\n assert self._write_buffer == b\"\", (\n f\"Written bytes '{self._write_buffer}' do not match expected \"\n f\"'{self.comm_pairs[self._index][0]}'.\")\n\n def _write_bytes(self, content, **kwargs):\n \"\"\"Write the bytes `content`. 
If a command is full, fill the read.\"\"\"\n self._write_buffer += content\n try:\n p_write, p_read = self.comm_pairs[self._index]\n except IndexError:\n raise ValueError(f\"No communication pair left to write {content}.\")\n if self._write_buffer == to_bytes(p_write):\n assert self._read_buffer == b\"\", (\n f\"Unread response '{self._read_buffer}' present when writing. \"\n \"Maybe a property's 'check_set_errors' is not accounted for, \"\n \"a read() call is missing in a method, or the defined protocol is incorrect?\"\n )\n # Clear the write buffer\n self._write_buffer = b\"\"\n self._read_buffer = to_bytes(p_read)\n self._index += 1\n # If _write_buffer does _not_ agree with p_write, this is not cause for\n # concern, because you can in principle compose a message over several writes.\n # It's not clear how relevant this is in real-world use, but it's analogous\n # to the possibility to fetch a (binary) message over several reads.\n\n def _read(self, **kwargs):\n \"\"\"Return an already present or freshly fetched read buffer as a string.\"\"\"\n return self._read_bytes(-1).decode(\"utf-8\")\n\n def _read_bytes(self, count, **kwargs):\n \"\"\"Read `count` number of bytes from the buffer.\n\n :param int count: Number of bytes to read. If -1, return the buffer.\n \"\"\"\n if self._read_buffer:\n if count == -1:\n read = self._read_buffer\n self._read_buffer = b\"\"\n else:\n read = self._read_buffer[:count]\n self._read_buffer = self._read_buffer[count:]\n return read\n else:\n try:\n p_write, p_read = self.comm_pairs[self._index]\n except IndexError:\n raise ValueError(\"No communication pair left for reading.\")\n assert p_write is None, (\n f\"Written {self._write_buffer} do not match expected {p_write} prior to read.\"\n if self._write_buffer\n else \"Unexpected read without prior write.\")\n self._index += 1\n if count == -1:\n # _read_buffer is already empty, no action required.\n return to_bytes(p_read)\n else:\n self._read_buffer = to_bytes(p_read)[count:]\n return to_bytes(p_read)[:count]\n", "path": "pymeasure/adapters/protocol.py"}]} | 2,275 | 597 |
gh_patches_debug_16783 | rasdani/github-patches | git_diff | kivy__python-for-android-2842 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libzmq recipe build fails
### Logs
```
[DEBUG]: CXX src/src_libzmq_la-router.lo
[DEBUG]: In file included from src/mtrie.cpp:32:
[DEBUG]: ./src/generic_mtrie_impl.hpp:52:46: error: ISO C++ requires the name after '::~' to be found in the same scope as the name before '::~' [-Werror,-Wdtor-name]
[DEBUG]: template <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()
[DEBUG]: ~~~~~~~~~~~~~~~~~~~~~~~^~
[DEBUG]: ::generic_mtrie_t
[DEBUG]: CXX src/src_libzmq_la-scatter.lo
[DEBUG]: 1 error generated.
```
</issue>
<code>
[start of pythonforandroid/recipes/libzmq/__init__.py]
1 from pythonforandroid.recipe import Recipe
2 from pythonforandroid.logger import shprint
3 from pythonforandroid.util import current_directory
4 from os.path import join
5 import sh
6
7
8 class LibZMQRecipe(Recipe):
9 version = '4.3.2'
10 url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'
11 depends = []
12 built_libraries = {'libzmq.so': 'src/.libs'}
13 need_stl_shared = True
14
15 def build_arch(self, arch):
16 env = self.get_recipe_env(arch)
17 #
18 # libsodium_recipe = Recipe.get_recipe('libsodium', self.ctx)
19 # libsodium_dir = libsodium_recipe.get_build_dir(arch.arch)
20 # env['sodium_CFLAGS'] = '-I{}'.format(join(
21 # libsodium_dir, 'src'))
22 # env['sodium_LDLAGS'] = '-L{}'.format(join(
23 # libsodium_dir, 'src', 'libsodium', '.libs'))
24
25 curdir = self.get_build_dir(arch.arch)
26 prefix = join(curdir, "install")
27
28 with current_directory(curdir):
29 bash = sh.Command('sh')
30 shprint(
31 bash, './configure',
32 '--host={}'.format(arch.command_prefix),
33 '--without-documentation',
34 '--prefix={}'.format(prefix),
35 '--with-libsodium=no',
36 '--disable-libunwind',
37 _env=env)
38 shprint(sh.make, _env=env)
39 shprint(sh.make, 'install', _env=env)
40
41
42 recipe = LibZMQRecipe()
43
[end of pythonforandroid/recipes/libzmq/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/libzmq/__init__.py b/pythonforandroid/recipes/libzmq/__init__.py
--- a/pythonforandroid/recipes/libzmq/__init__.py
+++ b/pythonforandroid/recipes/libzmq/__init__.py
@@ -6,7 +6,7 @@
class LibZMQRecipe(Recipe):
- version = '4.3.2'
+ version = '4.3.4'
url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'
depends = []
built_libraries = {'libzmq.so': 'src/.libs'}
@@ -34,6 +34,7 @@
'--prefix={}'.format(prefix),
'--with-libsodium=no',
'--disable-libunwind',
+ '--disable-Werror',
_env=env)
shprint(sh.make, _env=env)
shprint(sh.make, 'install', _env=env)
| {"golden_diff": "diff --git a/pythonforandroid/recipes/libzmq/__init__.py b/pythonforandroid/recipes/libzmq/__init__.py\n--- a/pythonforandroid/recipes/libzmq/__init__.py\n+++ b/pythonforandroid/recipes/libzmq/__init__.py\n@@ -6,7 +6,7 @@\n \n \n class LibZMQRecipe(Recipe):\n- version = '4.3.2'\n+ version = '4.3.4'\n url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'\n depends = []\n built_libraries = {'libzmq.so': 'src/.libs'}\n@@ -34,6 +34,7 @@\n '--prefix={}'.format(prefix),\n '--with-libsodium=no',\n '--disable-libunwind',\n+ '--disable-Werror',\n _env=env)\n shprint(sh.make, _env=env)\n shprint(sh.make, 'install', _env=env)\n", "issue": "libzmq recipy build fail\n\r\n\r\n### Logs\r\n\r\n```\r\n[1m[90m[DEBUG][39m[0m: \t CXX src/src_libzmq_la-router.lo\r\n[1m[90m[DEBUG][39m[0m: \tIn file included from src/mtrie.cpp:32:\r\n[1m[90m[DEBUG][39m[0m: \t./src/generic_mtrie_impl.hpp:52:46: error: ISO C++ requires the name after '::~' to be found in the same scope as the name before '::~' [-Werror,-Wdtor-name]\r\n[1m[90m[DEBUG][39m[0m: \ttemplate <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()\r\n[1m[90m[DEBUG][39m[0m: \t ~~~~~~~~~~~~~~~~~~~~~~~^~\r\n[1m[90m[DEBUG][39m[0m: \t ::generic_mtrie_t\r\n[1m[90m[DEBUG][39m[0m: \t CXX src/src_libzmq_la-scatter.lo\r\n[1m[90m[DEBUG][39m[0m: \t1 error generated.\r\n```\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.logger import shprint\nfrom pythonforandroid.util import current_directory\nfrom os.path import join\nimport sh\n\n\nclass LibZMQRecipe(Recipe):\n version = '4.3.2'\n url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'\n depends = []\n built_libraries = {'libzmq.so': 'src/.libs'}\n need_stl_shared = True\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n #\n # libsodium_recipe = Recipe.get_recipe('libsodium', self.ctx)\n # libsodium_dir = libsodium_recipe.get_build_dir(arch.arch)\n # env['sodium_CFLAGS'] = '-I{}'.format(join(\n # libsodium_dir, 'src'))\n # env['sodium_LDLAGS'] = '-L{}'.format(join(\n # libsodium_dir, 'src', 'libsodium', '.libs'))\n\n curdir = self.get_build_dir(arch.arch)\n prefix = join(curdir, \"install\")\n\n with current_directory(curdir):\n bash = sh.Command('sh')\n shprint(\n bash, './configure',\n '--host={}'.format(arch.command_prefix),\n '--without-documentation',\n '--prefix={}'.format(prefix),\n '--with-libsodium=no',\n '--disable-libunwind',\n _env=env)\n shprint(sh.make, _env=env)\n shprint(sh.make, 'install', _env=env)\n\n\nrecipe = LibZMQRecipe()\n", "path": "pythonforandroid/recipes/libzmq/__init__.py"}]} | 1,270 | 223 |
gh_patches_debug_47638 | rasdani/github-patches | git_diff | python-poetry__poetry-1948 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poetry tries to reinstall from git when using annotated tags as references
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Debian Buster
- **Poetry version**: 1.0.2
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/jrmlhermitte/bbfda76d59b820560bf2ff4a1e57d42d
## Issue
This came from a discussion on issue #691
If we re-run the gist included above, poetry will try re-installing the referred library, even though the git tag has not changed.
This was discovered on the issue mentioned above. Here is the reply from @bibz
> Thanks for the MRE.
> I can reproduce your example now.
> You found a different bug than we had, due to how git handled signed annotated tags.
> To summarise:
> The (annotated) tag v5.1.3 itself is a git object with the short sha 73f60e6
> The tree pointed to by the tag has the short sha 4056bbb
> The lock file refers to the tag object, because it is what v5.1.3 resolves to directly.
Since the checked-out tree has a different sha, poetry thinks it is not up-to-date (the references are different) and then re-install it.
> I think we need a new issue for this.
Would it be possible to investigate this, and possibly change the behaviour to not re-install the package?
Thanks for the great work on this wonderful library, keep it up!
</issue>
<code>
[start of poetry/vcs/git.py]
1 # -*- coding: utf-8 -*-
2 import re
3 import subprocess
4
5 from collections import namedtuple
6
7 from poetry.utils._compat import decode
8
9
10 pattern_formats = {
11 "protocol": r"\w+",
12 "user": r"[a-zA-Z0-9_.-]+",
13 "resource": r"[a-zA-Z0-9_.-]+",
14 "port": r"\d+",
15 "path": r"[\w\-/\\]+",
16 "name": r"[\w\-]+",
17 "rev": r"[^@#]+",
18 }
19
20 PATTERNS = [
21 re.compile(
22 r"^(git\+)?"
23 r"(?P<protocol>https?|git|ssh|rsync|file)://"
24 r"(?:(?P<user>{user})@)?"
25 r"(?P<resource>{resource})?"
26 r"(:(?P<port>{port}))?"
27 r"(?P<pathname>[:/\\]({path}[/\\])?"
28 r"((?P<name>{name}?)(\.git|[/\\])?)?)"
29 r"([@#](?P<rev>{rev}))?"
30 r"$".format(
31 user=pattern_formats["user"],
32 resource=pattern_formats["resource"],
33 port=pattern_formats["port"],
34 path=pattern_formats["path"],
35 name=pattern_formats["name"],
36 rev=pattern_formats["rev"],
37 )
38 ),
39 re.compile(
40 r"(git\+)?"
41 r"((?P<protocol>{protocol})://)"
42 r"(?:(?P<user>{user})@)?"
43 r"(?P<resource>{resource}:?)"
44 r"(:(?P<port>{port}))?"
45 r"(?P<pathname>({path})"
46 r"(?P<name>{name})(\.git|/)?)"
47 r"([@#](?P<rev>{rev}))?"
48 r"$".format(
49 protocol=pattern_formats["protocol"],
50 user=pattern_formats["user"],
51 resource=pattern_formats["resource"],
52 port=pattern_formats["port"],
53 path=pattern_formats["path"],
54 name=pattern_formats["name"],
55 rev=pattern_formats["rev"],
56 )
57 ),
58 re.compile(
59 r"^(?:(?P<user>{user})@)?"
60 r"(?P<resource>{resource})"
61 r"(:(?P<port>{port}))?"
62 r"(?P<pathname>([:/]{path}/)"
63 r"(?P<name>{name})(\.git|/)?)"
64 r"([@#](?P<rev>{rev}))?"
65 r"$".format(
66 user=pattern_formats["user"],
67 resource=pattern_formats["resource"],
68 port=pattern_formats["port"],
69 path=pattern_formats["path"],
70 name=pattern_formats["name"],
71 rev=pattern_formats["rev"],
72 )
73 ),
74 re.compile(
75 r"((?P<user>{user})@)?"
76 r"(?P<resource>{resource})"
77 r"[:/]{{1,2}}"
78 r"(?P<pathname>({path})"
79 r"(?P<name>{name})(\.git|/)?)"
80 r"([@#](?P<rev>{rev}))?"
81 r"$".format(
82 user=pattern_formats["user"],
83 resource=pattern_formats["resource"],
84 path=pattern_formats["path"],
85 name=pattern_formats["name"],
86 rev=pattern_formats["rev"],
87 )
88 ),
89 ]
90
91
92 class ParsedUrl:
93 def __init__(self, protocol, resource, pathname, user, port, name, rev):
94 self.protocol = protocol
95 self.resource = resource
96 self.pathname = pathname
97 self.user = user
98 self.port = port
99 self.name = name
100 self.rev = rev
101
102 @classmethod
103 def parse(cls, url): # type: () -> ParsedUrl
104 for pattern in PATTERNS:
105 m = pattern.match(url)
106 if m:
107 groups = m.groupdict()
108 return ParsedUrl(
109 groups.get("protocol"),
110 groups.get("resource"),
111 groups.get("pathname"),
112 groups.get("user"),
113 groups.get("port"),
114 groups.get("name"),
115 groups.get("rev"),
116 )
117
118 raise ValueError('Invalid git url "{}"'.format(url))
119
120 @property
121 def url(self): # type: () -> str
122 return "{}{}{}{}{}".format(
123 "{}://".format(self.protocol) if self.protocol else "",
124 "{}@".format(self.user) if self.user else "",
125 self.resource,
126 ":{}".format(self.port) if self.port else "",
127 "/" + self.pathname.lstrip(":/"),
128 )
129
130 def format(self):
131 return "{}".format(self.url, "#{}".format(self.rev) if self.rev else "",)
132
133 def __str__(self): # type: () -> str
134 return self.format()
135
136
137 GitUrl = namedtuple("GitUrl", ["url", "revision"])
138
139
140 class GitConfig:
141 def __init__(self, requires_git_presence=False):
142 self._config = {}
143
144 try:
145 config_list = decode(
146 subprocess.check_output(
147 ["git", "config", "-l"], stderr=subprocess.STDOUT
148 )
149 )
150
151 m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list)
152 if m:
153 for group in m:
154 self._config[group[0]] = group[1]
155 except (subprocess.CalledProcessError, OSError):
156 if requires_git_presence:
157 raise
158
159 def get(self, key, default=None):
160 return self._config.get(key, default)
161
162 def __getitem__(self, item):
163 return self._config[item]
164
165
166 class Git:
167 def __init__(self, work_dir=None):
168 self._config = GitConfig(requires_git_presence=True)
169 self._work_dir = work_dir
170
171 @classmethod
172 def normalize_url(cls, url): # type: (str) -> GitUrl
173 parsed = ParsedUrl.parse(url)
174
175 formatted = re.sub(r"^git\+", "", url)
176 if parsed.rev:
177 formatted = re.sub(r"[#@]{}$".format(parsed.rev), "", formatted)
178
179 altered = parsed.format() != formatted
180
181 if altered:
182 if re.match(r"^git\+https?", url) and re.match(
183 r"^/?:[^0-9]", parsed.pathname
184 ):
185 normalized = re.sub(r"git\+(.*:[^:]+):(.*)", "\\1/\\2", url)
186 elif re.match(r"^git\+file", url):
187 normalized = re.sub(r"git\+", "", url)
188 else:
189 normalized = re.sub(r"^(?:git\+)?ssh://", "", url)
190 else:
191 normalized = parsed.format()
192
193 return GitUrl(re.sub(r"#[^#]*$", "", normalized), parsed.rev)
194
195 @property
196 def config(self): # type: () -> GitConfig
197 return self._config
198
199 def clone(self, repository, dest): # type: (...) -> str
200 return self.run("clone", repository, str(dest))
201
202 def checkout(self, rev, folder=None): # type: (...) -> str
203 args = []
204 if folder is None and self._work_dir:
205 folder = self._work_dir
206
207 if folder:
208 args += [
209 "--git-dir",
210 (folder / ".git").as_posix(),
211 "--work-tree",
212 folder.as_posix(),
213 ]
214
215 args += ["checkout", rev]
216
217 return self.run(*args)
218
219 def rev_parse(self, rev, folder=None): # type: (...) -> str
220 args = []
221 if folder is None and self._work_dir:
222 folder = self._work_dir
223
224 if folder:
225 args += [
226 "--git-dir",
227 (folder / ".git").as_posix(),
228 "--work-tree",
229 folder.as_posix(),
230 ]
231
232 args += ["rev-parse", rev]
233
234 return self.run(*args)
235
236 def get_ignored_files(self, folder=None): # type: (...) -> list
237 args = []
238 if folder is None and self._work_dir:
239 folder = self._work_dir
240
241 if folder:
242 args += [
243 "--git-dir",
244 (folder / ".git").as_posix(),
245 "--work-tree",
246 folder.as_posix(),
247 ]
248
249 args += ["ls-files", "--others", "-i", "--exclude-standard"]
250 output = self.run(*args)
251
252 return output.strip().split("\n")
253
254 def remote_urls(self, folder=None): # type: (...) -> dict
255 output = self.run(
256 "config", "--get-regexp", r"remote\..*\.url", folder=folder
257 ).strip()
258
259 urls = {}
260 for url in output.splitlines():
261 name, url = url.split(" ", 1)
262 urls[name.strip()] = url.strip()
263
264 return urls
265
266 def remote_url(self, folder=None): # type: (...) -> str
267 urls = self.remote_urls(folder=folder)
268
269 return urls.get("remote.origin.url", urls[list(urls.keys())[0]])
270
271 def run(self, *args, **kwargs): # type: (...) -> str
272 folder = kwargs.pop("folder", None)
273 if folder:
274 args = (
275 "--git-dir",
276 (folder / ".git").as_posix(),
277 "--work-tree",
278 folder.as_posix(),
279 ) + args
280
281 return decode(
282 subprocess.check_output(["git"] + list(args), stderr=subprocess.STDOUT)
283 ).strip()
284
[end of poetry/vcs/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/vcs/git.py b/poetry/vcs/git.py
--- a/poetry/vcs/git.py
+++ b/poetry/vcs/git.py
@@ -229,7 +229,9 @@
folder.as_posix(),
]
- args += ["rev-parse", rev]
+ # We need "^{commit}" to ensure that the commit SHA of the commit the
+ # tag points to is returned, even in the case of annotated tags.
+ args += ["rev-parse", rev + "^{commit}"]
return self.run(*args)
| {"golden_diff": "diff --git a/poetry/vcs/git.py b/poetry/vcs/git.py\n--- a/poetry/vcs/git.py\n+++ b/poetry/vcs/git.py\n@@ -229,7 +229,9 @@\n folder.as_posix(),\n ]\n \n- args += [\"rev-parse\", rev]\n+ # We need \"^{commit}\" to ensure that the commit SHA of the commit the\n+ # tag points to is returned, even in the case of annotated tags.\n+ args += [\"rev-parse\", rev + \"^{commit}\"]\n \n return self.run(*args)\n", "issue": "poetry tries to reinstall from git when using annotated tags as references\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n\r\n- **OS version and name**: Debian Buster\r\n- **Poetry version**: 1.0.2\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/jrmlhermitte/bbfda76d59b820560bf2ff4a1e57d42d\r\n\r\n## Issue\r\nThis came from a discussion on issue #691 \r\n\r\nIf we re-run the gist included above, poetry will try re-installing the referred library, even though the git tag has not changed.\r\n\r\nThis was discovered on the issue mentioned above. Here is the reply from @bibz \r\n> Thanks for the MRE.\r\n\r\n> I can reproduce your example now.\r\n> You found a different bug than we had, due to how git handled signed annotated tags.\r\n\r\n> To summarise:\r\n\r\n> The (annotated) tag v5.1.3 itself is a git object with the short sha 73f60e6\r\n> The tree pointed to by the tag has the short sha 4056bbb\r\n\r\n> The lock file refers to the tag object, because it is what v5.1.3 resolves to directly.\r\n Since the checked-out tree has a different sha, poetry thinks it is not up-to-date (the references are different) and then re-install it.\r\n\r\n> I think we need a new issue for this.\r\n\r\n\r\nWould it be possible to investigate this, and possibly change the behaviour to not re-install the package?\r\n\r\nThanks for the great work on this wonderful library, keep it up!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\nimport subprocess\n\nfrom collections import namedtuple\n\nfrom poetry.utils._compat import decode\n\n\npattern_formats = {\n \"protocol\": r\"\\w+\",\n \"user\": r\"[a-zA-Z0-9_.-]+\",\n \"resource\": r\"[a-zA-Z0-9_.-]+\",\n \"port\": r\"\\d+\",\n \"path\": r\"[\\w\\-/\\\\]+\",\n \"name\": r\"[\\w\\-]+\",\n \"rev\": r\"[^@#]+\",\n}\n\nPATTERNS = [\n re.compile(\n r\"^(git\\+)?\"\n r\"(?P<protocol>https?|git|ssh|rsync|file)://\"\n r\"(?:(?P<user>{user})@)?\"\n r\"(?P<resource>{resource})?\"\n r\"(:(?P<port>{port}))?\"\n r\"(?P<pathname>[:/\\\\]({path}[/\\\\])?\"\n r\"((?P<name>{name}?)(\\.git|[/\\\\])?)?)\"\n r\"([@#](?P<rev>{rev}))?\"\n r\"$\".format(\n user=pattern_formats[\"user\"],\n resource=pattern_formats[\"resource\"],\n port=pattern_formats[\"port\"],\n path=pattern_formats[\"path\"],\n name=pattern_formats[\"name\"],\n rev=pattern_formats[\"rev\"],\n )\n ),\n re.compile(\n r\"(git\\+)?\"\n r\"((?P<protocol>{protocol})://)\"\n r\"(?:(?P<user>{user})@)?\"\n r\"(?P<resource>{resource}:?)\"\n r\"(:(?P<port>{port}))?\"\n r\"(?P<pathname>({path})\"\n r\"(?P<name>{name})(\\.git|/)?)\"\n r\"([@#](?P<rev>{rev}))?\"\n r\"$\".format(\n protocol=pattern_formats[\"protocol\"],\n user=pattern_formats[\"user\"],\n resource=pattern_formats[\"resource\"],\n 
port=pattern_formats[\"port\"],\n path=pattern_formats[\"path\"],\n name=pattern_formats[\"name\"],\n rev=pattern_formats[\"rev\"],\n )\n ),\n re.compile(\n r\"^(?:(?P<user>{user})@)?\"\n r\"(?P<resource>{resource})\"\n r\"(:(?P<port>{port}))?\"\n r\"(?P<pathname>([:/]{path}/)\"\n r\"(?P<name>{name})(\\.git|/)?)\"\n r\"([@#](?P<rev>{rev}))?\"\n r\"$\".format(\n user=pattern_formats[\"user\"],\n resource=pattern_formats[\"resource\"],\n port=pattern_formats[\"port\"],\n path=pattern_formats[\"path\"],\n name=pattern_formats[\"name\"],\n rev=pattern_formats[\"rev\"],\n )\n ),\n re.compile(\n r\"((?P<user>{user})@)?\"\n r\"(?P<resource>{resource})\"\n r\"[:/]{{1,2}}\"\n r\"(?P<pathname>({path})\"\n r\"(?P<name>{name})(\\.git|/)?)\"\n r\"([@#](?P<rev>{rev}))?\"\n r\"$\".format(\n user=pattern_formats[\"user\"],\n resource=pattern_formats[\"resource\"],\n path=pattern_formats[\"path\"],\n name=pattern_formats[\"name\"],\n rev=pattern_formats[\"rev\"],\n )\n ),\n]\n\n\nclass ParsedUrl:\n def __init__(self, protocol, resource, pathname, user, port, name, rev):\n self.protocol = protocol\n self.resource = resource\n self.pathname = pathname\n self.user = user\n self.port = port\n self.name = name\n self.rev = rev\n\n @classmethod\n def parse(cls, url): # type: () -> ParsedUrl\n for pattern in PATTERNS:\n m = pattern.match(url)\n if m:\n groups = m.groupdict()\n return ParsedUrl(\n groups.get(\"protocol\"),\n groups.get(\"resource\"),\n groups.get(\"pathname\"),\n groups.get(\"user\"),\n groups.get(\"port\"),\n groups.get(\"name\"),\n groups.get(\"rev\"),\n )\n\n raise ValueError('Invalid git url \"{}\"'.format(url))\n\n @property\n def url(self): # type: () -> str\n return \"{}{}{}{}{}\".format(\n \"{}://\".format(self.protocol) if self.protocol else \"\",\n \"{}@\".format(self.user) if self.user else \"\",\n self.resource,\n \":{}\".format(self.port) if self.port else \"\",\n \"/\" + self.pathname.lstrip(\":/\"),\n )\n\n def format(self):\n return \"{}\".format(self.url, \"#{}\".format(self.rev) if self.rev else \"\",)\n\n def __str__(self): # type: () -> str\n return self.format()\n\n\nGitUrl = namedtuple(\"GitUrl\", [\"url\", \"revision\"])\n\n\nclass GitConfig:\n def __init__(self, requires_git_presence=False):\n self._config = {}\n\n try:\n config_list = decode(\n subprocess.check_output(\n [\"git\", \"config\", \"-l\"], stderr=subprocess.STDOUT\n )\n )\n\n m = re.findall(\"(?ms)^([^=]+)=(.*?)$\", config_list)\n if m:\n for group in m:\n self._config[group[0]] = group[1]\n except (subprocess.CalledProcessError, OSError):\n if requires_git_presence:\n raise\n\n def get(self, key, default=None):\n return self._config.get(key, default)\n\n def __getitem__(self, item):\n return self._config[item]\n\n\nclass Git:\n def __init__(self, work_dir=None):\n self._config = GitConfig(requires_git_presence=True)\n self._work_dir = work_dir\n\n @classmethod\n def normalize_url(cls, url): # type: (str) -> GitUrl\n parsed = ParsedUrl.parse(url)\n\n formatted = re.sub(r\"^git\\+\", \"\", url)\n if parsed.rev:\n formatted = re.sub(r\"[#@]{}$\".format(parsed.rev), \"\", formatted)\n\n altered = parsed.format() != formatted\n\n if altered:\n if re.match(r\"^git\\+https?\", url) and re.match(\n r\"^/?:[^0-9]\", parsed.pathname\n ):\n normalized = re.sub(r\"git\\+(.*:[^:]+):(.*)\", \"\\\\1/\\\\2\", url)\n elif re.match(r\"^git\\+file\", url):\n normalized = re.sub(r\"git\\+\", \"\", url)\n else:\n normalized = re.sub(r\"^(?:git\\+)?ssh://\", \"\", url)\n else:\n normalized = parsed.format()\n\n return 
GitUrl(re.sub(r\"#[^#]*$\", \"\", normalized), parsed.rev)\n\n @property\n def config(self): # type: () -> GitConfig\n return self._config\n\n def clone(self, repository, dest): # type: (...) -> str\n return self.run(\"clone\", repository, str(dest))\n\n def checkout(self, rev, folder=None): # type: (...) -> str\n args = []\n if folder is None and self._work_dir:\n folder = self._work_dir\n\n if folder:\n args += [\n \"--git-dir\",\n (folder / \".git\").as_posix(),\n \"--work-tree\",\n folder.as_posix(),\n ]\n\n args += [\"checkout\", rev]\n\n return self.run(*args)\n\n def rev_parse(self, rev, folder=None): # type: (...) -> str\n args = []\n if folder is None and self._work_dir:\n folder = self._work_dir\n\n if folder:\n args += [\n \"--git-dir\",\n (folder / \".git\").as_posix(),\n \"--work-tree\",\n folder.as_posix(),\n ]\n\n args += [\"rev-parse\", rev]\n\n return self.run(*args)\n\n def get_ignored_files(self, folder=None): # type: (...) -> list\n args = []\n if folder is None and self._work_dir:\n folder = self._work_dir\n\n if folder:\n args += [\n \"--git-dir\",\n (folder / \".git\").as_posix(),\n \"--work-tree\",\n folder.as_posix(),\n ]\n\n args += [\"ls-files\", \"--others\", \"-i\", \"--exclude-standard\"]\n output = self.run(*args)\n\n return output.strip().split(\"\\n\")\n\n def remote_urls(self, folder=None): # type: (...) -> dict\n output = self.run(\n \"config\", \"--get-regexp\", r\"remote\\..*\\.url\", folder=folder\n ).strip()\n\n urls = {}\n for url in output.splitlines():\n name, url = url.split(\" \", 1)\n urls[name.strip()] = url.strip()\n\n return urls\n\n def remote_url(self, folder=None): # type: (...) -> str\n urls = self.remote_urls(folder=folder)\n\n return urls.get(\"remote.origin.url\", urls[list(urls.keys())[0]])\n\n def run(self, *args, **kwargs): # type: (...) -> str\n folder = kwargs.pop(\"folder\", None)\n if folder:\n args = (\n \"--git-dir\",\n (folder / \".git\").as_posix(),\n \"--work-tree\",\n folder.as_posix(),\n ) + args\n\n return decode(\n subprocess.check_output([\"git\"] + list(args), stderr=subprocess.STDOUT)\n ).strip()\n", "path": "poetry/vcs/git.py"}]} | 3,827 | 133 |
gh_patches_debug_26869 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2065 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[pre-commit.ci] pre-commit autoupdate
updates:
- [github.com/asottile/pyupgrade: v2.25.0 → v2.26.0](https://github.com/asottile/pyupgrade/compare/v2.25.0...v2.26.0)
</issue>
<code>
[start of pre_commit/commands/install_uninstall.py]
1 import itertools
2 import logging
3 import os.path
4 import shutil
5 import sys
6 from typing import Optional
7 from typing import Sequence
8 from typing import Tuple
9
10 from pre_commit import git
11 from pre_commit import output
12 from pre_commit.clientlib import load_config
13 from pre_commit.repository import all_hooks
14 from pre_commit.repository import install_hook_envs
15 from pre_commit.store import Store
16 from pre_commit.util import make_executable
17 from pre_commit.util import resource_text
18
19
20 logger = logging.getLogger(__name__)
21
22 # This is used to identify the hook file we install
23 PRIOR_HASHES = (
24 b'4d9958c90bc262f47553e2c073f14cfe',
25 b'd8ee923c46731b42cd95cc869add4062',
26 b'49fd668cb42069aa1b6048464be5d395',
27 b'79f09a650522a87b0da915d0d983b2de',
28 b'e358c9dae00eac5d06b38dfdb1e33a8c',
29 )
30 CURRENT_HASH = b'138fd403232d2ddd5efb44317e38bf03'
31 TEMPLATE_START = '# start templated\n'
32 TEMPLATE_END = '# end templated\n'
33 # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
34 # #1312 os.defpath is too restrictive on BSD
35 POSIX_SEARCH_PATH = ('/usr/local/bin', '/usr/bin', '/bin')
36 SYS_EXE = os.path.basename(os.path.realpath(sys.executable))
37
38
39 def _hook_paths(
40 hook_type: str,
41 git_dir: Optional[str] = None,
42 ) -> Tuple[str, str]:
43 git_dir = git_dir if git_dir is not None else git.get_git_dir()
44 pth = os.path.join(git_dir, 'hooks', hook_type)
45 return pth, f'{pth}.legacy'
46
47
48 def is_our_script(filename: str) -> bool:
49 if not os.path.exists(filename): # pragma: win32 no cover (symlink)
50 return False
51 with open(filename, 'rb') as f:
52 contents = f.read()
53 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
54
55
56 def shebang() -> str:
57 if sys.platform == 'win32':
58 py, _ = os.path.splitext(SYS_EXE)
59 else:
60 exe_choices = [
61 f'python{sys.version_info[0]}.{sys.version_info[1]}',
62 f'python{sys.version_info[0]}',
63 ]
64 # avoid searching for bare `python` as it's likely to be python 2
65 if SYS_EXE != 'python':
66 exe_choices.append(SYS_EXE)
67 for path, exe in itertools.product(POSIX_SEARCH_PATH, exe_choices):
68 if os.access(os.path.join(path, exe), os.X_OK):
69 py = exe
70 break
71 else:
72 py = SYS_EXE
73 return f'#!/usr/bin/env {py}'
74
75
76 def _install_hook_script(
77 config_file: str,
78 hook_type: str,
79 overwrite: bool = False,
80 skip_on_missing_config: bool = False,
81 git_dir: Optional[str] = None,
82 ) -> None:
83 hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)
84
85 os.makedirs(os.path.dirname(hook_path), exist_ok=True)
86
87 # If we have an existing hook, move it to pre-commit.legacy
88 if os.path.lexists(hook_path) and not is_our_script(hook_path):
89 shutil.move(hook_path, legacy_path)
90
91 # If we specify overwrite, we simply delete the legacy file
92 if overwrite and os.path.exists(legacy_path):
93 os.remove(legacy_path)
94 elif os.path.exists(legacy_path):
95 output.write_line(
96 f'Running in migration mode with existing hooks at {legacy_path}\n'
97 f'Use -f to use only pre-commit.',
98 )
99
100 args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']
101 if skip_on_missing_config:
102 args.append('--skip-on-missing-config')
103 params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}
104
105 with open(hook_path, 'w') as hook_file:
106 contents = resource_text('hook-tmpl')
107 before, rest = contents.split(TEMPLATE_START)
108 to_template, after = rest.split(TEMPLATE_END)
109
110 before = before.replace('#!/usr/bin/env python3', shebang())
111
112 hook_file.write(before + TEMPLATE_START)
113 for line in to_template.splitlines():
114 var = line.split()[0]
115 hook_file.write(f'{var} = {params[var]!r}\n')
116 hook_file.write(TEMPLATE_END + after)
117 make_executable(hook_path)
118
119 output.write_line(f'pre-commit installed at {hook_path}')
120
121
122 def install(
123 config_file: str,
124 store: Store,
125 hook_types: Sequence[str],
126 overwrite: bool = False,
127 hooks: bool = False,
128 skip_on_missing_config: bool = False,
129 git_dir: Optional[str] = None,
130 ) -> int:
131 if git_dir is None and git.has_core_hookpaths_set():
132 logger.error(
133 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
134 'hint: `git config --unset-all core.hooksPath`',
135 )
136 return 1
137
138 for hook_type in hook_types:
139 _install_hook_script(
140 config_file, hook_type,
141 overwrite=overwrite,
142 skip_on_missing_config=skip_on_missing_config,
143 git_dir=git_dir,
144 )
145
146 if hooks:
147 install_hooks(config_file, store)
148
149 return 0
150
151
152 def install_hooks(config_file: str, store: Store) -> int:
153 install_hook_envs(all_hooks(load_config(config_file), store), store)
154 return 0
155
156
157 def _uninstall_hook_script(hook_type: str) -> None:
158 hook_path, legacy_path = _hook_paths(hook_type)
159
160 # If our file doesn't exist or it isn't ours, gtfo.
161 if not os.path.exists(hook_path) or not is_our_script(hook_path):
162 return
163
164 os.remove(hook_path)
165 output.write_line(f'{hook_type} uninstalled')
166
167 if os.path.exists(legacy_path):
168 os.replace(legacy_path, hook_path)
169 output.write_line(f'Restored previous hooks to {hook_path}')
170
171
172 def uninstall(hook_types: Sequence[str]) -> int:
173 for hook_type in hook_types:
174 _uninstall_hook_script(hook_type)
175 return 0
176
[end of pre_commit/commands/install_uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -1,6 +1,7 @@
import itertools
import logging
import os.path
+import shlex
import shutil
import sys
from typing import Optional
@@ -100,19 +101,17 @@
args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']
if skip_on_missing_config:
args.append('--skip-on-missing-config')
- params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}
with open(hook_path, 'w') as hook_file:
contents = resource_text('hook-tmpl')
before, rest = contents.split(TEMPLATE_START)
- to_template, after = rest.split(TEMPLATE_END)
-
- before = before.replace('#!/usr/bin/env python3', shebang())
+ _, after = rest.split(TEMPLATE_END)
hook_file.write(before + TEMPLATE_START)
- for line in to_template.splitlines():
- var = line.split()[0]
- hook_file.write(f'{var} = {params[var]!r}\n')
+ hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\n')
+ # TODO: python3.8+: shlex.join
+ args_s = ' '.join(shlex.quote(part) for part in args)
+ hook_file.write(f'ARGS=({args_s})\n')
hook_file.write(TEMPLATE_END + after)
make_executable(hook_path)
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -1,6 +1,7 @@\n import itertools\n import logging\n import os.path\n+import shlex\n import shutil\n import sys\n from typing import Optional\n@@ -100,19 +101,17 @@\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n- params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n \n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n- to_template, after = rest.split(TEMPLATE_END)\n-\n- before = before.replace('#!/usr/bin/env python3', shebang())\n+ _, after = rest.split(TEMPLATE_END)\n \n hook_file.write(before + TEMPLATE_START)\n- for line in to_template.splitlines():\n- var = line.split()[0]\n- hook_file.write(f'{var} = {params[var]!r}\\n')\n+ hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\\n')\n+ # TODO: python3.8+: shlex.join\n+ args_s = ' '.join(shlex.quote(part) for part in args)\n+ hook_file.write(f'ARGS=({args_s})\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n", "issue": "[pre-commit.ci] pre-commit autoupdate\nupdates:\n- [github.com/asottile/pyupgrade: v2.25.0 \u2192 v2.26.0](https://github.com/asottile/pyupgrade/compare/v2.25.0...v2.26.0)\n\n", "before_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n b'4d9958c90bc262f47553e2c073f14cfe',\n b'd8ee923c46731b42cd95cc869add4062',\n b'49fd668cb42069aa1b6048464be5d395',\n b'79f09a650522a87b0da915d0d983b2de',\n b'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = b'138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n# Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n# #1312 os.defpath is too restrictive on BSD\nPOSIX_SEARCH_PATH = ('/usr/local/bin', '/usr/bin', '/bin')\nSYS_EXE = os.path.basename(os.path.realpath(sys.executable))\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: win32 no cover (symlink)\n return False\n with open(filename, 'rb') as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py, _ = os.path.splitext(SYS_EXE)\n else:\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n # avoid searching for bare `python` as it's likely to be python 2\n if SYS_EXE != 'python':\n exe_choices.append(SYS_EXE)\n for path, 
exe in itertools.product(POSIX_SEARCH_PATH, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n py = SYS_EXE\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.replace(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 2,554 | 370 |
gh_patches_debug_1548 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SV production parser down
## Description
This is an automatic error report generated for El Salvador (SV).
Issues:
- No recent data found for `production` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser SV production`
- <a href="https://storage.googleapis.com/electricitymap-parser-logs/SV.html">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
</issue>
<code>
[start of parsers/SV.py]
1 #!/usr/bin/env python3
2
3 import json
4 import re
5 from collections import defaultdict
6 from datetime import datetime
7 from logging import Logger, getLogger
8 from operator import itemgetter
9 from typing import Optional
10
11 import arrow
12 from bs4 import BeautifulSoup
13 from requests import Session
14
15 # This parser gets hourly electricity generation data from ut.com.sv for El Salvador.
16 # El Salvador does have wind generation but there is no data available.
17 # The 'Termico' category only consists of generation from oil/diesel according to historical data.
18 # See: https://www.iea.org/statistics/?country=ELSALVADOR&year=2016&category=Key%20indicators&indicator=ElecGenByFuel
19 # A new Liquid Natural Gas power plant may come online in 2020/2021.
20 # See: https://gastechinsights.com/article/what-energa-del-pacficos-lng-to-power-project-means-for-el-salvador
21
22 # Thanks to jarek for figuring out how to make the correct POST request to the data url.
23
24 url = "http://estadistico.ut.com.sv/OperacionDiaria.aspx"
25
26 generation_map = {
27 0: "biomass",
28 1: "wind",
29 2: "geothermal",
30 3: "hydro",
31 4: "interconnection",
32 5: "thermal",
33 6: "solar",
34 "datetime": "datetime",
35 }
36
37
38 def get_data(session: Optional[Session] = None):
39 """
40 Makes a get request to data url.
41 Parses the response then makes a post request to the same url using
42 parsed parameters from the get request.
43 Returns a requests response object.
44 """
45
46 s = session or Session()
47 pagereq = s.get(url)
48
49 soup = BeautifulSoup(pagereq.content, "html.parser")
50
51 # Find and define parameters needed to send a POST request for the actual data.
52 viewstategenerator = soup.find("input", attrs={"id": "__VIEWSTATEGENERATOR"})[
53 "value"
54 ]
55 viewstate = soup.find("input", attrs={"id": "__VIEWSTATE"})["value"]
56 eventvalidation = soup.find("input", attrs={"id": "__EVENTVALIDATION"})["value"]
57 DXCss = "1_33,1_4,1_9,1_5,15_2,15_4"
58 DXScript = "1_232,1_134,1_225,1_169,1_187,15_1,1_183,1_182,1_140,1_147,1_148,1_142,1_141,1_143,1_144,1_145,1_146,15_0,15_6,15_7"
59 callback_param_init = 'c0:{"Task":"Initialize","DashboardId":"OperacionDiaria","Settings":{"calculateHiddenTotals":false},"RequestMarker":0,"ClientState":{}}'
60
61 postdata = {
62 "__VIEWSTATE": viewstate,
63 "__VIEWSTATEGENERATOR": viewstategenerator,
64 "__EVENTVALIDATION": eventvalidation,
65 "__CALLBACKPARAM": callback_param_init,
66 "__CALLBACKID": "ASPxDashboardViewer1",
67 "DXScript": DXScript,
68 "DXCss": DXCss,
69 }
70
71 datareq = s.post(url, data=postdata)
72
73 return datareq
74
75
76 def data_parser(datareq) -> list:
77 """
78 Accepts a requests response.text object.
79 Slices the object down to a smaller size then converts to usable json.
80 Loads the data as json then finds the 'result' key.
81 Uses regex to find the start
82 and endpoints of the actual data.
83 Splits the data into datapoints then cleans them up for processing.
84 """
85
86 double_json = datareq.text[len("0|/*DX*/(") : -1]
87 double_json = double_json.replace("'", '"')
88 data = json.loads(double_json)
89 jsresult = data["result"]
90
91 startpoints = [m.end(0) for m in re.finditer('"Data":{', jsresult)]
92 endpoints = [m.start(0) for m in re.finditer('"KeyIds"', jsresult)]
93
94 sliced = jsresult[startpoints[1] : endpoints[2]]
95 sliced = "".join(sliced.split())
96 sliced = sliced[1:-4]
97
98 chopped = sliced.split(',"')
99
100 diced = []
101 for item in chopped:
102 item = item.replace("}", "")
103 np = item.split('":')
104 diced.append(np[0::2])
105
106 clean_data = []
107 for item in diced:
108 j = json.loads(item[0])
109 k = float(item[1])
110 j.append(k)
111 clean_data.append(j)
112
113 return clean_data
114
115
116 def data_processer(data) -> list:
117 """
118 Takes data in the form of a list of lists.
119 Converts each list to a dictionary.
120 Joins dictionaries based on shared datetime key.
121 Maps generation to type.
122 """
123
124 converted = []
125 for val in data:
126 newval = {"datetime": val[2], val[0]: val[3]}
127 converted.append(newval)
128
129 # Join dicts on 'datetime' key.
130 d = defaultdict(dict)
131 for elem in converted:
132 d[elem["datetime"]].update(elem)
133
134 joined_data = sorted(d.values(), key=itemgetter("datetime"))
135
136 def get_datetime(hour):
137 at = arrow.now("UTC-6").floor("hour")
138 dt = (at.replace(hour=int(hour), minute=0, second=0)).datetime
139 return dt
140
141 mapped_data = []
142 for point in joined_data:
143 point = {generation_map[num]: val for num, val in point.items()}
144 point["datetime"] = get_datetime(point["datetime"])
145 mapped_data.append(point)
146
147 return mapped_data
148
149
150 def fetch_production(
151 zone_key: str = "SV",
152 session: Optional[Session] = None,
153 target_datetime: Optional[datetime] = None,
154 logger: Logger = getLogger(__name__),
155 ) -> list:
156 """Requests the last known production mix (in MW) of a given country."""
157 if target_datetime:
158 raise NotImplementedError("This parser is not yet able to parse past dates")
159
160 req = get_data(session=None)
161 parsed = data_parser(req)
162 data = data_processer(parsed)
163 production_mix_by_hour = []
164 for hour in data:
165 production_mix = {
166 "zoneKey": zone_key,
167 "datetime": hour["datetime"],
168 "production": {
169 "biomass": hour.get("biomass", 0.0),
170 "coal": 0.0,
171 "gas": 0.0,
172 "hydro": hour.get("hydro", 0.0),
173 "nuclear": 0.0,
174 "oil": hour.get("thermal", 0.0),
175 "solar": hour.get("solar", 0.0),
176 "wind": hour.get("wind", 0.0),
177 "geothermal": hour.get("geothermal", 0.0),
178 "unknown": 0.0,
179 },
180 "storage": {
181 "hydro": None,
182 },
183 "source": "ut.com.sv",
184 }
185 production_mix_by_hour.append(production_mix)
186
187 return production_mix_by_hour
188
189
190 if __name__ == "__main__":
191 """Main method, never used by the Electricity Map backend, but handy for testing."""
192
193 print("fetch_production() ->")
194 print(fetch_production())
195
[end of parsers/SV.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/SV.py b/parsers/SV.py
--- a/parsers/SV.py
+++ b/parsers/SV.py
@@ -21,7 +21,7 @@
# Thanks to jarek for figuring out how to make the correct POST request to the data url.
-url = "http://estadistico.ut.com.sv/OperacionDiaria.aspx"
+url = "https://estadistico.ut.com.sv/OperacionDiaria.aspx"
generation_map = {
0: "biomass",
| {"golden_diff": "diff --git a/parsers/SV.py b/parsers/SV.py\n--- a/parsers/SV.py\n+++ b/parsers/SV.py\n@@ -21,7 +21,7 @@\n \n # Thanks to jarek for figuring out how to make the correct POST request to the data url.\n \n-url = \"http://estadistico.ut.com.sv/OperacionDiaria.aspx\"\n+url = \"https://estadistico.ut.com.sv/OperacionDiaria.aspx\"\n \n generation_map = {\n 0: \"biomass\",\n", "issue": "SV production parser down\n## Description\n\nThis is an automatic error report generated for El Salvador (SV).\n\nIssues:\n- No recent data found for `production` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser SV production`\n- <a href=\"https://storage.googleapis.com/electricitymap-parser-logs/SV.html\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport json\nimport re\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom logging import Logger, getLogger\nfrom operator import itemgetter\nfrom typing import Optional\n\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom requests import Session\n\n# This parser gets hourly electricity generation data from ut.com.sv for El Salvador.\n# El Salvador does have wind generation but there is no data available.\n# The 'Termico' category only consists of generation from oil/diesel according to historical data.\n# See: https://www.iea.org/statistics/?country=ELSALVADOR&year=2016&category=Key%20indicators&indicator=ElecGenByFuel\n# A new Liquid Natural Gas power plant may come online in 2020/2021.\n# See: https://gastechinsights.com/article/what-energa-del-pacficos-lng-to-power-project-means-for-el-salvador\n\n# Thanks to jarek for figuring out how to make the correct POST request to the data url.\n\nurl = \"http://estadistico.ut.com.sv/OperacionDiaria.aspx\"\n\ngeneration_map = {\n 0: \"biomass\",\n 1: \"wind\",\n 2: \"geothermal\",\n 3: \"hydro\",\n 4: \"interconnection\",\n 5: \"thermal\",\n 6: \"solar\",\n \"datetime\": \"datetime\",\n}\n\n\ndef get_data(session: Optional[Session] = None):\n \"\"\"\n Makes a get request to data url.\n Parses the response then makes a post request to the same url using\n parsed parameters from the get request.\n Returns a requests response object.\n \"\"\"\n\n s = session or Session()\n pagereq = s.get(url)\n\n soup = BeautifulSoup(pagereq.content, \"html.parser\")\n\n # Find and define parameters needed to send a POST request for the actual data.\n viewstategenerator = soup.find(\"input\", attrs={\"id\": \"__VIEWSTATEGENERATOR\"})[\n \"value\"\n ]\n viewstate = soup.find(\"input\", attrs={\"id\": \"__VIEWSTATE\"})[\"value\"]\n eventvalidation = soup.find(\"input\", attrs={\"id\": \"__EVENTVALIDATION\"})[\"value\"]\n DXCss = \"1_33,1_4,1_9,1_5,15_2,15_4\"\n DXScript = \"1_232,1_134,1_225,1_169,1_187,15_1,1_183,1_182,1_140,1_147,1_148,1_142,1_141,1_143,1_144,1_145,1_146,15_0,15_6,15_7\"\n callback_param_init = 'c0:{\"Task\":\"Initialize\",\"DashboardId\":\"OperacionDiaria\",\"Settings\":{\"calculateHiddenTotals\":false},\"RequestMarker\":0,\"ClientState\":{}}'\n\n postdata = {\n \"__VIEWSTATE\": viewstate,\n \"__VIEWSTATEGENERATOR\": viewstategenerator,\n \"__EVENTVALIDATION\": eventvalidation,\n \"__CALLBACKPARAM\": callback_param_init,\n \"__CALLBACKID\": \"ASPxDashboardViewer1\",\n \"DXScript\": DXScript,\n \"DXCss\": DXCss,\n }\n\n datareq = s.post(url, data=postdata)\n\n return 
datareq\n\n\ndef data_parser(datareq) -> list:\n \"\"\"\n Accepts a requests response.text object.\n Slices the object down to a smaller size then converts to usable json.\n Loads the data as json then finds the 'result' key.\n Uses regex to find the start\n and endpoints of the actual data.\n Splits the data into datapoints then cleans them up for processing.\n \"\"\"\n\n double_json = datareq.text[len(\"0|/*DX*/(\") : -1]\n double_json = double_json.replace(\"'\", '\"')\n data = json.loads(double_json)\n jsresult = data[\"result\"]\n\n startpoints = [m.end(0) for m in re.finditer('\"Data\":{', jsresult)]\n endpoints = [m.start(0) for m in re.finditer('\"KeyIds\"', jsresult)]\n\n sliced = jsresult[startpoints[1] : endpoints[2]]\n sliced = \"\".join(sliced.split())\n sliced = sliced[1:-4]\n\n chopped = sliced.split(',\"')\n\n diced = []\n for item in chopped:\n item = item.replace(\"}\", \"\")\n np = item.split('\":')\n diced.append(np[0::2])\n\n clean_data = []\n for item in diced:\n j = json.loads(item[0])\n k = float(item[1])\n j.append(k)\n clean_data.append(j)\n\n return clean_data\n\n\ndef data_processer(data) -> list:\n \"\"\"\n Takes data in the form of a list of lists.\n Converts each list to a dictionary.\n Joins dictionaries based on shared datetime key.\n Maps generation to type.\n \"\"\"\n\n converted = []\n for val in data:\n newval = {\"datetime\": val[2], val[0]: val[3]}\n converted.append(newval)\n\n # Join dicts on 'datetime' key.\n d = defaultdict(dict)\n for elem in converted:\n d[elem[\"datetime\"]].update(elem)\n\n joined_data = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n def get_datetime(hour):\n at = arrow.now(\"UTC-6\").floor(\"hour\")\n dt = (at.replace(hour=int(hour), minute=0, second=0)).datetime\n return dt\n\n mapped_data = []\n for point in joined_data:\n point = {generation_map[num]: val for num, val in point.items()}\n point[\"datetime\"] = get_datetime(point[\"datetime\"])\n mapped_data.append(point)\n\n return mapped_data\n\n\ndef fetch_production(\n zone_key: str = \"SV\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n) -> list:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n\n req = get_data(session=None)\n parsed = data_parser(req)\n data = data_processer(parsed)\n production_mix_by_hour = []\n for hour in data:\n production_mix = {\n \"zoneKey\": zone_key,\n \"datetime\": hour[\"datetime\"],\n \"production\": {\n \"biomass\": hour.get(\"biomass\", 0.0),\n \"coal\": 0.0,\n \"gas\": 0.0,\n \"hydro\": hour.get(\"hydro\", 0.0),\n \"nuclear\": 0.0,\n \"oil\": hour.get(\"thermal\", 0.0),\n \"solar\": hour.get(\"solar\", 0.0),\n \"wind\": hour.get(\"wind\", 0.0),\n \"geothermal\": hour.get(\"geothermal\", 0.0),\n \"unknown\": 0.0,\n },\n \"storage\": {\n \"hydro\": None,\n },\n \"source\": \"ut.com.sv\",\n }\n production_mix_by_hour.append(production_mix)\n\n return production_mix_by_hour\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print(\"fetch_production() ->\")\n print(fetch_production())\n", "path": "parsers/SV.py"}]} | 2,815 | 122 |
gh_patches_debug_10174 | rasdani/github-patches | git_diff | pre-commit__pre-commit-96 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System hooks with spaces in entry are not runnable
It's pretty reasonable to have a system hook that looks like this:
```
- id: foo
name: foo
entry: python -m bar
language: system
```
Currently this fails:
```
$ pre-commit run foo --all-files
foo...................................................Failed
xargs: python -m bar: No such file or directory
```
</issue>
<code>
[start of pre_commit/languages/system.py]
1 ENVIRONMENT_DIR = None
2
3
4 def install_environment(repo_cmd_runner):
5 """Installation for system type is a noop."""
6
7
8 def run_hook(repo_cmd_runner, hook, file_args):
9 return repo_cmd_runner.run(
10 ['xargs', hook['entry']] + hook['args'],
11 # TODO: this is duplicated in pre_commit/languages/helpers.py
12 stdin='\n'.join(list(file_args) + ['']),
13 retcode=None,
14 )
15
[end of pre_commit/languages/system.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py
--- a/pre_commit/languages/system.py
+++ b/pre_commit/languages/system.py
@@ -1,3 +1,6 @@
+import shlex
+
+
ENVIRONMENT_DIR = None
@@ -7,7 +10,7 @@
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
- ['xargs', hook['entry']] + hook['args'],
+ ['xargs'] + shlex.split(hook['entry']) + hook['args'],
# TODO: this is duplicated in pre_commit/languages/helpers.py
stdin='\n'.join(list(file_args) + ['']),
retcode=None,
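
A note on the fix above: `shlex.split` tokenizes the hook's `entry` the way a shell would, so a multi-word entry reaches `xargs` as separate arguments instead of a single opaque string. A minimal sketch using the entry from the issue (the hook dict is simplified):

```python
import shlex

hook = {"entry": "python -m bar", "args": []}

# Old behaviour: xargs receives one argument and tries to run a program
# literally named "python -m bar", hence "No such file or directory".
old_cmd = ["xargs", hook["entry"]] + hook["args"]
# -> ['xargs', 'python -m bar']

# New behaviour: the entry is split shell-style before being handed to xargs.
new_cmd = ["xargs"] + shlex.split(hook["entry"]) + hook["args"]
# -> ['xargs', 'python', '-m', 'bar']
```
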
| {"golden_diff": "diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py\n--- a/pre_commit/languages/system.py\n+++ b/pre_commit/languages/system.py\n@@ -1,3 +1,6 @@\n+import shlex\n+\n+\n ENVIRONMENT_DIR = None\n \n \n@@ -7,7 +10,7 @@\n \n def run_hook(repo_cmd_runner, hook, file_args):\n return repo_cmd_runner.run(\n- ['xargs', hook['entry']] + hook['args'],\n+ ['xargs'] + shlex.split(hook['entry']) + hook['args'],\n # TODO: this is duplicated in pre_commit/languages/helpers.py\n stdin='\\n'.join(list(file_args) + ['']),\n retcode=None,\n", "issue": "System hooks with spaces in entry are not runnable\nIt's pretty reasonable to have a system hook that looks like this:\n\n```\n- id: foo\n name: foo\n entry: python -m bar\n language: system\n```\n\nCurrently this fails:\n\n```\n$ pre-commit run foo --all-files\nfoo...................................................Failed\n\nxargs: python -m bar: No such file or directory\n```\n\n", "before_files": [{"content": "ENVIRONMENT_DIR = None\n\n\ndef install_environment(repo_cmd_runner):\n \"\"\"Installation for system type is a noop.\"\"\"\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n return repo_cmd_runner.run(\n ['xargs', hook['entry']] + hook['args'],\n # TODO: this is duplicated in pre_commit/languages/helpers.py\n stdin='\\n'.join(list(file_args) + ['']),\n retcode=None,\n )\n", "path": "pre_commit/languages/system.py"}]} | 742 | 159 |
gh_patches_debug_431 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ReadTheDocs configuration issue
The RTD documentation is not updated with my changes in #1096 https://opentelemetry-python.readthedocs.io/en/latest/sdk/resources.html
However, it works fine locally.
**Steps to reproduce**
Reproduced in all builds on RTD. For example, see logs here: https://readthedocs.org/projects/opentelemetry-python/builds/11937984/ (all builds [here](https://readthedocs.org/projects/google-cloud-opentelemetry/builds/))
You can run the commands in the logs to repro locally and see many of these types of errors:
```
WARNING: autodoc: failed to import module 'zipkin' from module 'opentelemetry.exporter'; the following exception was raised:
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/sphinx/ext/autodoc/importer.py", line 32, in import_module
return importlib.import_module(modname)
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 783, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py", line 72, in <module>
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/__init__.py", line 19, in <module>
from . import metrics, trace, util
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py", line 33, in <module>
from opentelemetry.sdk.resources import Resource
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py", line 98, in <module>
OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 481, in get_distribution
dist = get_provider(dist)
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 357, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'opentelemetry-sdk' distribution was not found and is required by the application
```
**What is the expected behavior?**
<img width="1218" alt="Screen Shot 2020-09-22 at 3 23 21 PM" src="https://user-images.githubusercontent.com/1510004/93927952-9410df00-fce7-11ea-9328-2d4c9951089e.png">
**What is the actual behavior?**
See logs above
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from os import listdir
16 from os.path import isdir, join
17
18 # configure django to avoid the following exception:
19 # django.core.exceptions.ImproperlyConfigured: Requested settings, but settings
20 # are not configured. You must either define the environment variable
21 # DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.
22 from django.conf import settings
23
24 settings.configure()
25
26
27 source_dirs = [
28 os.path.abspath("../opentelemetry-api/src/"),
29 os.path.abspath("../opentelemetry-sdk/src/"),
30 os.path.abspath("../opentelemetry-instrumentation/src/"),
31 ]
32
33 exp = "../exporter"
34 exp_dirs = [
35 os.path.abspath("/".join(["../exporter", f, "src"]))
36 for f in listdir(exp)
37 if isdir(join(exp, f))
38 ]
39
40 instr = "../instrumentation"
41 instr_dirs = [
42 os.path.abspath("/".join(["../instrumentation", f, "src"]))
43 for f in listdir(instr)
44 if isdir(join(instr, f))
45 ]
46
47 sys.path[:0] = source_dirs + exp_dirs + instr_dirs
48
49 # -- Project information -----------------------------------------------------
50
51 project = "OpenTelemetry Python"
52 copyright = "OpenTelemetry Authors" # pylint: disable=redefined-builtin
53 author = "OpenTelemetry Authors"
54
55
56 # -- General configuration ---------------------------------------------------
57
58 # Easy automatic cross-references for `code in backticks`
59 default_role = "any"
60
61 # Add any Sphinx extension module names here, as strings. They can be
62 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
63 # ones.
64 extensions = [
65 # API doc generation
66 "sphinx.ext.autodoc",
67 # Support for google-style docstrings
68 "sphinx.ext.napoleon",
69 # Infer types from hints instead of docstrings
70 "sphinx_autodoc_typehints",
71 # Add links to source from generated docs
72 "sphinx.ext.viewcode",
73 # Link to other sphinx docs
74 "sphinx.ext.intersphinx",
75 # Add a .nojekyll file to the generated HTML docs
76 # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing
77 "sphinx.ext.githubpages",
78 # Support external links to different versions in the Github repo
79 "sphinx.ext.extlinks",
80 ]
81
82 intersphinx_mapping = {
83 "python": ("https://docs.python.org/3/", None),
84 "opentracing": (
85 "https://opentracing-python.readthedocs.io/en/latest/",
86 None,
87 ),
88 "aiohttp": ("https://aiohttp.readthedocs.io/en/stable/", None),
89 "wrapt": ("https://wrapt.readthedocs.io/en/latest/", None),
90 "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
91 }
92
93 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
94 # Sphinx will warn about all references where the target cannot be found.
95 nitpicky = True
96 # Sphinx does not recognize generic type TypeVars
97 # Container supposedly were fixed, but does not work
98 # https://github.com/sphinx-doc/sphinx/pull/3744
99 nitpick_ignore = [
100 ("py:class", "ValueT"),
101 ("py:class", "MetricT"),
102 # Even if wrapt is added to intersphinx_mapping, sphinx keeps failing
103 # with "class reference target not found: ObjectProxy".
104 ("py:class", "ObjectProxy"),
105 # TODO: Understand why sphinx is not able to find this local class
106 ("py:class", "opentelemetry.trace.propagation.textmap.TextMapPropagator",),
107 (
108 "any",
109 "opentelemetry.trace.propagation.textmap.TextMapPropagator.extract",
110 ),
111 (
112 "any",
113 "opentelemetry.trace.propagation.textmap.TextMapPropagator.inject",
114 ),
115 ]
116
117 # Add any paths that contain templates here, relative to this directory.
118 templates_path = ["_templates"]
119
120 # List of patterns, relative to source directory, that match files and
121 # directories to ignore when looking for source files.
122 # This pattern also affects html_static_path and html_extra_path.
123 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
124
125 autodoc_default_options = {
126 "members": True,
127 "undoc-members": True,
128 "show-inheritance": True,
129 "member-order": "bysource",
130 }
131
132 # -- Options for HTML output -------------------------------------------------
133
134 # The theme to use for HTML and HTML Help pages. See the documentation for
135 # a list of builtin themes.
136 #
137 html_theme = "sphinx_rtd_theme"
138
139 # Add any paths that contain custom static files (such as style sheets) here,
140 # relative to this directory. They are copied after the builtin static files,
141 # so a file named "default.css" will overwrite the builtin "default.css".
142 html_static_path = []
143
144 # Support external links to specific versions of the files in the Github repo
145 branch = os.environ.get("READTHEDOCS_VERSION")
146 if branch is None or branch == "latest":
147 branch = "master"
148
149 REPO = "open-telemetry/opentelemetry-python/"
150 scm_raw_web = "https://raw.githubusercontent.com/" + REPO + branch
151 scm_web = "https://github.com/" + REPO + "blob/" + branch
152
153 # Store variables in the epilogue so they are globally available.
154 rst_epilog = """
155 .. |SCM_WEB| replace:: {s}
156 .. |SCM_RAW_WEB| replace:: {sr}
157 .. |SCM_BRANCH| replace:: {b}
158 """.format(
159 s=scm_web, sr=scm_raw_web, b=branch
160 )
161
162 # used to have links to repo files
163 extlinks = {
164 "scm_raw_web": (scm_raw_web + "/%s", "scm_raw_web"),
165 "scm_web": (scm_web + "/%s", "scm_web"),
166 }
167
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -25,8 +25,6 @@
source_dirs = [
- os.path.abspath("../opentelemetry-api/src/"),
- os.path.abspath("../opentelemetry-sdk/src/"),
os.path.abspath("../opentelemetry-instrumentation/src/"),
]
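
The change drops the API and SDK source checkouts from `sys.path`, so autodoc presumably imports those packages from the distributions installed in the docs build environment instead of from raw source trees. A rough sketch of the failure mode in the traceback above, assuming (as on the failing ReadTheDocs build) that no `opentelemetry-sdk` distribution is installed; paths are illustrative:

```python
import sys

# With the source tree on sys.path, the module import itself succeeds...
sys.path.insert(0, "../opentelemetry-sdk/src")
import opentelemetry.sdk  # noqa: F401  (resolved from the raw checkout)

# ...but there is no installed distribution metadata, so the version lookup
# done in sdk/resources/__init__.py still fails:
import pkg_resources
pkg_resources.get_distribution("opentelemetry-sdk")  # raises DistributionNotFound
```
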
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -25,8 +25,6 @@\n \n \n source_dirs = [\n- os.path.abspath(\"../opentelemetry-api/src/\"),\n- os.path.abspath(\"../opentelemetry-sdk/src/\"),\n os.path.abspath(\"../opentelemetry-instrumentation/src/\"),\n ]\n", "issue": "ReadTheDocs configuration issue\nThe RTD documentation is not updated with my changes in #1096 https://opentelemetry-python.readthedocs.io/en/latest/sdk/resources.html\r\n\r\nHowever, it works fine locally.\r\n\r\n**Steps to reproduce**\r\nReproduced in all builds on RTD. For example, see logs here: https://readthedocs.org/projects/opentelemetry-python/builds/11937984/ (all builds [here](https://readthedocs.org/projects/google-cloud-opentelemetry/builds/))\r\n\r\nYou can run the commands in the logs to repro locally and see many of these types of errors:\r\n```\r\nWARNING: autodoc: failed to import module 'zipkin' from module 'opentelemetry.exporter'; the following exception was raised:\r\nTraceback (most recent call last):\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/sphinx/ext/autodoc/importer.py\", line 32, in import_module\r\n return importlib.import_module(modname)\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1014, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 783, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\", line 72, in <module>\r\n from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/__init__.py\", line 19, in <module>\r\n from . 
import metrics, trace, util\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py\", line 33, in <module>\r\n from opentelemetry.sdk.resources import Resource\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\", line 98, in <module>\r\n OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 481, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 357, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 900, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 786, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'opentelemetry-sdk' distribution was not found and is required by the application\r\n```\r\n\r\n**What is the expected behavior?**\r\n<img width=\"1218\" alt=\"Screen Shot 2020-09-22 at 3 23 21 PM\" src=\"https://user-images.githubusercontent.com/1510004/93927952-9410df00-fce7-11ea-9328-2d4c9951089e.png\">\r\n\r\n**What is the actual behavior?**\r\nSee logs above\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom os import listdir\nfrom os.path import isdir, join\n\n# configure django to avoid the following exception:\n# django.core.exceptions.ImproperlyConfigured: Requested settings, but settings\n# are not configured. 
You must either define the environment variable\n# DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.\nfrom django.conf import settings\n\nsettings.configure()\n\n\nsource_dirs = [\n os.path.abspath(\"../opentelemetry-api/src/\"),\n os.path.abspath(\"../opentelemetry-sdk/src/\"),\n os.path.abspath(\"../opentelemetry-instrumentation/src/\"),\n]\n\nexp = \"../exporter\"\nexp_dirs = [\n os.path.abspath(\"/\".join([\"../exporter\", f, \"src\"]))\n for f in listdir(exp)\n if isdir(join(exp, f))\n]\n\ninstr = \"../instrumentation\"\ninstr_dirs = [\n os.path.abspath(\"/\".join([\"../instrumentation\", f, \"src\"]))\n for f in listdir(instr)\n if isdir(join(instr, f))\n]\n\nsys.path[:0] = source_dirs + exp_dirs + instr_dirs\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry Python\"\ncopyright = \"OpenTelemetry Authors\" # pylint: disable=redefined-builtin\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n # Support external links to different versions in the Github repo\n \"sphinx.ext.extlinks\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"opentracing\": (\n \"https://opentracing-python.readthedocs.io/en/latest/\",\n None,\n ),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"wrapt\": (\"https://wrapt.readthedocs.io/en/latest/\", None),\n \"pymongo\": (\"https://pymongo.readthedocs.io/en/stable/\", None),\n}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\n# Sphinx does not recognize generic type TypeVars\n# Container supposedly were fixed, but does not work\n# https://github.com/sphinx-doc/sphinx/pull/3744\nnitpick_ignore = [\n (\"py:class\", \"ValueT\"),\n (\"py:class\", \"MetricT\"),\n # Even if wrapt is added to intersphinx_mapping, sphinx keeps failing\n # with \"class reference target not found: ObjectProxy\".\n (\"py:class\", \"ObjectProxy\"),\n # TODO: Understand why sphinx is not able to find this local class\n (\"py:class\", \"opentelemetry.trace.propagation.textmap.TextMapPropagator\",),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.extract\",\n ),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.inject\",\n ),\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", 
\"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Support external links to specific versions of the files in the Github repo\nbranch = os.environ.get(\"READTHEDOCS_VERSION\")\nif branch is None or branch == \"latest\":\n branch = \"master\"\n\nREPO = \"open-telemetry/opentelemetry-python/\"\nscm_raw_web = \"https://raw.githubusercontent.com/\" + REPO + branch\nscm_web = \"https://github.com/\" + REPO + \"blob/\" + branch\n\n# Store variables in the epilogue so they are globally available.\nrst_epilog = \"\"\"\n.. |SCM_WEB| replace:: {s}\n.. |SCM_RAW_WEB| replace:: {sr}\n.. |SCM_BRANCH| replace:: {b}\n\"\"\".format(\n s=scm_web, sr=scm_raw_web, b=branch\n)\n\n# used to have links to repo files\nextlinks = {\n \"scm_raw_web\": (scm_raw_web + \"/%s\", \"scm_raw_web\"),\n \"scm_web\": (scm_web + \"/%s\", \"scm_web\"),\n}\n", "path": "docs/conf.py"}]} | 3,344 | 82 |
gh_patches_debug_40621 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3881 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add is_remote_parent span flags to OTLP exported Spans and SpanLinks
**Is your feature request related to a problem?**
After updating protobuf schema to 1.2.0 in 1164ab6c0193f658712b55469ecafbf031badc3b we can set the is_remote_parent flag when exporting spans and span links with OTLP.
**Describe the solution you'd like**
Update OTLP exporter to fill the relevant bits in SpanFlags.
**Describe alternatives you've considered**
none
**Additional context**
Proto schema PR: https://github.com/open-telemetry/opentelemetry-proto/pull/484
Go implementation: https://github.com/open-telemetry/opentelemetry-go/pull/5194/files
</issue>
<code>
[start of exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from collections import defaultdict
17 from typing import List, Optional, Sequence
18
19 from opentelemetry.exporter.otlp.proto.common._internal import (
20 _encode_trace_id,
21 _encode_span_id,
22 _encode_instrumentation_scope,
23 _encode_attributes,
24 _encode_resource,
25 )
26 from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
27 ExportTraceServiceRequest as PB2ExportTraceServiceRequest,
28 )
29 from opentelemetry.proto.trace.v1.trace_pb2 import (
30 ScopeSpans as PB2ScopeSpans,
31 )
32 from opentelemetry.proto.trace.v1.trace_pb2 import (
33 ResourceSpans as PB2ResourceSpans,
34 )
35 from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan
36 from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status
37 from opentelemetry.sdk.trace import Event, ReadableSpan
38 from opentelemetry.trace import Link
39 from opentelemetry.trace import SpanKind
40 from opentelemetry.trace.span import SpanContext, TraceState, Status
41
42 # pylint: disable=E1101
43 _SPAN_KIND_MAP = {
44 SpanKind.INTERNAL: PB2SPan.SpanKind.SPAN_KIND_INTERNAL,
45 SpanKind.SERVER: PB2SPan.SpanKind.SPAN_KIND_SERVER,
46 SpanKind.CLIENT: PB2SPan.SpanKind.SPAN_KIND_CLIENT,
47 SpanKind.PRODUCER: PB2SPan.SpanKind.SPAN_KIND_PRODUCER,
48 SpanKind.CONSUMER: PB2SPan.SpanKind.SPAN_KIND_CONSUMER,
49 }
50
51 _logger = logging.getLogger(__name__)
52
53
54 def encode_spans(
55 sdk_spans: Sequence[ReadableSpan],
56 ) -> PB2ExportTraceServiceRequest:
57 return PB2ExportTraceServiceRequest(
58 resource_spans=_encode_resource_spans(sdk_spans)
59 )
60
61
62 def _encode_resource_spans(
63 sdk_spans: Sequence[ReadableSpan],
64 ) -> List[PB2ResourceSpans]:
65 # We need to inspect the spans and group + structure them as:
66 #
67 # Resource
68 # Instrumentation Library
69 # Spans
70 #
71 # First loop organizes the SDK spans in this structure. Protobuf messages
72 # are not hashable so we stick with SDK data in this phase.
73 #
74 # Second loop encodes the data into Protobuf format.
75 #
76 sdk_resource_spans = defaultdict(lambda: defaultdict(list))
77
78 for sdk_span in sdk_spans:
79 sdk_resource = sdk_span.resource
80 sdk_instrumentation = sdk_span.instrumentation_scope or None
81 pb2_span = _encode_span(sdk_span)
82
83 sdk_resource_spans[sdk_resource][sdk_instrumentation].append(pb2_span)
84
85 pb2_resource_spans = []
86
87 for sdk_resource, sdk_instrumentations in sdk_resource_spans.items():
88 scope_spans = []
89 for sdk_instrumentation, pb2_spans in sdk_instrumentations.items():
90 scope_spans.append(
91 PB2ScopeSpans(
92 scope=(_encode_instrumentation_scope(sdk_instrumentation)),
93 spans=pb2_spans,
94 )
95 )
96 pb2_resource_spans.append(
97 PB2ResourceSpans(
98 resource=_encode_resource(sdk_resource),
99 scope_spans=scope_spans,
100 schema_url=sdk_resource.schema_url,
101 )
102 )
103
104 return pb2_resource_spans
105
106
107 def _encode_span(sdk_span: ReadableSpan) -> PB2SPan:
108 span_context = sdk_span.get_span_context()
109 return PB2SPan(
110 trace_id=_encode_trace_id(span_context.trace_id),
111 span_id=_encode_span_id(span_context.span_id),
112 trace_state=_encode_trace_state(span_context.trace_state),
113 parent_span_id=_encode_parent_id(sdk_span.parent),
114 name=sdk_span.name,
115 kind=_SPAN_KIND_MAP[sdk_span.kind],
116 start_time_unix_nano=sdk_span.start_time,
117 end_time_unix_nano=sdk_span.end_time,
118 attributes=_encode_attributes(sdk_span.attributes),
119 events=_encode_events(sdk_span.events),
120 links=_encode_links(sdk_span.links),
121 status=_encode_status(sdk_span.status),
122 dropped_attributes_count=sdk_span.dropped_attributes,
123 dropped_events_count=sdk_span.dropped_events,
124 dropped_links_count=sdk_span.dropped_links,
125 )
126
127
128 def _encode_events(
129 events: Sequence[Event],
130 ) -> Optional[List[PB2SPan.Event]]:
131 pb2_events = None
132 if events:
133 pb2_events = []
134 for event in events:
135 encoded_event = PB2SPan.Event(
136 name=event.name,
137 time_unix_nano=event.timestamp,
138 attributes=_encode_attributes(event.attributes),
139 dropped_attributes_count=event.attributes.dropped,
140 )
141 pb2_events.append(encoded_event)
142 return pb2_events
143
144
145 def _encode_links(links: Sequence[Link]) -> Sequence[PB2SPan.Link]:
146 pb2_links = None
147 if links:
148 pb2_links = []
149 for link in links:
150 encoded_link = PB2SPan.Link(
151 trace_id=_encode_trace_id(link.context.trace_id),
152 span_id=_encode_span_id(link.context.span_id),
153 attributes=_encode_attributes(link.attributes),
154 dropped_attributes_count=link.attributes.dropped,
155 )
156 pb2_links.append(encoded_link)
157 return pb2_links
158
159
160 def _encode_status(status: Status) -> Optional[PB2Status]:
161 pb2_status = None
162 if status is not None:
163 pb2_status = PB2Status(
164 code=status.status_code.value,
165 message=status.description,
166 )
167 return pb2_status
168
169
170 def _encode_trace_state(trace_state: TraceState) -> Optional[str]:
171 pb2_trace_state = None
172 if trace_state is not None:
173 pb2_trace_state = ",".join(
174 [f"{key}={value}" for key, value in (trace_state.items())]
175 )
176 return pb2_trace_state
177
178
179 def _encode_parent_id(context: Optional[SpanContext]) -> Optional[bytes]:
180 if context:
181 return _encode_span_id(context.span_id)
182 return None
183
[end of exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py
--- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py
@@ -17,27 +17,25 @@
from typing import List, Optional, Sequence
from opentelemetry.exporter.otlp.proto.common._internal import (
- _encode_trace_id,
- _encode_span_id,
- _encode_instrumentation_scope,
_encode_attributes,
+ _encode_instrumentation_scope,
_encode_resource,
+ _encode_span_id,
+ _encode_trace_id,
)
from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
ExportTraceServiceRequest as PB2ExportTraceServiceRequest,
)
-from opentelemetry.proto.trace.v1.trace_pb2 import (
- ScopeSpans as PB2ScopeSpans,
-)
from opentelemetry.proto.trace.v1.trace_pb2 import (
ResourceSpans as PB2ResourceSpans,
)
+from opentelemetry.proto.trace.v1.trace_pb2 import ScopeSpans as PB2ScopeSpans
from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan
+from opentelemetry.proto.trace.v1.trace_pb2 import SpanFlags as PB2SpanFlags
from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status
from opentelemetry.sdk.trace import Event, ReadableSpan
-from opentelemetry.trace import Link
-from opentelemetry.trace import SpanKind
-from opentelemetry.trace.span import SpanContext, TraceState, Status
+from opentelemetry.trace import Link, SpanKind
+from opentelemetry.trace.span import SpanContext, Status, TraceState
# pylint: disable=E1101
_SPAN_KIND_MAP = {
@@ -104,6 +102,13 @@
return pb2_resource_spans
+def _span_flags(parent_span_context: Optional[SpanContext]) -> int:
+ flags = PB2SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+ if parent_span_context and parent_span_context.is_remote:
+ flags |= PB2SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+ return flags
+
+
def _encode_span(sdk_span: ReadableSpan) -> PB2SPan:
span_context = sdk_span.get_span_context()
return PB2SPan(
@@ -122,6 +127,7 @@
dropped_attributes_count=sdk_span.dropped_attributes,
dropped_events_count=sdk_span.dropped_events,
dropped_links_count=sdk_span.dropped_links,
+ flags=_span_flags(sdk_span.parent),
)
@@ -152,6 +158,7 @@
span_id=_encode_span_id(link.context.span_id),
attributes=_encode_attributes(link.attributes),
dropped_attributes_count=link.attributes.dropped,
+ flags=_span_flags(link.context),
)
pb2_links.append(encoded_link)
return pb2_links
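
With the `_span_flags` helper added above, every exported span and span link now carries the `SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK` bit, plus the `SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK` bit whenever the parent (or linked) context is remote. A small usage sketch, assuming `_span_flags` from the diff is in scope:

```python
from opentelemetry.trace.span import SpanContext

remote_parent = SpanContext(trace_id=1, span_id=2, is_remote=True)
local_parent = SpanContext(trace_id=1, span_id=3, is_remote=False)

_span_flags(remote_parent)  # HAS_IS_REMOTE and IS_REMOTE bits set
_span_flags(local_parent)   # only the HAS_IS_REMOTE bit set
_span_flags(None)           # root span: is_remote is still marked as "known"
```
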
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py\n@@ -17,27 +17,25 @@\n from typing import List, Optional, Sequence\n \n from opentelemetry.exporter.otlp.proto.common._internal import (\n- _encode_trace_id,\n- _encode_span_id,\n- _encode_instrumentation_scope,\n _encode_attributes,\n+ _encode_instrumentation_scope,\n _encode_resource,\n+ _encode_span_id,\n+ _encode_trace_id,\n )\n from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (\n ExportTraceServiceRequest as PB2ExportTraceServiceRequest,\n )\n-from opentelemetry.proto.trace.v1.trace_pb2 import (\n- ScopeSpans as PB2ScopeSpans,\n-)\n from opentelemetry.proto.trace.v1.trace_pb2 import (\n ResourceSpans as PB2ResourceSpans,\n )\n+from opentelemetry.proto.trace.v1.trace_pb2 import ScopeSpans as PB2ScopeSpans\n from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan\n+from opentelemetry.proto.trace.v1.trace_pb2 import SpanFlags as PB2SpanFlags\n from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status\n from opentelemetry.sdk.trace import Event, ReadableSpan\n-from opentelemetry.trace import Link\n-from opentelemetry.trace import SpanKind\n-from opentelemetry.trace.span import SpanContext, TraceState, Status\n+from opentelemetry.trace import Link, SpanKind\n+from opentelemetry.trace.span import SpanContext, Status, TraceState\n \n # pylint: disable=E1101\n _SPAN_KIND_MAP = {\n@@ -104,6 +102,13 @@\n return pb2_resource_spans\n \n \n+def _span_flags(parent_span_context: Optional[SpanContext]) -> int:\n+ flags = PB2SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK\n+ if parent_span_context and parent_span_context.is_remote:\n+ flags |= PB2SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK\n+ return flags\n+\n+\n def _encode_span(sdk_span: ReadableSpan) -> PB2SPan:\n span_context = sdk_span.get_span_context()\n return PB2SPan(\n@@ -122,6 +127,7 @@\n dropped_attributes_count=sdk_span.dropped_attributes,\n dropped_events_count=sdk_span.dropped_events,\n dropped_links_count=sdk_span.dropped_links,\n+ flags=_span_flags(sdk_span.parent),\n )\n \n \n@@ -152,6 +158,7 @@\n span_id=_encode_span_id(link.context.span_id),\n attributes=_encode_attributes(link.attributes),\n dropped_attributes_count=link.attributes.dropped,\n+ flags=_span_flags(link.context),\n )\n pb2_links.append(encoded_link)\n return pb2_links\n", "issue": "Add is_remote_parent span flags to OTLP exported Spans and SpanLinks\n**Is your feature request related to a problem?**\r\n\r\nAfter updating protobuf schema to 1.2.0 in 1164ab6c0193f658712b55469ecafbf031badc3b we can set the is_remote_parent flag when exporting spans and span links with OTLP. 
\r\n\r\n**Describe the solution you'd like**\r\n\r\nUpdate OTLP exporter to fill the relevant bits in SpanFlags.\r\n\r\n**Describe alternatives you've considered**\r\n\r\nnone\r\n\r\n**Additional context**\r\n\r\nProto schema PR: https://github.com/open-telemetry/opentelemetry-proto/pull/484\r\nGo implementation: https://github.com/open-telemetry/opentelemetry-go/pull/5194/files\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom collections import defaultdict\nfrom typing import List, Optional, Sequence\n\nfrom opentelemetry.exporter.otlp.proto.common._internal import (\n _encode_trace_id,\n _encode_span_id,\n _encode_instrumentation_scope,\n _encode_attributes,\n _encode_resource,\n)\nfrom opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (\n ExportTraceServiceRequest as PB2ExportTraceServiceRequest,\n)\nfrom opentelemetry.proto.trace.v1.trace_pb2 import (\n ScopeSpans as PB2ScopeSpans,\n)\nfrom opentelemetry.proto.trace.v1.trace_pb2 import (\n ResourceSpans as PB2ResourceSpans,\n)\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan\nfrom opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status\nfrom opentelemetry.sdk.trace import Event, ReadableSpan\nfrom opentelemetry.trace import Link\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.trace.span import SpanContext, TraceState, Status\n\n# pylint: disable=E1101\n_SPAN_KIND_MAP = {\n SpanKind.INTERNAL: PB2SPan.SpanKind.SPAN_KIND_INTERNAL,\n SpanKind.SERVER: PB2SPan.SpanKind.SPAN_KIND_SERVER,\n SpanKind.CLIENT: PB2SPan.SpanKind.SPAN_KIND_CLIENT,\n SpanKind.PRODUCER: PB2SPan.SpanKind.SPAN_KIND_PRODUCER,\n SpanKind.CONSUMER: PB2SPan.SpanKind.SPAN_KIND_CONSUMER,\n}\n\n_logger = logging.getLogger(__name__)\n\n\ndef encode_spans(\n sdk_spans: Sequence[ReadableSpan],\n) -> PB2ExportTraceServiceRequest:\n return PB2ExportTraceServiceRequest(\n resource_spans=_encode_resource_spans(sdk_spans)\n )\n\n\ndef _encode_resource_spans(\n sdk_spans: Sequence[ReadableSpan],\n) -> List[PB2ResourceSpans]:\n # We need to inspect the spans and group + structure them as:\n #\n # Resource\n # Instrumentation Library\n # Spans\n #\n # First loop organizes the SDK spans in this structure. 
Protobuf messages\n # are not hashable so we stick with SDK data in this phase.\n #\n # Second loop encodes the data into Protobuf format.\n #\n sdk_resource_spans = defaultdict(lambda: defaultdict(list))\n\n for sdk_span in sdk_spans:\n sdk_resource = sdk_span.resource\n sdk_instrumentation = sdk_span.instrumentation_scope or None\n pb2_span = _encode_span(sdk_span)\n\n sdk_resource_spans[sdk_resource][sdk_instrumentation].append(pb2_span)\n\n pb2_resource_spans = []\n\n for sdk_resource, sdk_instrumentations in sdk_resource_spans.items():\n scope_spans = []\n for sdk_instrumentation, pb2_spans in sdk_instrumentations.items():\n scope_spans.append(\n PB2ScopeSpans(\n scope=(_encode_instrumentation_scope(sdk_instrumentation)),\n spans=pb2_spans,\n )\n )\n pb2_resource_spans.append(\n PB2ResourceSpans(\n resource=_encode_resource(sdk_resource),\n scope_spans=scope_spans,\n schema_url=sdk_resource.schema_url,\n )\n )\n\n return pb2_resource_spans\n\n\ndef _encode_span(sdk_span: ReadableSpan) -> PB2SPan:\n span_context = sdk_span.get_span_context()\n return PB2SPan(\n trace_id=_encode_trace_id(span_context.trace_id),\n span_id=_encode_span_id(span_context.span_id),\n trace_state=_encode_trace_state(span_context.trace_state),\n parent_span_id=_encode_parent_id(sdk_span.parent),\n name=sdk_span.name,\n kind=_SPAN_KIND_MAP[sdk_span.kind],\n start_time_unix_nano=sdk_span.start_time,\n end_time_unix_nano=sdk_span.end_time,\n attributes=_encode_attributes(sdk_span.attributes),\n events=_encode_events(sdk_span.events),\n links=_encode_links(sdk_span.links),\n status=_encode_status(sdk_span.status),\n dropped_attributes_count=sdk_span.dropped_attributes,\n dropped_events_count=sdk_span.dropped_events,\n dropped_links_count=sdk_span.dropped_links,\n )\n\n\ndef _encode_events(\n events: Sequence[Event],\n) -> Optional[List[PB2SPan.Event]]:\n pb2_events = None\n if events:\n pb2_events = []\n for event in events:\n encoded_event = PB2SPan.Event(\n name=event.name,\n time_unix_nano=event.timestamp,\n attributes=_encode_attributes(event.attributes),\n dropped_attributes_count=event.attributes.dropped,\n )\n pb2_events.append(encoded_event)\n return pb2_events\n\n\ndef _encode_links(links: Sequence[Link]) -> Sequence[PB2SPan.Link]:\n pb2_links = None\n if links:\n pb2_links = []\n for link in links:\n encoded_link = PB2SPan.Link(\n trace_id=_encode_trace_id(link.context.trace_id),\n span_id=_encode_span_id(link.context.span_id),\n attributes=_encode_attributes(link.attributes),\n dropped_attributes_count=link.attributes.dropped,\n )\n pb2_links.append(encoded_link)\n return pb2_links\n\n\ndef _encode_status(status: Status) -> Optional[PB2Status]:\n pb2_status = None\n if status is not None:\n pb2_status = PB2Status(\n code=status.status_code.value,\n message=status.description,\n )\n return pb2_status\n\n\ndef _encode_trace_state(trace_state: TraceState) -> Optional[str]:\n pb2_trace_state = None\n if trace_state is not None:\n pb2_trace_state = \",\".join(\n [f\"{key}={value}\" for key, value in (trace_state.items())]\n )\n return pb2_trace_state\n\n\ndef _encode_parent_id(context: Optional[SpanContext]) -> Optional[bytes]:\n if context:\n return _encode_span_id(context.span_id)\n return None\n", "path": "exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py"}]} | 2,651 | 749 |
gh_patches_debug_13746 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lost visibility into bulk Elasticsearch operations with 0.48.0
With the upgrade to ddtrace 0.48.0, we appear to have lost visibility into Elasticsearch bulk operations.
Here's the view in APM for our indexer application that writes to ES. We seem to have lost all visibility. The first screenshot shows that APM itself is still working, while the second screenshot makes it clear that we lost all visibility into Elasticsearch.


Within a trace, I can see the `http.client.request` to ES, but again, no visibility into ES as a service.
### Which version of dd-trace-py are you using?
0.48.0
### Which version of pip are you using?
18.1
### Which version of the libraries are you using?
```
elasticsearch7==7.12.0
elasticsearch7-dsl==7.2.1
```
### How can we reproduce your problem?
Set up APM on an application that writes to ES with bulk operations.
### What is the result that you get?
No span for the bulk write to ES
### What is the result that you expected?
You should get a span for the bulk write to ES
</issue>
<code>
[start of ddtrace/monkey.py]
1 """Patch libraries to be automatically instrumented.
2
3 It can monkey patch supported standard libraries and third party modules.
4 A patched module will automatically report spans with its default configuration.
5
6 A library instrumentation can be configured (for instance, to report as another service)
7 using Pin. For that, check its documentation.
8 """
9 import importlib
10 import os
11 import sys
12 import threading
13 from typing import Any
14 from typing import Callable
15 from typing import Dict
16 from typing import List
17
18 from ddtrace.vendor.wrapt.importer import when_imported
19
20 from .internal.logger import get_logger
21 from .settings import _config as config
22 from .utils import formats
23
24
25 log = get_logger(__name__)
26
27 # Default set of modules to automatically patch or not
28 PATCH_MODULES = {
29 "asyncio": True,
30 "boto": True,
31 "botocore": True,
32 "bottle": False,
33 "cassandra": True,
34 "celery": True,
35 "consul": True,
36 "django": True,
37 "elasticsearch": True,
38 "algoliasearch": True,
39 "futures": True,
40 "grpc": True,
41 "mongoengine": True,
42 "mysql": True,
43 "mysqldb": True,
44 "pymysql": True,
45 "psycopg": True,
46 "pylibmc": True,
47 "pymemcache": True,
48 "pymongo": True,
49 "redis": True,
50 "rediscluster": True,
51 "requests": True,
52 "sanic": True,
53 "sqlalchemy": False, # Prefer DB client instrumentation
54 "sqlite3": True,
55 "aiohttp": True, # requires asyncio (Python 3.4+)
56 "aiopg": True,
57 "aiobotocore": False,
58 "httplib": False,
59 "urllib3": False,
60 "vertica": True,
61 "molten": True,
62 "jinja2": True,
63 "mako": True,
64 "flask": True,
65 "kombu": False,
66 "starlette": True,
67 # Ignore some web framework integrations that might be configured explicitly in code
68 "falcon": False,
69 "pylons": False,
70 "pyramid": False,
71 # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true
72 "logging": config.logs_injection,
73 "pynamodb": True,
74 "pyodbc": True,
75 "fastapi": True,
76 "dogpile_cache": True,
77 }
78
79 _LOCK = threading.Lock()
80 _PATCHED_MODULES = set()
81
82 # Modules which are patched on first use
83 # DEV: These modules are patched when the user first imports them, rather than
84 # explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`
85 # DEV: This ensures we do not patch a module until it is needed
86 # DEV: <contrib name> => <list of module names that trigger a patch>
87 _PATCH_ON_IMPORT = {
88 "aiohttp": ("aiohttp",),
89 "aiobotocore": ("aiobotocore",),
90 "celery": ("celery",),
91 "flask": ("flask",),
92 "gevent": ("gevent",),
93 "requests": ("requests",),
94 "botocore": ("botocore",),
95 "elasticsearch": (
96 "elasticsearch",
97 "elasticsearch2",
98 "elasticsearch5",
99 "elasticsearch6",
100 "elasticsearch7",
101 ),
102 "pynamodb": ("pynamodb",),
103 }
104
105
106 class PatchException(Exception):
107 """Wraps regular `Exception` class when patching modules"""
108
109 pass
110
111
112 class ModuleNotFoundException(PatchException):
113 pass
114
115
116 def _on_import_factory(module, raise_errors=True):
117 # type: (str, bool) -> Callable[[Any], None]
118 """Factory to create an import hook for the provided module name"""
119
120 def on_import(hook):
121 # Import and patch module
122 path = "ddtrace.contrib.%s" % module
123 try:
124 imported_module = importlib.import_module(path)
125 except ImportError:
126 if raise_errors:
127 raise
128 log.error("failed to import ddtrace module %r when patching on import", path, exc_info=True)
129 else:
130 imported_module.patch()
131
132 return on_import
133
134
135 def patch_all(**patch_modules):
136 # type: (Dict[str, bool]) -> None
137 """Automatically patches all available modules.
138
139 In addition to ``patch_modules``, an override can be specified via an
140 environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.
141
142 ``patch_modules`` have the highest precedence for overriding.
143
144 :param dict patch_modules: Override whether particular modules are patched or not.
145
146 >>> patch_all(redis=False, cassandra=False)
147 """
148 modules = PATCH_MODULES.copy()
149
150 # The enabled setting can be overridden by environment variables
151 for module, enabled in modules.items():
152 env_var = "DD_TRACE_%s_ENABLED" % module.upper()
153 if env_var not in os.environ:
154 continue
155
156 override_enabled = formats.asbool(os.environ[env_var])
157 modules[module] = override_enabled
158
159 # Arguments take precedence over the environment and the defaults.
160 modules.update(patch_modules)
161
162 patch(raise_errors=False, **modules)
163
164
165 def patch(raise_errors=True, **patch_modules):
166 # type: (bool, Dict[str, bool]) -> None
167 """Patch only a set of given modules.
168
169 :param bool raise_errors: Raise error if one patch fail.
170 :param dict patch_modules: List of modules to patch.
171
172 >>> patch(psycopg=True, elasticsearch=True)
173 """
174 modules = [m for (m, should_patch) in patch_modules.items() if should_patch]
175 for module in modules:
176 if module in _PATCH_ON_IMPORT:
177 modules_to_poi = _PATCH_ON_IMPORT[module]
178 for m in modules_to_poi:
179 # If the module has already been imported then patch immediately
180 if m in sys.modules:
181 patch_module(m, raise_errors=raise_errors)
182 # Otherwise, add a hook to patch when it is imported for the first time
183 else:
184 # Use factory to create handler to close over `module` and `raise_errors` values from this loop
185 when_imported(m)(_on_import_factory(module, raise_errors))
186
187 # manually add module to patched modules
188 with _LOCK:
189 _PATCHED_MODULES.add(module)
190 else:
191 patch_module(module, raise_errors=raise_errors)
192
193 patched_modules = get_patched_modules()
194 log.info(
195 "patched %s/%s modules (%s)",
196 len(patched_modules),
197 len(modules),
198 ",".join(patched_modules),
199 )
200
201
202 def patch_module(module, raise_errors=True):
203 # type: (str, bool) -> bool
204 """Patch a single module
205
206 Returns if the module got properly patched.
207 """
208 try:
209 return _patch_module(module)
210 except ModuleNotFoundException:
211 if raise_errors:
212 raise
213 return False
214 except Exception:
215 if raise_errors:
216 raise
217 log.debug("failed to patch %s", module, exc_info=True)
218 return False
219
220
221 def get_patched_modules():
222 # type: () -> List[str]
223 """Get the list of patched modules"""
224 with _LOCK:
225 return sorted(_PATCHED_MODULES)
226
227
228 def _patch_module(module):
229 # type: (str) -> bool
230 """_patch_module will attempt to monkey patch the module.
231
232 Returns if the module got patched.
233 Can also raise errors if it fails.
234 """
235 path = "ddtrace.contrib.%s" % module
236 with _LOCK:
237 if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
238 log.debug("already patched: %s", path)
239 return False
240
241 try:
242 imported_module = importlib.import_module(path)
243 except ImportError:
244 # if the import fails, the integration is not available
245 raise PatchException("integration '%s' not available" % path)
246 else:
247 # if patch() is not available in the module, it means
248 # that the library is not installed in the environment
249 if not hasattr(imported_module, "patch"):
250 raise ModuleNotFoundException("module '%s' not installed" % module)
251
252 imported_module.patch() # type: ignore
253 _PATCHED_MODULES.add(module)
254 return True
255
[end of ddtrace/monkey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -178,7 +178,8 @@
for m in modules_to_poi:
# If the module has already been imported then patch immediately
if m in sys.modules:
- patch_module(m, raise_errors=raise_errors)
+ patch_module(module, raise_errors=raise_errors)
+ break
# Otherwise, add a hook to patch when it is imported for the first time
else:
# Use factory to create handler to close over `module` and `raise_errors` values from this loop
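
The regression sits in the patch-on-import branch shown above: when a trigger module such as `elasticsearch7` is already imported, the old code passed the trigger name `m` to `patch_module`, which builds the import path `ddtrace.contrib.elasticsearch7` (presumably not a real integration module), so with `raise_errors=False` from the `patch_all` path the failure was merely logged and the client was never instrumented. The fix passes the integration name and breaks after the first patch. A hedged sketch of the difference, reusing `patch_module` from `ddtrace/monkey.py`:

```python
# Before the fix (m == "elasticsearch7" in the loop): tries to import
# ddtrace.contrib.elasticsearch7 and silently returns False on failure.
patch_module("elasticsearch7", raise_errors=False)

# After the fix (module == "elasticsearch"): patches the real integration once;
# the added `break` prevents re-patching for every trigger module already imported.
patch_module("elasticsearch", raise_errors=False)
```
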
| {"golden_diff": "diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -178,7 +178,8 @@\n for m in modules_to_poi:\n # If the module has already been imported then patch immediately\n if m in sys.modules:\n- patch_module(m, raise_errors=raise_errors)\n+ patch_module(module, raise_errors=raise_errors)\n+ break\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n", "issue": "Lost visibility into bulk Elasticsearch operations with 0.48.0\nWith upgrade to ddtrace 0.48.0, we appear to have lost visibility into Elasticsearch bulk operations.\r\n\r\nHere's the view in APM for our indexer application that writes to ES. We seem to have lost all visibility. The first screenshot shows that APM istelf is still working, while the second screenshot makes it clear that we lost all visibility into Elasticsearch.\r\n\r\n\r\n\r\n\r\n\r\nWithin a trace, I can see the `http.client.request` to ES, but again, no visibility into ES as a service.\r\n\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.48.0\r\n\r\n### Which version of pip are you using?\r\n18.1\r\n\r\n### Which version of the libraries are you using?\r\n```\r\nelasticsearch7==7.12.0\r\nelasticsearch7-dsl==7.2.1\r\n```\r\n\r\n### How can we reproduce your problem?\r\nSetup APM on an application that writes to ES with bulk operations.\r\n\r\n### What is the result that you get?\r\nNo span for the bulk write to ES\r\n\r\n### What is the result that you expected?\r\nYou should get a span for the bulk write to ES\n", "before_files": [{"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. 
For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import _config as config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": True,\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"urllib3\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n \"dogpile_cache\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask\",),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\n \"elasticsearch\",\n \"elasticsearch2\",\n \"elasticsearch5\",\n \"elasticsearch6\",\n \"elasticsearch7\",\n ),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n # type: (str, bool) -> Callable[[Any], None]\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n if raise_errors:\n raise\n log.error(\"failed to import ddtrace module %r when patching on import\", path, exc_info=True)\n else:\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n # type: (Dict[str, bool]) -> None\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` 
have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n # type: (bool, Dict[str, bool]) -> None\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n modules_to_poi = _PATCH_ON_IMPORT[module]\n for m in modules_to_poi:\n # If the module has already been imported then patch immediately\n if m in sys.modules:\n patch_module(m, raise_errors=raise_errors)\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(m)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n # type: (str, bool) -> bool\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n # type: () -> List[str]\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n # type: (str) -> bool\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch() # type: ignore\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]} | 3,409 | 147 |
gh_patches_debug_19921 | rasdani/github-patches | git_diff | cal-itp__benefits-35 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update favicon
Current dev has the old Cal-ITP logo as the favicon.
</issue>
<code>
[start of benefits/discounts/views.py]
1 """
2 The discounts application: view definitions for the discounts association flow.
3 """
4 import logging
5
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.decorators import decorator_from_middleware
9 from django.utils.translation import pgettext, ugettext as _
10
11 from benefits.core import middleware, models, session, viewmodels
12 from benefits.core.views import PageTemplateResponse
13 from . import api, forms
14
15
16 logger = logging.getLogger(__name__)
17
18
19 def _check_access_token(request, agency):
20 """
21 Ensure the request's session is configured with an access token.
22 """
23 if not session.valid_token(request):
24 response = api.AccessTokenClient(agency).get()
25 session.update(request, token=response.access_token, token_exp=response.expiry)
26
27
28 def _index(request):
29 """Helper handles GET requests to discounts index."""
30 agency = session.agency(request)
31
32 _check_access_token(request, agency)
33
34 tokenize_button = "tokenize_card"
35 tokenize_retry_form = forms.CardTokenizeFailForm("discounts:retry")
36 tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix="")
37
38 page = viewmodels.Page(
39 title=_("discounts.index.title"),
40 content_title=_("discounts.index.content_title"),
41 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
42 paragraphs=[_("discounts.index.p1"), _("discounts.index.p2")],
43 classes="text-lg-center",
44 forms=[tokenize_retry_form, tokenize_success_form],
45 buttons=[
46 viewmodels.Button.primary(
47 text=_("discounts.buttons.paymentpartner"), id=tokenize_button, url=f"#{tokenize_button}"
48 ),
49 viewmodels.Button.link(
50 classes="btn-sm", text=_("discounts.buttons.paymentoptions"), url=reverse("core:payment_options")
51 ),
52 ],
53 )
54 context = page.context_dict()
55
56 # add agency details
57 agency_vm = viewmodels.TransitAgency(agency)
58 context.update(agency_vm.context_dict())
59
60 # and discount provider details
61 provider_vm = viewmodels.DiscountProvider(
62 model=agency.discount_provider,
63 access_token=session.token(request),
64 element_id=f"#{tokenize_button}",
65 color="#046b99",
66 name=f"{agency.long_name} {_('partnered with')} {agency.discount_provider.name}",
67 )
68 context.update(provider_vm.context_dict())
69 logger.info(f"card_tokenize_url: {context['provider'].card_tokenize_url}")
70
71 # the tokenize form URLs are injected to page-generated Javascript
72 context.update(
73 {
74 "forms": {
75 "tokenize_retry": reverse(tokenize_retry_form.action_url),
76 "tokenize_success": reverse(tokenize_success_form.action_url),
77 }
78 }
79 )
80
81 return TemplateResponse(request, "discounts/index.html", context)
82
83
84 @decorator_from_middleware(middleware.AgencySessionRequired)
85 def index(request):
86 """View handler for the discounts landing page."""
87 if request.method == "POST":
88 response = _associate_discount(request)
89 else:
90 response = _index(request)
91
92 return response
93
94
95 def _associate_discount(request):
96 """Helper calls the discount APIs."""
97 logger.debug("Read tokenized card")
98 form = forms.CardTokenizeSuccessForm(request.POST)
99 if not form.is_valid():
100 raise Exception("Invalid card token form")
101 card_token = form.cleaned_data.get("card_token")
102
103 eligibility = session.eligibility(request)
104 if len(eligibility) > 0:
105 eligibility = eligibility[0]
106 if len(eligibility) == 1:
107 logger.debug(f"Session contains 1 {models.EligibilityType.__name__}")
108 else:
109 logger.debug(f"Session contains ({len(eligibility)}) {models.EligibilityType.__name__}s")
110 else:
111 raise Exception("Session contains no eligibility information")
112
113 agency = session.agency(request)
114
115 logger.debug("Call customer API")
116 response = api.CustomerClient(agency).get(card_token)
117 customer_id = response.id
118
119 logger.debug("Call group API")
120 eligibility = models.EligibilityType.by_name(eligibility)
121 response = api.GroupClient(agency).enroll_customer(customer_id, eligibility.group_id)
122 if response.updated_customer_id == customer_id:
123 return success(request)
124 else:
125 raise Exception("Updated customer_id does not match enrolled customer_id")
126
127
128 @decorator_from_middleware(middleware.AgencySessionRequired)
129 def retry(request):
130 """View handler for a recoverable failure condition."""
131 if request.method == "POST":
132 form = forms.CardTokenizeFailForm(request.POST)
133 if form.is_valid():
134 agency = session.agency(request)
135 page = viewmodels.Page(
136 title=_("discounts.retry.title"),
137 icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")),
138 content_title=_("discounts.retry.title"),
139 paragraphs=[_("discounts.retry.p1")],
140 buttons=[
141 viewmodels.Button.agency_phone_link(agency),
142 viewmodels.Button.primary(text=_("discounts.retry.button"), url=session.origin(request)),
143 ],
144 )
145 return PageTemplateResponse(request, page)
146 else:
147 raise Exception("Invalid retry submission.")
148 else:
149 raise Exception("This view method only supports POST.")
150
151
152 def success(request):
153 """View handler for the final success page."""
154 page = viewmodels.Page(
155 title=_("discounts.success.title"),
156 icon=viewmodels.Icon("paymentcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
157 content_title=_("discounts.success.title"),
158 paragraphs=[_("discounts.success.p1"), _("discounts.success.p2")],
159 )
160
161 return TemplateResponse(request, "discounts/success.html", page.context_dict())
162
[end of benefits/discounts/views.py]
[start of benefits/core/views.py]
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.template import loader
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.translation import pgettext, ugettext as _
9
10 from . import models, session, viewmodels
11
12
13 def PageTemplateResponse(request, page_vm):
14 """Helper returns a TemplateResponse using the common page template."""
15 return TemplateResponse(request, "core/page.html", page_vm.context_dict())
16
17
18 def _index_content_title():
19 """Helper returns the content title for the common index page."""
20 return _("core.index.content_title")
21
22
23 def _index_image():
24 """Helper returns a viewmodels.Image for the common index page."""
25 return viewmodels.Image("riderboardingbusandtapping.svg", pgettext("image alt text", "core.index.image"))
26
27
28 def _index_paragraphs():
29 """Helper returns the content paragraphs for the common index page."""
30 return [_("core.index.p1"), _("core.index.p2"), _("core.index.p3")]
31
32
33 def _index_url():
34 """Helper computes the index url path."""
35 return reverse("core:index")
36
37
38 def index(request):
39 """View handler for the main entry page."""
40 session.reset(request)
41
42 # generate a button to the landing page for each active agency
43 agencies = models.TransitAgency.all_active()
44 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
45 buttons[0].classes.append("mt-3")
46 buttons[0].label = _("core.index.chooseprovider")
47
48 page = viewmodels.Page(
49 content_title=_index_content_title(),
50 paragraphs=_index_paragraphs(),
51 image=_index_image(),
52 buttons=buttons,
53 classes="home",
54 )
55
56 return PageTemplateResponse(request, page)
57
58
59 def agency_index(request, agency):
60 """View handler for an agency entry page."""
61 session.reset(request)
62 session.update(request, agency=agency, origin=agency.index_url)
63
64 page = viewmodels.Page(
65 content_title=_index_content_title(),
66 paragraphs=_index_paragraphs(),
67 image=_index_image(),
68 button=viewmodels.Button.primary(text=_("core.index.continue"), url=reverse("eligibility:index")),
69 classes="home",
70 )
71
72 return PageTemplateResponse(request, page)
73
74
75 def help(request):
76 """View handler for the help page."""
77 if session.active_agency(request):
78 agency = session.agency(request)
79 buttons = [viewmodels.Button.agency_phone_link(agency)]
80 else:
81 buttons = [viewmodels.Button.agency_phone_link(a) for a in models.TransitAgency.all_active()]
82
83 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
84
85 page = viewmodels.Page(
86 title=_("core.help"),
87 content_title=_("core.help"),
88 paragraphs=[_("core.help.p1"), _("core.help.p2")],
89 buttons=buttons,
90 classes="text-lg-center",
91 )
92
93 return TemplateResponse(request, "core/help.html", page.context_dict())
94
95
96 def payment_options(request):
97 """View handler for the Payment Options page."""
98 page = viewmodels.Page(
99 title=_("core.payment-options"),
100 icon=viewmodels.Icon("bankcard", pgettext("image alt text", "core.icons.bankcard")),
101 content_title=_("core.payment-options"),
102 buttons=viewmodels.Button.home(request, text=_("core.buttons.back")),
103 )
104
105 return TemplateResponse(request, "core/payment-options.html", page.context_dict())
106
107
108 def bad_request(request, exception, template_name="400.html"):
109 """View handler for HTTP 400 Bad Request responses."""
110 if session.active_agency(request):
111 session.update(request, origin=session.agency(request).index_url)
112 else:
113 session.update(request, origin=_index_url())
114
115 home = viewmodels.Button.home(request)
116 page = viewmodels.ErrorPage.error(button=home)
117 t = loader.get_template(template_name)
118
119 return HttpResponseBadRequest(t.render(page.context_dict()))
120
121
122 def page_not_found(request, exception, template_name="404.html"):
123 """View handler for HTTP 404 Not Found responses."""
124 if session.active_agency(request):
125 session.update(request, origin=session.agency(request).index_url)
126 else:
127 session.update(request, origin=_index_url())
128
129 home = viewmodels.Button.home(request)
130 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
131 t = loader.get_template(template_name)
132
133 return HttpResponseNotFound(t.render(page.context_dict()))
134
135
136 def server_error(request, template_name="500.html"):
137 """View handler for HTTP 500 Server Error responses."""
138 if session.active_agency(request):
139 session.update(request, origin=session.agency(request).index_url)
140 else:
141 session.update(request, origin=_index_url())
142
143 home = viewmodels.Button.home(request)
144 page = viewmodels.ErrorPage.error(button=home)
145 t = loader.get_template(template_name)
146
147 return HttpResponseServerError(t.render(page.context_dict()))
148
[end of benefits/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -22,7 +22,7 @@
def _index_image():
"""Helper returns a viewmodels.Image for the common index page."""
- return viewmodels.Image("riderboardingbusandtapping.svg", pgettext("image alt text", "core.index.image"))
+ return viewmodels.Image("ridertappingbankcard.png", pgettext("image alt text", "core.index.image"))
def _index_paragraphs():
diff --git a/benefits/discounts/views.py b/benefits/discounts/views.py
--- a/benefits/discounts/views.py
+++ b/benefits/discounts/views.py
@@ -153,7 +153,7 @@
"""View handler for the final success page."""
page = viewmodels.Page(
title=_("discounts.success.title"),
- icon=viewmodels.Icon("paymentcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
+ icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
content_title=_("discounts.success.title"),
paragraphs=[_("discounts.success.p1"), _("discounts.success.p2")],
)
| {"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -22,7 +22,7 @@\n \n def _index_image():\n \"\"\"Helper returns a viewmodels.Image for the common index page.\"\"\"\n- return viewmodels.Image(\"riderboardingbusandtapping.svg\", pgettext(\"image alt text\", \"core.index.image\"))\n+ return viewmodels.Image(\"ridertappingbankcard.png\", pgettext(\"image alt text\", \"core.index.image\"))\n \n \n def _index_paragraphs():\ndiff --git a/benefits/discounts/views.py b/benefits/discounts/views.py\n--- a/benefits/discounts/views.py\n+++ b/benefits/discounts/views.py\n@@ -153,7 +153,7 @@\n \"\"\"View handler for the final success page.\"\"\"\n page = viewmodels.Page(\n title=_(\"discounts.success.title\"),\n- icon=viewmodels.Icon(\"paymentcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n+ icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n content_title=_(\"discounts.success.title\"),\n paragraphs=[_(\"discounts.success.p1\"), _(\"discounts.success.p2\")],\n )\n", "issue": "update favicon\ncurrent dev has old Cal-ITP Logo as favicon. \n", "before_files": [{"content": "\"\"\"\nThe discounts application: view definitions for the discounts association flow.\n\"\"\"\nimport logging\n\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, ugettext as _\n\nfrom benefits.core import middleware, models, session, viewmodels\nfrom benefits.core.views import PageTemplateResponse\nfrom . import api, forms\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_access_token(request, agency):\n \"\"\"\n Ensure the request's session is configured with an access token.\n \"\"\"\n if not session.valid_token(request):\n response = api.AccessTokenClient(agency).get()\n session.update(request, token=response.access_token, token_exp=response.expiry)\n\n\ndef _index(request):\n \"\"\"Helper handles GET requests to discounts index.\"\"\"\n agency = session.agency(request)\n\n _check_access_token(request, agency)\n\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(\"discounts:retry\")\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"discounts.index.title\"),\n content_title=_(\"discounts.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"discounts.index.p1\"), _(\"discounts.index.p2\")],\n classes=\"text-lg-center\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"discounts.buttons.paymentpartner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n viewmodels.Button.link(\n classes=\"btn-sm\", text=_(\"discounts.buttons.paymentoptions\"), url=reverse(\"core:payment_options\")\n ),\n ],\n )\n context = page.context_dict()\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and discount provider details\n provider_vm = viewmodels.DiscountProvider(\n model=agency.discount_provider,\n access_token=session.token(request),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.discount_provider.name}\",\n )\n 
context.update(provider_vm.context_dict())\n logger.info(f\"card_tokenize_url: {context['provider'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context.update(\n {\n \"forms\": {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n }\n )\n\n return TemplateResponse(request, \"discounts/index.html\", context)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the discounts landing page.\"\"\"\n if request.method == \"POST\":\n response = _associate_discount(request)\n else:\n response = _index(request)\n\n return response\n\n\ndef _associate_discount(request):\n \"\"\"Helper calls the discount APIs.\"\"\"\n logger.debug(\"Read tokenized card\")\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n eligibility = session.eligibility(request)\n if len(eligibility) > 0:\n eligibility = eligibility[0]\n if len(eligibility) == 1:\n logger.debug(f\"Session contains 1 {models.EligibilityType.__name__}\")\n else:\n logger.debug(f\"Session contains ({len(eligibility)}) {models.EligibilityType.__name__}s\")\n else:\n raise Exception(\"Session contains no eligibility information\")\n\n agency = session.agency(request)\n\n logger.debug(\"Call customer API\")\n response = api.CustomerClient(agency).get(card_token)\n customer_id = response.id\n\n logger.debug(\"Call group API\")\n eligibility = models.EligibilityType.by_name(eligibility)\n response = api.GroupClient(agency).enroll_customer(customer_id, eligibility.group_id)\n if response.updated_customer_id == customer_id:\n return success(request)\n else:\n raise Exception(\"Updated customer_id does not match enrolled customer_id\")\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n title=_(\"discounts.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"discounts.retry.title\"),\n paragraphs=[_(\"discounts.retry.p1\")],\n buttons=[\n viewmodels.Button.agency_phone_link(agency),\n viewmodels.Button.primary(text=_(\"discounts.retry.button\"), url=session.origin(request)),\n ],\n )\n return PageTemplateResponse(request, page)\n else:\n raise Exception(\"Invalid retry submission.\")\n else:\n raise Exception(\"This view method only supports POST.\")\n\n\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n page = viewmodels.Page(\n title=_(\"discounts.success.title\"),\n icon=viewmodels.Icon(\"paymentcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n content_title=_(\"discounts.success.title\"),\n paragraphs=[_(\"discounts.success.p1\"), _(\"discounts.success.p2\")],\n )\n\n return TemplateResponse(request, \"discounts/success.html\", page.context_dict())\n", "path": "benefits/discounts/views.py"}, {"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom 
django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, ugettext as _\n\nfrom . import models, session, viewmodels\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.index.content_title\")\n\n\ndef _index_image():\n \"\"\"Helper returns a viewmodels.Image for the common index page.\"\"\"\n return viewmodels.Image(\"riderboardingbusandtapping.svg\", pgettext(\"image alt text\", \"core.index.image\"))\n\n\ndef _index_paragraphs():\n \"\"\"Helper returns the content paragraphs for the common index page.\"\"\"\n return [_(\"core.index.p1\"), _(\"core.index.p2\"), _(\"core.index.p3\")]\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n # generate a button to the landing page for each active agency\n agencies = models.TransitAgency.all_active()\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.index.chooseprovider\")\n\n page = viewmodels.Page(\n content_title=_index_content_title(),\n paragraphs=_index_paragraphs(),\n image=_index_image(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n page = viewmodels.Page(\n content_title=_index_content_title(),\n paragraphs=_index_paragraphs(),\n image=_index_image(),\n button=viewmodels.Button.primary(text=_(\"core.index.continue\"), url=reverse(\"eligibility:index\")),\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = [viewmodels.Button.agency_phone_link(agency)]\n else:\n buttons = [viewmodels.Button.agency_phone_link(a) for a in models.TransitAgency.all_active()]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.help\"),\n content_title=_(\"core.help\"),\n paragraphs=[_(\"core.help.p1\"), _(\"core.help.p2\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\ndef payment_options(request):\n \"\"\"View handler for the Payment Options page.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.payment-options\"),\n icon=viewmodels.Icon(\"bankcard\", pgettext(\"image alt text\", \"core.icons.bankcard\")),\n content_title=_(\"core.payment-options\"),\n buttons=viewmodels.Button.home(request, text=_(\"core.buttons.back\")),\n )\n\n return TemplateResponse(request, \"core/payment-options.html\", page.context_dict())\n\n\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, 
origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]} | 3,594 | 287 |
gh_patches_debug_11297 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1800 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GLMeshItem memory leak in drawing of faces
### Short description
When rendering faces, there appears to be a memory leak relating to lines 199 and 200 of GLMeshItem.py
### Code to reproduce
This can be reproduced using the GLMeshItem example script. Using `top` to check memory usage, one can see the amount used by the process slowly increase as updates are called in the scene (triggered by rotating the view, etc.). If there are no changes the memory usage is constant, but otherwise it will grow without bound (albeit slowly in the example). Larger meshes will cause faster accumulation.
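A minimal, self-contained sketch of that reproduction (the mesh size and timer interval are illustrative assumptions, not taken from the example script). With the default `smooth=True` on a non-face-indexed mesh, `parseMeshData()` keeps a `faces` array, so every repaint goes through the `glDrawElements` branch at lines 199–200:

```python
# Hedged repro sketch: watch the process RSS in `top` while the timer forces repaints.
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtCore, QtWidgets

app = QtWidgets.QApplication([])
view = gl.GLViewWidget()
view.show()

md = gl.MeshData.sphere(rows=200, cols=200)                  # larger mesh -> faster accumulation
item = gl.GLMeshItem(meshdata=md, smooth=True, drawFaces=True, drawEdges=False)
view.addItem(item)

def spin():
    view.orbit(1, 0)                                          # rotates the camera and triggers paint()

timer = QtCore.QTimer()
timer.timeout.connect(spin)
timer.start(10)
app.exec_()
```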
### Expected behavior
Near-constant memory usage when rendering the faces in the scene.
### Real behavior
Increasing memory usage when rendering faces.
### Tested environment(s)
* PyQtGraph version: tested on both 0.11.0 and 0.12.1
* Qt Python binding: 5.15.4
* Python version: 3.8
* NumPy version: 1.17.3
* Operating system: Ubuntu 20.04
* Installation method: pip
### Additional context
changing astype on faces from np.uint to np.uint32 or np.uintc on line 199 of pyqtgraph/opengl/items/GLMeshItem.py resolves the issue for me, but I don't know if that is the correct solution.
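For context — this is an interpretation, not a confirmed root cause: on LP64 platforms (64-bit Linux/macOS) `np.uint` is an alias for the C `unsigned long`, a 64-bit dtype, while the draw call declares `GL_UNSIGNED_INT`, a 32-bit index type, so PyOpenGL has to convert/copy the index array on every repaint. Casting to a dtype that matches the declared GL type avoids that per-frame conversion. A small runnable illustration of the dtype mismatch:

```python
import numpy as np

faces = np.arange(9).reshape(3, 3)          # stand-in for the MeshData faces array
print(np.dtype(np.uint).itemsize)            # 8 on LP64 Linux/macOS, wider than GL_UNSIGNED_INT
idx = faces.astype(np.uint32).flatten()      # 32-bit, C-contiguous indices matching GL_UNSIGNED_INT
print(idx.dtype, idx.shape)                  # uint32 (9,)
```

In `GLMeshItem.paint()` the equivalent change is simply `faces = faces.astype(np.uint32).flatten()` on line 199.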
</issue>
<code>
[start of pyqtgraph/opengl/items/GLMeshItem.py]
1 from OpenGL.GL import *
2 from .. GLGraphicsItem import GLGraphicsItem
3 from .. MeshData import MeshData
4 from ...Qt import QtGui
5 from .. import shaders
6 from ... import functions as fn
7 import numpy as np
8
9
10
11 __all__ = ['GLMeshItem']
12
13 class GLMeshItem(GLGraphicsItem):
14 """
15 **Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
16
17 Displays a 3D triangle mesh.
18 """
19 def __init__(self, **kwds):
20 """
21 ============== =====================================================
22 **Arguments:**
23 meshdata MeshData object from which to determine geometry for
24 this item.
25 color Default face color used if no vertex or face colors
26 are specified.
27 edgeColor Default edge color to use if no edge colors are
28 specified in the mesh data.
29 drawEdges If True, a wireframe mesh will be drawn.
30 (default=False)
31 drawFaces If True, mesh faces are drawn. (default=True)
32 shader Name of shader program to use when drawing faces.
33 (None for no shader)
34 smooth If True, normal vectors are computed for each vertex
35 and interpolated within each face.
36 computeNormals If False, then computation of normal vectors is
37 disabled. This can provide a performance boost for
38 meshes that do not make use of normals.
39 ============== =====================================================
40 """
41 self.opts = {
42 'meshdata': None,
43 'color': (1., 1., 1., 1.),
44 'drawEdges': False,
45 'drawFaces': True,
46 'edgeColor': (0.5, 0.5, 0.5, 1.0),
47 'shader': None,
48 'smooth': True,
49 'computeNormals': True,
50 }
51
52 GLGraphicsItem.__init__(self)
53 glopts = kwds.pop('glOptions', 'opaque')
54 self.setGLOptions(glopts)
55 shader = kwds.pop('shader', None)
56 self.setShader(shader)
57
58 self.setMeshData(**kwds)
59
60 ## storage for data compiled from MeshData object
61 self.vertexes = None
62 self.normals = None
63 self.colors = None
64 self.faces = None
65
66 def setShader(self, shader):
67 """Set the shader used when rendering faces in the mesh. (see the GL shaders example)"""
68 self.opts['shader'] = shader
69 self.update()
70
71 def shader(self):
72 shader = self.opts['shader']
73 if isinstance(shader, shaders.ShaderProgram):
74 return shader
75 else:
76 return shaders.getShaderProgram(shader)
77
78 def setColor(self, c):
79 """Set the default color to use when no vertex or face colors are specified."""
80 self.opts['color'] = c
81 self.update()
82
83 def setMeshData(self, **kwds):
84 """
85 Set mesh data for this item. This can be invoked two ways:
86
87 1. Specify *meshdata* argument with a new MeshData object
88 2. Specify keyword arguments to be passed to MeshData(..) to create a new instance.
89 """
90 md = kwds.get('meshdata', None)
91 if md is None:
92 opts = {}
93 for k in ['vertexes', 'faces', 'edges', 'vertexColors', 'faceColors']:
94 try:
95 opts[k] = kwds.pop(k)
96 except KeyError:
97 pass
98 md = MeshData(**opts)
99
100 self.opts['meshdata'] = md
101 self.opts.update(kwds)
102 self.meshDataChanged()
103 self.update()
104
105
106 def meshDataChanged(self):
107 """
108 This method must be called to inform the item that the MeshData object
109 has been altered.
110 """
111
112 self.vertexes = None
113 self.faces = None
114 self.normals = None
115 self.colors = None
116 self.edges = None
117 self.edgeColors = None
118 self.update()
119
120 def parseMeshData(self):
121 ## interpret vertex / normal data before drawing
122 ## This can:
123 ## - automatically generate normals if they were not specified
124 ## - pull vertexes/noormals/faces from MeshData if that was specified
125
126 if self.vertexes is not None and self.normals is not None:
127 return
128 #if self.opts['normals'] is None:
129 #if self.opts['meshdata'] is None:
130 #self.opts['meshdata'] = MeshData(vertexes=self.opts['vertexes'], faces=self.opts['faces'])
131 if self.opts['meshdata'] is not None:
132 md = self.opts['meshdata']
133 if self.opts['smooth'] and not md.hasFaceIndexedData():
134 self.vertexes = md.vertexes()
135 if self.opts['computeNormals']:
136 self.normals = md.vertexNormals()
137 self.faces = md.faces()
138 if md.hasVertexColor():
139 self.colors = md.vertexColors()
140 if md.hasFaceColor():
141 self.colors = md.faceColors()
142 else:
143 self.vertexes = md.vertexes(indexed='faces')
144 if self.opts['computeNormals']:
145 if self.opts['smooth']:
146 self.normals = md.vertexNormals(indexed='faces')
147 else:
148 self.normals = md.faceNormals(indexed='faces')
149 self.faces = None
150 if md.hasVertexColor():
151 self.colors = md.vertexColors(indexed='faces')
152 elif md.hasFaceColor():
153 self.colors = md.faceColors(indexed='faces')
154
155 if self.opts['drawEdges']:
156 if not md.hasFaceIndexedData():
157 self.edges = md.edges()
158 self.edgeVerts = md.vertexes()
159 else:
160 self.edges = md.edges()
161 self.edgeVerts = md.vertexes(indexed='faces')
162 return
163
164 def paint(self):
165 self.setupGLState()
166
167 self.parseMeshData()
168
169 if self.opts['drawFaces']:
170 with self.shader():
171 verts = self.vertexes
172 norms = self.normals
173 color = self.colors
174 faces = self.faces
175 if verts is None:
176 return
177 glEnableClientState(GL_VERTEX_ARRAY)
178 try:
179 glVertexPointerf(verts)
180
181 if self.colors is None:
182 color = self.opts['color']
183 if isinstance(color, QtGui.QColor):
184 glColor4f(*fn.glColor(color))
185 else:
186 glColor4f(*color)
187 else:
188 glEnableClientState(GL_COLOR_ARRAY)
189 glColorPointerf(color)
190
191
192 if norms is not None:
193 glEnableClientState(GL_NORMAL_ARRAY)
194 glNormalPointerf(norms)
195
196 if faces is None:
197 glDrawArrays(GL_TRIANGLES, 0, np.product(verts.shape[:-1]))
198 else:
199 faces = faces.astype(np.uint).flatten()
200 glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)
201 finally:
202 glDisableClientState(GL_NORMAL_ARRAY)
203 glDisableClientState(GL_VERTEX_ARRAY)
204 glDisableClientState(GL_COLOR_ARRAY)
205
206 if self.opts['drawEdges']:
207 verts = self.edgeVerts
208 edges = self.edges
209 glEnableClientState(GL_VERTEX_ARRAY)
210 try:
211 glVertexPointerf(verts)
212
213 if self.edgeColors is None:
214 color = self.opts['edgeColor']
215 if isinstance(color, QtGui.QColor):
216 glColor4f(*fn.glColor(color))
217 else:
218 glColor4f(*color)
219 else:
220 glEnableClientState(GL_COLOR_ARRAY)
221 glColorPointerf(color)
222 edges = edges.flatten()
223 glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)
224 finally:
225 glDisableClientState(GL_VERTEX_ARRAY)
226 glDisableClientState(GL_COLOR_ARRAY)
227
228
[end of pyqtgraph/opengl/items/GLMeshItem.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/opengl/items/GLMeshItem.py b/pyqtgraph/opengl/items/GLMeshItem.py
--- a/pyqtgraph/opengl/items/GLMeshItem.py
+++ b/pyqtgraph/opengl/items/GLMeshItem.py
@@ -196,7 +196,7 @@
if faces is None:
glDrawArrays(GL_TRIANGLES, 0, np.product(verts.shape[:-1]))
else:
- faces = faces.astype(np.uint).flatten()
+ faces = faces.astype(np.uint32).flatten()
glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)
finally:
glDisableClientState(GL_NORMAL_ARRAY)
| {"golden_diff": "diff --git a/pyqtgraph/opengl/items/GLMeshItem.py b/pyqtgraph/opengl/items/GLMeshItem.py\n--- a/pyqtgraph/opengl/items/GLMeshItem.py\n+++ b/pyqtgraph/opengl/items/GLMeshItem.py\n@@ -196,7 +196,7 @@\n if faces is None:\n glDrawArrays(GL_TRIANGLES, 0, np.product(verts.shape[:-1]))\n else:\n- faces = faces.astype(np.uint).flatten()\n+ faces = faces.astype(np.uint32).flatten()\n glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)\n finally:\n glDisableClientState(GL_NORMAL_ARRAY)\n", "issue": "GLMeshItem memory leak in drawing of faces\n### Short description\r\nWhen rendering faces, there appears to be a memory leak relating to line 199 and 200 of GLMeshItem.py\r\n\r\n### Code to reproduce\r\nThis can be reproduced using the GLMeshItem example script. Using top to check memory usage, the amount used by the process slowly increases as updates are called in the scene (triggered by rotating the view, etc.). If there are no changes the memory usage is constant, but otherwise will grow without bound (albeit slowly in the example). Larger meshes with cause faster accumulation.\r\n\r\n### Expected behavior\r\nNear-constant memory usage when rendering the faces in the scene.\r\n\r\n### Real behavior\r\nIncreasing memory usage when rendering faces.\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: tested on both 0.11.0 and 0.12.1\r\n * Qt Python binding: 5.15.4\r\n * Python version: 3.8\r\n * NumPy version: 1.17.3\r\n * Operating system: Ubuntu 20.04\r\n * Installation method: pip\r\n\r\n### Additional context\r\nchanging astype on faces from np.uint to np.uint32 or np.uintc on line 199 of pyqtgraph/opengl/items/GLMeshItem.py resolves the issue for me, but I don't know if that is the correct solution.\r\n\n", "before_files": [{"content": "from OpenGL.GL import *\nfrom .. GLGraphicsItem import GLGraphicsItem\nfrom .. MeshData import MeshData\nfrom ...Qt import QtGui\nfrom .. import shaders\nfrom ... import functions as fn\nimport numpy as np\n\n\n\n__all__ = ['GLMeshItem']\n\nclass GLMeshItem(GLGraphicsItem):\n \"\"\"\n **Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`\n \n Displays a 3D triangle mesh. \n \"\"\"\n def __init__(self, **kwds):\n \"\"\"\n ============== =====================================================\n **Arguments:**\n meshdata MeshData object from which to determine geometry for \n this item.\n color Default face color used if no vertex or face colors \n are specified.\n edgeColor Default edge color to use if no edge colors are\n specified in the mesh data.\n drawEdges If True, a wireframe mesh will be drawn. \n (default=False)\n drawFaces If True, mesh faces are drawn. (default=True)\n shader Name of shader program to use when drawing faces.\n (None for no shader)\n smooth If True, normal vectors are computed for each vertex\n and interpolated within each face.\n computeNormals If False, then computation of normal vectors is \n disabled. 
This can provide a performance boost for \n meshes that do not make use of normals.\n ============== =====================================================\n \"\"\"\n self.opts = {\n 'meshdata': None,\n 'color': (1., 1., 1., 1.),\n 'drawEdges': False,\n 'drawFaces': True,\n 'edgeColor': (0.5, 0.5, 0.5, 1.0),\n 'shader': None,\n 'smooth': True,\n 'computeNormals': True,\n }\n \n GLGraphicsItem.__init__(self)\n glopts = kwds.pop('glOptions', 'opaque')\n self.setGLOptions(glopts)\n shader = kwds.pop('shader', None)\n self.setShader(shader)\n \n self.setMeshData(**kwds)\n \n ## storage for data compiled from MeshData object\n self.vertexes = None\n self.normals = None\n self.colors = None\n self.faces = None\n \n def setShader(self, shader):\n \"\"\"Set the shader used when rendering faces in the mesh. (see the GL shaders example)\"\"\"\n self.opts['shader'] = shader\n self.update()\n \n def shader(self):\n shader = self.opts['shader']\n if isinstance(shader, shaders.ShaderProgram):\n return shader\n else:\n return shaders.getShaderProgram(shader)\n \n def setColor(self, c):\n \"\"\"Set the default color to use when no vertex or face colors are specified.\"\"\"\n self.opts['color'] = c\n self.update()\n \n def setMeshData(self, **kwds):\n \"\"\"\n Set mesh data for this item. This can be invoked two ways:\n \n 1. Specify *meshdata* argument with a new MeshData object\n 2. Specify keyword arguments to be passed to MeshData(..) to create a new instance.\n \"\"\"\n md = kwds.get('meshdata', None)\n if md is None:\n opts = {}\n for k in ['vertexes', 'faces', 'edges', 'vertexColors', 'faceColors']:\n try:\n opts[k] = kwds.pop(k)\n except KeyError:\n pass\n md = MeshData(**opts)\n \n self.opts['meshdata'] = md\n self.opts.update(kwds)\n self.meshDataChanged()\n self.update()\n \n \n def meshDataChanged(self):\n \"\"\"\n This method must be called to inform the item that the MeshData object\n has been altered.\n \"\"\"\n \n self.vertexes = None\n self.faces = None\n self.normals = None\n self.colors = None\n self.edges = None\n self.edgeColors = None\n self.update()\n \n def parseMeshData(self):\n ## interpret vertex / normal data before drawing\n ## This can:\n ## - automatically generate normals if they were not specified\n ## - pull vertexes/noormals/faces from MeshData if that was specified\n \n if self.vertexes is not None and self.normals is not None:\n return\n #if self.opts['normals'] is None:\n #if self.opts['meshdata'] is None:\n #self.opts['meshdata'] = MeshData(vertexes=self.opts['vertexes'], faces=self.opts['faces'])\n if self.opts['meshdata'] is not None:\n md = self.opts['meshdata']\n if self.opts['smooth'] and not md.hasFaceIndexedData():\n self.vertexes = md.vertexes()\n if self.opts['computeNormals']:\n self.normals = md.vertexNormals()\n self.faces = md.faces()\n if md.hasVertexColor():\n self.colors = md.vertexColors()\n if md.hasFaceColor():\n self.colors = md.faceColors()\n else:\n self.vertexes = md.vertexes(indexed='faces')\n if self.opts['computeNormals']:\n if self.opts['smooth']:\n self.normals = md.vertexNormals(indexed='faces')\n else:\n self.normals = md.faceNormals(indexed='faces')\n self.faces = None\n if md.hasVertexColor():\n self.colors = md.vertexColors(indexed='faces')\n elif md.hasFaceColor():\n self.colors = md.faceColors(indexed='faces')\n \n if self.opts['drawEdges']:\n if not md.hasFaceIndexedData():\n self.edges = md.edges()\n self.edgeVerts = md.vertexes()\n else:\n self.edges = md.edges()\n self.edgeVerts = md.vertexes(indexed='faces')\n return\n \n def 
paint(self):\n self.setupGLState()\n \n self.parseMeshData() \n \n if self.opts['drawFaces']:\n with self.shader():\n verts = self.vertexes\n norms = self.normals\n color = self.colors\n faces = self.faces\n if verts is None:\n return\n glEnableClientState(GL_VERTEX_ARRAY)\n try:\n glVertexPointerf(verts)\n \n if self.colors is None:\n color = self.opts['color']\n if isinstance(color, QtGui.QColor):\n glColor4f(*fn.glColor(color))\n else:\n glColor4f(*color)\n else:\n glEnableClientState(GL_COLOR_ARRAY)\n glColorPointerf(color)\n \n \n if norms is not None:\n glEnableClientState(GL_NORMAL_ARRAY)\n glNormalPointerf(norms)\n \n if faces is None:\n glDrawArrays(GL_TRIANGLES, 0, np.product(verts.shape[:-1]))\n else:\n faces = faces.astype(np.uint).flatten()\n glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)\n finally:\n glDisableClientState(GL_NORMAL_ARRAY)\n glDisableClientState(GL_VERTEX_ARRAY)\n glDisableClientState(GL_COLOR_ARRAY)\n \n if self.opts['drawEdges']:\n verts = self.edgeVerts\n edges = self.edges\n glEnableClientState(GL_VERTEX_ARRAY)\n try:\n glVertexPointerf(verts)\n \n if self.edgeColors is None:\n color = self.opts['edgeColor']\n if isinstance(color, QtGui.QColor):\n glColor4f(*fn.glColor(color))\n else:\n glColor4f(*color)\n else:\n glEnableClientState(GL_COLOR_ARRAY)\n glColorPointerf(color)\n edges = edges.flatten()\n glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)\n finally:\n glDisableClientState(GL_VERTEX_ARRAY)\n glDisableClientState(GL_COLOR_ARRAY)\n \n", "path": "pyqtgraph/opengl/items/GLMeshItem.py"}]} | 3,071 | 147 |
gh_patches_debug_15229 | rasdani/github-patches | git_diff | feast-dev__feast-1955 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Solve feast conflict dependencies for [gcp]
## Expected Behavior
`pip-compile` should run without error and result in a nice lock file of the libraries to install
## Current Behavior
`pip-compile` will find a conflict in the current feast[gcp] dependencies. I did not try [aws] or [redis].
<img width="1774" alt="image" src="https://user-images.githubusercontent.com/18557047/135331637-5c3d47ad-ebe0-4a27-b335-93617675027f.png">
## Steps to reproduce
```bash
echo "-e sdk/python[gcp]" > sdk/python/requirements.txt
pip-compile --dry-run sdk/python/requirements.txt
```
<img width="1244" alt="image" src="https://user-images.githubusercontent.com/18557047/135332916-c368ca80-3276-40ab-a3bd-42c48d52c2e9.png">
### Specifications
- Version:
- Platform:
- Subsystem:
## Possible Solution
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import shutil
18 import subprocess
19 import pathlib
20
21 from distutils.cmd import Command
22 from setuptools import find_packages
23
24 try:
25 from setuptools import setup
26 from setuptools.command.install import install
27 from setuptools.command.develop import develop
28 from setuptools.command.egg_info import egg_info
29 from setuptools.command.sdist import sdist
30 from setuptools.command.build_py import build_py
31 except ImportError:
32 from distutils.core import setup
33 from distutils.command.install import install
34 from distutils.command.build_py import build_py
35
36 NAME = "feast"
37 DESCRIPTION = "Python SDK for Feast"
38 URL = "https://github.com/feast-dev/feast"
39 AUTHOR = "Feast"
40 REQUIRES_PYTHON = ">=3.7.0"
41
42 REQUIRED = [
43 "Click==7.*",
44 "colorama>=0.3.9",
45 "dill==0.3.*",
46 "fastavro>=1.1.0",
47 "google-api-core>=1.23.0",
48 "googleapis-common-protos==1.52.*",
49 "grpcio>=1.34.0",
50 "grpcio-reflection>=1.34.0"
51 "Jinja2>=2.0.0",
52 "jsonschema",
53 "mmh3",
54 "pandas>=1.0.0",
55 "pandavro==1.5.*",
56 "protobuf>=3.10",
57 "pyarrow>=4.0.0",
58 "pydantic>=1.0.0",
59 "PyYAML>=5.4.*",
60 "tabulate==0.8.*",
61 "tenacity>=7.*",
62 "toml==0.10.*",
63 "tqdm==4.*",
64 "fastapi>=0.68.0",
65 "uvicorn[standard]>=0.14.0",
66 ]
67
68 GCP_REQUIRED = [
69 "google-cloud-bigquery>=2.28.1",
70 "google-cloud-bigquery-storage >= 2.0.0",
71 "google-cloud-datastore>=2.1.*",
72 "google-cloud-storage>=1.34.*",
73 "google-cloud-core==1.4.*",
74 ]
75
76 REDIS_REQUIRED = [
77 "redis-py-cluster==2.1.2",
78 ]
79
80 AWS_REQUIRED = [
81 "boto3==1.17.*",
82 "docker>=5.0.2",
83 ]
84
85 CI_REQUIRED = [
86 "cryptography==3.3.2",
87 "flake8",
88 "black==19.10b0",
89 "isort>=5",
90 "grpcio-tools==1.34.0",
91 "grpcio-testing==1.34.0",
92 "minio==7.1.0",
93 "mock==2.0.0",
94 "moto",
95 "mypy==0.790",
96 "mypy-protobuf==1.24",
97 "avro==1.10.0",
98 "gcsfs",
99 "urllib3>=1.25.4",
100 "pytest==6.0.0",
101 "pytest-cov",
102 "pytest-xdist",
103 "pytest-benchmark>=3.4.1",
104 "pytest-lazy-fixture==0.6.3",
105 "pytest-timeout==1.4.2",
106 "pytest-ordering==0.6.*",
107 "pytest-mock==1.10.4",
108 "Sphinx!=4.0.0",
109 "sphinx-rtd-theme",
110 "testcontainers==3.4.2",
111 "adlfs==0.5.9",
112 "firebase-admin==4.5.2",
113 "pre-commit",
114 "assertpy==1.1",
115 "google-cloud-bigquery>=2.28.1",
116 "google-cloud-bigquery-storage >= 2.0.0",
117 "google-cloud-datastore>=2.1.*",
118 "google-cloud-storage>=1.20.*",
119 "google-cloud-core==1.4.*",
120 "redis-py-cluster==2.1.2",
121 "boto3==1.17.*",
122 ]
123
124 # Get git repo root directory
125 repo_root = str(pathlib.Path(__file__).resolve().parent.parent.parent)
126
127 # README file from Feast repo root directory
128 README_FILE = os.path.join(repo_root, "README.md")
129 with open(README_FILE, "r") as f:
130 LONG_DESCRIPTION = f.read()
131
132 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
133 # Regex modified from default tag regex in:
134 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
135 TAG_REGEX = re.compile(
136 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
137 )
138
139 # Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)
140 if shutil.which("git"):
141 use_scm_version = {"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX}
142 else:
143 use_scm_version = None
144
145
146 class BuildProtoCommand(Command):
147 description = "Builds the proto files into python files."
148
149 def initialize_options(self):
150 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
151 self.proto_folder = os.path.join(repo_root, "protos")
152 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
153 self.sub_folders = ["core", "serving", "types", "storage"]
154
155 def finalize_options(self):
156 pass
157
158 def _generate_protos(self, path):
159 proto_files = glob.glob(os.path.join(self.proto_folder, path))
160
161 subprocess.check_call(self.protoc + [
162 '-I', self.proto_folder,
163 '--python_out', self.this_package,
164 '--grpc_python_out', self.this_package,
165 '--mypy_out', self.this_package] + proto_files)
166
167 def run(self):
168 for sub_folder in self.sub_folders:
169 self._generate_protos(f'feast/{sub_folder}/*.proto')
170
171 from pathlib import Path
172
173 for path in Path('feast/protos').rglob('*.py'):
174 for folder in self.sub_folders:
175 # Read in the file
176 with open(path, 'r') as file:
177 filedata = file.read()
178
179 # Replace the target string
180 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
181
182 # Write the file out again
183 with open(path, 'w') as file:
184 file.write(filedata)
185
186
187 class BuildCommand(build_py):
188 """Custom build command."""
189
190 def run(self):
191 self.run_command('build_proto')
192 build_py.run(self)
193
194
195 class DevelopCommand(develop):
196 """Custom develop command."""
197
198 def run(self):
199 self.run_command('build_proto')
200 develop.run(self)
201
202
203 setup(
204 name=NAME,
205 author=AUTHOR,
206 description=DESCRIPTION,
207 long_description=LONG_DESCRIPTION,
208 long_description_content_type="text/markdown",
209 python_requires=REQUIRES_PYTHON,
210 url=URL,
211 packages=find_packages(exclude=("tests",)),
212 install_requires=REQUIRED,
213 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
214 # Install dev requirements with: pip install -e .[dev]
215 extras_require={
216 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
217 "ci": CI_REQUIRED,
218 "gcp": GCP_REQUIRED,
219 "aws": AWS_REQUIRED,
220 "redis": REDIS_REQUIRED,
221 },
222 include_package_data=True,
223 license="Apache",
224 classifiers=[
225 # Trove classifiers
226 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
227 "License :: OSI Approved :: Apache Software License",
228 "Programming Language :: Python",
229 "Programming Language :: Python :: 3",
230 "Programming Language :: Python :: 3.7",
231 ],
232 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
233 use_scm_version=use_scm_version,
234 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx!=4.0.0"],
235 package_data={
236 "": [
237 "protos/feast/**/*.proto",
238 "protos/feast/third_party/grpc/health/v1/*.proto",
239 "protos/tensorflow_metadata/proto/v0/*.proto",
240 "feast/protos/feast/**/*.py",
241 "tensorflow_metadata/proto/v0/*.py"
242 ],
243 },
244 cmdclass={
245 "build_proto": BuildProtoCommand,
246 "build_py": BuildCommand,
247 "develop": DevelopCommand,
248 },
249 )
250
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -69,7 +69,7 @@
"google-cloud-bigquery>=2.28.1",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
- "google-cloud-storage>=1.34.*",
+ "google-cloud-storage>=1.34.*,<1.41",
"google-cloud-core==1.4.*",
]
@@ -115,7 +115,7 @@
"google-cloud-bigquery>=2.28.1",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
- "google-cloud-storage>=1.20.*",
+ "google-cloud-storage>=1.20.*,<1.41",
"google-cloud-core==1.4.*",
"redis-py-cluster==2.1.2",
"boto3==1.17.*",
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -69,7 +69,7 @@\n \"google-cloud-bigquery>=2.28.1\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n- \"google-cloud-storage>=1.34.*\",\n+ \"google-cloud-storage>=1.34.*,<1.41\",\n \"google-cloud-core==1.4.*\",\n ]\n \n@@ -115,7 +115,7 @@\n \"google-cloud-bigquery>=2.28.1\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n- \"google-cloud-storage>=1.20.*\",\n+ \"google-cloud-storage>=1.20.*,<1.41\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n", "issue": "Solve feast conflict dependencies for [gcp]\n## Expected Behavior \r\n`pip-compile` should run without error and result in a nice lock files of the libraries to install\r\n\r\n## Current Behavior\r\n`pip-compile` will find conflict in the current feast[gcp] dependencies. Did not try for [aws] or [redis]\r\n<img width=\"1774\" alt=\"image\" src=\"https://user-images.githubusercontent.com/18557047/135331637-5c3d47ad-ebe0-4a27-b335-93617675027f.png\">\r\n\r\n## Steps to reproduce\r\n```bash\r\necho \"-e sdk/python[gcp]\" > sdk/python/requirements.txt\r\npip-compile --dry-run sdk/python/requirements.txt\r\n```\r\n<img width=\"1244\" alt=\"image\" src=\"https://user-images.githubusercontent.com/18557047/135332916-c368ca80-3276-40ab-a3bd-42c48d52c2e9.png\">\r\n\r\n### Specifications\r\n\r\n- Version:\r\n- Platform:\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport shutil\nimport subprocess\nimport pathlib\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"dill==0.3.*\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"grpcio-reflection>=1.34.0\"\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=4.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML>=5.4.*\",\n \"tabulate==0.8.*\",\n \"tenacity>=7.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"fastapi>=0.68.0\",\n \"uvicorn[standard]>=0.14.0\",\n]\n\nGCP_REQUIRED = [\n 
\"google-cloud-bigquery>=2.28.1\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.34.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nAWS_REQUIRED = [\n \"boto3==1.17.*\",\n \"docker>=5.0.2\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"testcontainers==3.4.2\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.28.1\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n \"boto3==1.17.*\",\n]\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent.parent.parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n 
self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]} | 3,618 | 247 |
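The diff above resolves the `pip-compile` conflict by giving `google-cloud-storage` the same upper bound in both dependency lists of `sdk/python/setup.py`, so the `[gcp]` extra and the CI extra can settle on one consistent version. A sketch of the two lists after the change (the `# ...` placeholders stand for the surrounding, unchanged pins):

```python
# setup.py after the fix: both extras agree that google-cloud-storage must
# stay below 1.41 (version caps taken from the diff above).
GCP_REQUIRED = [
    # ...
    "google-cloud-storage>=1.34.*,<1.41",
    # ...
]

CI_REQUIRED = [
    # ...
    "google-cloud-storage>=1.20.*,<1.41",
    # ...
]
```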
gh_patches_debug_32997 | rasdani/github-patches | git_diff | opendatacube__datacube-core-422 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Masking Tools could be a lot more user friendly
`datacube.storage.masking.describe_variable_flags()` should return an object with not only `__repr__()` but also `_repr_html_()` defined, so that the table of information is legible in a Jupyter notebook (like how pandas works). 
Current output is about 50 lines illegibly formatted like this:
```
'Flag name | Description | Bit. No |
Value | Meaning \n-------------------+--------------------------------------------------+-------------------------
-----------------------+-------+----------------\nga_good_pixel | Best Quality Pixel |
[13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] | 16383 | True \nblue_saturated | Blue band is saturated
| 0 | 0 | True \n |
| 0 | 1 | False \n-------------------+-------------------------
-------------------------+------------------------------------------------+-------+----------------\ngreen_saturated |
Green band is saturated | 1 | 0 | True
\n | | 1
```
This would also be an opportunity to simplify the masking API. For example, once you retrieve this pretty-displaying object, actually generating the mask could be as simple as reading an attribute/property from the same object.
</issue>
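The fix that was eventually merged (visible in the golden diff for this record) takes the pandas route suggested above: `describe_variable_flags` wraps the `flags_definition` dict in a `pandas.DataFrame`, which already ships both `__repr__()` and `_repr_html_()`. A rough sketch of that idea, reusing the two flags quoted in the issue output and assuming the usual `bits`/`description`/`values` layout of `flags_definition`:

```python
import pandas

# Example flags_definition, shaped like the attribute attached to masking
# variables; the two flags and their values are taken from the issue output.
flags_def = {
    "ga_good_pixel": {
        "bits": [13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
        "description": "Best Quality Pixel",
        "values": {"16383": True},
    },
    "blue_saturated": {
        "bits": 0,
        "description": "Blue band is saturated",
        "values": {"0": True, "1": False},
    },
}

# One row per flag: renders as an aligned text table via __repr__() in a
# terminal and as an HTML table via _repr_html_() in a Jupyter notebook.
df = pandas.DataFrame.from_dict(flags_def, orient="index")
print(df)
```

With the patch applied, `describe_variable_flags(variable)` returns such a DataFrame by default, while `describe_variable_flags(variable, with_pandas=False)` keeps the old string output for callers that still want it.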
<code>
[start of datacube/storage/masking.py]
1 """
2 Tools for masking data based on a bit-mask variable with attached definition.
3
4 The main functions are `make_mask(variable)` `describe_flags(variable)`
5 """
6 from __future__ import absolute_import
7
8 import collections
9 import warnings
10
11 from datacube.utils import generate_table
12
13 from xarray import DataArray, Dataset
14
15 FLAGS_ATTR_NAME = 'flags_definition'
16
17
18 def list_flag_names(variable):
19 """
20 Returns the available masking flags for the variable
21
22 :param variable: Masking xarray.Dataset or xarray.DataArray
23 :return: list
24 """
25 flags_def = get_flags_def(variable)
26 return sorted(list(flags_def.keys()))
27
28
29 def describe_variable_flags(variable):
30 """
31 Returns a string describing the available flags for a masking variable
32
33 Interprets the `flags_definition` attribute on the provided variable and returns
34 a string like::
35
36 Bits are listed from the MSB (bit 13) to the LSB (bit 0)
37 Bit Value Flag Name Description
38 13 0 cloud_shadow_fmask Cloud Shadow (Fmask)
39 12 0 cloud_shadow_acca Cloud Shadow (ACCA)
40 11 0 cloud_fmask Cloud (Fmask)
41 10 0 cloud_acca Cloud (ACCA)
42
43 :param variable: Masking xarray.Dataset or xarray.DataArray
44 :return: str
45 """
46 flags_def = get_flags_def(variable)
47
48 return describe_flags_def(flags_def)
49
50
51 def describe_flags_def(flags_def):
52 return '\n'.join(generate_table(list(_table_contents(flags_def))))
53
54
55 def _table_contents(flags_def):
56 yield 'Flag name', 'Description', 'Bit. No', 'Value', 'Meaning'
57 for name, defn in sorted(flags_def.items(), key=_order_bitdefs_by_bits):
58 name, desc = name, defn['description']
59 for value, meaning in defn['values'].items():
60 yield name, desc, str(defn['bits']), str(value), str(meaning)
61 name, desc = '', ''
62
63
64 def _order_bitdefs_by_bits(bitdef):
65 name, defn = bitdef
66 try:
67 return min(defn['bits'])
68 except TypeError:
69 return defn['bits']
70
71
72 def make_mask(variable, **flags):
73 """
74 Returns a mask array, based on provided flags
75
76 When multiple flags are provided, they will be combined in a logical AND fashion.
77
78 For example:
79
80 >>> make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True) # doctest: +SKIP
81
82 OR
83
84 >>> make_mask(pqa, **GOOD_PIXEL_FLAGS) # doctest: +SKIP
85
86 where `GOOD_PIXEL_FLAGS` is a dict of flag_name to True/False
87
88 :param variable:
89 :type variable: xarray.Dataset or xarray.DataArray
90 :param flags: list of boolean flags
91 :return: boolean xarray.DataArray or xarray.Dataset
92 """
93 flags_def = get_flags_def(variable)
94
95 mask, mask_value = create_mask_value(flags_def, **flags)
96
97 return variable & mask == mask_value
98
99
100 def valid_data_mask(data):
101 """
102 Returns bool arrays where the data is not `nodata`
103
104 :param Dataset or DataArray data:
105 :return: Dataset or DataArray
106 """
107 if isinstance(data, Dataset):
108 return data.apply(valid_data_mask)
109
110 if isinstance(data, DataArray):
111 if 'nodata' not in data.attrs:
112 return True
113 return data != data.nodata
114
115 raise TypeError('valid_data_mask not supported for type {}'.format(type(data)))
116
117
118 def mask_valid_data(data, keep_attrs=True):
119 """
120 Deprecated. This function was poorly named. It is now available as `mask_invalid_data`.
121 """
122 warnings.warn("`mask_valid_data` has been renamed to `mask_invalid_data`. Please use that instead.",
123 DeprecationWarning)
124 return mask_invalid_data(data, keep_attrs=keep_attrs)
125
126
127 def mask_invalid_data(data, keep_attrs=True):
128 """
129 Sets all `nodata` values to ``nan``.
130
131 This will convert converts numeric data to type `float`.
132
133 :param Dataset or DataArray data:
134 :param bool keep_attrs: If the attributes of the data should be included in the returned .
135 :return: Dataset or DataArray
136 """
137 if isinstance(data, Dataset):
138 # Pass keep_attrs as a positional arg to the DataArray func
139 return data.apply(mask_invalid_data, keep_attrs=keep_attrs, args=(keep_attrs,))
140
141 if isinstance(data, DataArray):
142 if 'nodata' not in data.attrs:
143 return data
144 out_data_array = data.where(data != data.nodata)
145 if keep_attrs:
146 out_data_array.attrs = data.attrs
147 return out_data_array
148
149 raise TypeError('mask_invalid_data not supported for type {}'.format(type(data)))
150
151
152 def create_mask_value(bits_def, **flags):
153 mask = 0
154 value = 0
155
156 for flag_name, flag_ref in flags.items():
157 defn = bits_def[flag_name]
158
159 try:
160 [flag_value] = (bit_val
161 for bit_val, val_ref in defn['values'].items()
162 if val_ref == flag_ref)
163 flag_value = int(flag_value) # Might be string if coming from DB
164 except ValueError:
165 raise ValueError('Unknown value %s specified for flag %s' %
166 (flag_ref, flag_name))
167
168 if isinstance(defn['bits'], collections.Iterable): # Multi-bit flag
169 # Set mask
170 for bit in defn['bits']:
171 mask = set_value_at_index(mask, bit, True)
172
173 shift = min(defn['bits'])
174 real_val = flag_value << shift
175
176 value |= real_val
177
178 else:
179 bit = defn['bits']
180 mask = set_value_at_index(mask, bit, True)
181 value = set_value_at_index(value, bit, flag_value)
182
183 return mask, value
184
185
186 def mask_to_dict(bits_def, mask_value):
187 """
188 Describes which flags are set for a mask value
189
190 :param bits_def:
191 :param mask_value:
192 :return: Mapping of flag_name -> set_value
193 :rtype: dict
194 """
195 return_dict = {}
196 for flag_name, flag_defn in bits_def.items():
197
198 # Make bits a list, even if there is only one
199 flag_bits = flag_defn['bits']
200 if not isinstance(flag_defn['bits'], list):
201 flag_bits = [flag_bits]
202
203 # The amount to shift flag_value to line up with mask_value
204 flag_shift = min(flag_bits)
205
206 # Mask our mask_value, we are only interested in the bits for this flag
207 flag_mask = 0
208 for i in flag_bits:
209 flag_mask |= (1 << i)
210 masked_mask_value = mask_value & flag_mask
211
212 for flag_value, value in flag_defn['values'].items():
213 shifted_value = int(flag_value) << flag_shift
214 if shifted_value == masked_mask_value:
215 assert flag_name not in return_dict
216 return_dict[flag_name] = value
217 return return_dict
218
219
220 def _get_minimum_bit(bit_or_bits):
221 try:
222 return min(bit_or_bits)
223 except TypeError:
224 return bit_or_bits
225
226
227 def get_flags_def(variable):
228 try:
229 return getattr(variable, FLAGS_ATTR_NAME)
230 except AttributeError:
231 # Maybe we have a DataSet, not a DataArray
232 for var in variable.data_vars.values():
233 if _is_data_var(var):
234 try:
235 return getattr(var, FLAGS_ATTR_NAME)
236 except AttributeError:
237 pass
238
239 raise ValueError('No masking variable found')
240
241
242 def _is_data_var(variable):
243 return variable.name != 'crs' and len(variable.coords) > 1
244
245
246 def set_value_at_index(bitmask, index, value):
247 """
248 Set a bit value onto an integer bitmask
249
250 eg. set bits 2 and 4 to True
251 >>> mask = 0
252 >>> mask = set_value_at_index(mask, 2, True)
253 >>> mask = set_value_at_index(mask, 4, True)
254 >>> print(bin(mask))
255 0b10100
256 >>> mask = set_value_at_index(mask, 2, False)
257 >>> print(bin(mask))
258 0b10000
259
260 :param bitmask: existing int bitmask to alter
261 :type bitmask: int
262 :type index: int
263 :type value: bool
264 """
265 bit_val = 2 ** index
266 if value:
267 bitmask |= bit_val
268 else:
269 bitmask &= (~bit_val)
270 return bitmask
271
[end of datacube/storage/masking.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube/storage/masking.py b/datacube/storage/masking.py
--- a/datacube/storage/masking.py
+++ b/datacube/storage/masking.py
@@ -7,6 +7,7 @@
import collections
import warnings
+import pandas
from datacube.utils import generate_table
@@ -26,12 +27,13 @@
return sorted(list(flags_def.keys()))
-def describe_variable_flags(variable):
+def describe_variable_flags(variable, with_pandas=True):
"""
- Returns a string describing the available flags for a masking variable
+ Returns either a Pandas Dataframe (with_pandas=True - default) or a string
+ (with_pandas=False) describing the available flags for a masking variable
- Interprets the `flags_definition` attribute on the provided variable and returns
- a string like::
+ Interprets the `flags_definition` attribute on the provided variable and
+ returns a Pandas Dataframe or string like::
Bits are listed from the MSB (bit 13) to the LSB (bit 0)
Bit Value Flag Name Description
@@ -41,11 +43,14 @@
10 0 cloud_acca Cloud (ACCA)
:param variable: Masking xarray.Dataset or xarray.DataArray
- :return: str
+ :return: Pandas Dataframe or str
"""
flags_def = get_flags_def(variable)
- return describe_flags_def(flags_def)
+ if not with_pandas:
+ return describe_flags_def(flags_def)
+
+ return pandas.DataFrame.from_dict(flags_def, orient='index')
def describe_flags_def(flags_def):
@@ -128,7 +133,7 @@
"""
Sets all `nodata` values to ``nan``.
- This will convert converts numeric data to type `float`.
+ This will convert numeric data to type `float`.
:param Dataset or DataArray data:
:param bool keep_attrs: If the attributes of the data should be included in the returned .
| {"golden_diff": "diff --git a/datacube/storage/masking.py b/datacube/storage/masking.py\n--- a/datacube/storage/masking.py\n+++ b/datacube/storage/masking.py\n@@ -7,6 +7,7 @@\n \n import collections\n import warnings\n+import pandas\n \n from datacube.utils import generate_table\n \n@@ -26,12 +27,13 @@\n return sorted(list(flags_def.keys()))\n \n \n-def describe_variable_flags(variable):\n+def describe_variable_flags(variable, with_pandas=True):\n \"\"\"\n- Returns a string describing the available flags for a masking variable\n+ Returns either a Pandas Dataframe (with_pandas=True - default) or a string\n+ (with_pandas=False) describing the available flags for a masking variable\n \n- Interprets the `flags_definition` attribute on the provided variable and returns\n- a string like::\n+ Interprets the `flags_definition` attribute on the provided variable and\n+ returns a Pandas Dataframe or string like::\n \n Bits are listed from the MSB (bit 13) to the LSB (bit 0)\n Bit Value Flag Name Description\n@@ -41,11 +43,14 @@\n 10 0 cloud_acca Cloud (ACCA)\n \n :param variable: Masking xarray.Dataset or xarray.DataArray\n- :return: str\n+ :return: Pandas Dataframe or str\n \"\"\"\n flags_def = get_flags_def(variable)\n \n- return describe_flags_def(flags_def)\n+ if not with_pandas:\n+ return describe_flags_def(flags_def)\n+\n+ return pandas.DataFrame.from_dict(flags_def, orient='index')\n \n \n def describe_flags_def(flags_def):\n@@ -128,7 +133,7 @@\n \"\"\"\n Sets all `nodata` values to ``nan``.\n \n- This will convert converts numeric data to type `float`.\n+ This will convert numeric data to type `float`.\n \n :param Dataset or DataArray data:\n :param bool keep_attrs: If the attributes of the data should be included in the returned .\n", "issue": "Masking Tools could be a lot more user friendly\n`datacube.storage.masking.describe_variable_flags()` should return an object with not only `__repr__()` but also `_repr_html_()` defined, so that the table of information is legible in a jupyter notebook (like how pandas works). \r\n\r\nCurrent output is about 50 lines illegibly formatted like this:\r\n```\r\n'Flag name | Description | Bit. No | \r\nValue | Meaning \\n-------------------+--------------------------------------------------+-------------------------\r\n-----------------------+-------+----------------\\nga_good_pixel | Best Quality Pixel |\r\n[13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] | 16383 | True \\nblue_saturated | Blue band is saturated \r\n| 0 | 0 | True \\n | \r\n| 0 | 1 | False \\n-------------------+-------------------------\r\n-------------------------+------------------------------------------------+-------+----------------\\ngreen_saturated |\r\nGreen band is saturated | 1 | 0 | True \r\n\\n | | 1 \r\n```\r\n\r\nThis would also be an opportunity to simplify the masking api. For example, once you retrieve this pretty-displaying object, actually generating the mask could be as simple as reading an attribute/property from the same object. 
\n", "before_files": [{"content": "\"\"\"\nTools for masking data based on a bit-mask variable with attached definition.\n\nThe main functions are `make_mask(variable)` `describe_flags(variable)`\n\"\"\"\nfrom __future__ import absolute_import\n\nimport collections\nimport warnings\n\nfrom datacube.utils import generate_table\n\nfrom xarray import DataArray, Dataset\n\nFLAGS_ATTR_NAME = 'flags_definition'\n\n\ndef list_flag_names(variable):\n \"\"\"\n Returns the available masking flags for the variable\n\n :param variable: Masking xarray.Dataset or xarray.DataArray\n :return: list\n \"\"\"\n flags_def = get_flags_def(variable)\n return sorted(list(flags_def.keys()))\n\n\ndef describe_variable_flags(variable):\n \"\"\"\n Returns a string describing the available flags for a masking variable\n\n Interprets the `flags_definition` attribute on the provided variable and returns\n a string like::\n\n Bits are listed from the MSB (bit 13) to the LSB (bit 0)\n Bit Value Flag Name Description\n 13 0 cloud_shadow_fmask Cloud Shadow (Fmask)\n 12 0 cloud_shadow_acca Cloud Shadow (ACCA)\n 11 0 cloud_fmask Cloud (Fmask)\n 10 0 cloud_acca Cloud (ACCA)\n\n :param variable: Masking xarray.Dataset or xarray.DataArray\n :return: str\n \"\"\"\n flags_def = get_flags_def(variable)\n\n return describe_flags_def(flags_def)\n\n\ndef describe_flags_def(flags_def):\n return '\\n'.join(generate_table(list(_table_contents(flags_def))))\n\n\ndef _table_contents(flags_def):\n yield 'Flag name', 'Description', 'Bit. No', 'Value', 'Meaning'\n for name, defn in sorted(flags_def.items(), key=_order_bitdefs_by_bits):\n name, desc = name, defn['description']\n for value, meaning in defn['values'].items():\n yield name, desc, str(defn['bits']), str(value), str(meaning)\n name, desc = '', ''\n\n\ndef _order_bitdefs_by_bits(bitdef):\n name, defn = bitdef\n try:\n return min(defn['bits'])\n except TypeError:\n return defn['bits']\n\n\ndef make_mask(variable, **flags):\n \"\"\"\n Returns a mask array, based on provided flags\n\n When multiple flags are provided, they will be combined in a logical AND fashion.\n\n For example:\n\n >>> make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True) # doctest: +SKIP\n\n OR\n\n >>> make_mask(pqa, **GOOD_PIXEL_FLAGS) # doctest: +SKIP\n\n where `GOOD_PIXEL_FLAGS` is a dict of flag_name to True/False\n\n :param variable:\n :type variable: xarray.Dataset or xarray.DataArray\n :param flags: list of boolean flags\n :return: boolean xarray.DataArray or xarray.Dataset\n \"\"\"\n flags_def = get_flags_def(variable)\n\n mask, mask_value = create_mask_value(flags_def, **flags)\n\n return variable & mask == mask_value\n\n\ndef valid_data_mask(data):\n \"\"\"\n Returns bool arrays where the data is not `nodata`\n\n :param Dataset or DataArray data:\n :return: Dataset or DataArray\n \"\"\"\n if isinstance(data, Dataset):\n return data.apply(valid_data_mask)\n\n if isinstance(data, DataArray):\n if 'nodata' not in data.attrs:\n return True\n return data != data.nodata\n\n raise TypeError('valid_data_mask not supported for type {}'.format(type(data)))\n\n\ndef mask_valid_data(data, keep_attrs=True):\n \"\"\"\n Deprecated. This function was poorly named. It is now available as `mask_invalid_data`.\n \"\"\"\n warnings.warn(\"`mask_valid_data` has been renamed to `mask_invalid_data`. 
Please use that instead.\",\n DeprecationWarning)\n return mask_invalid_data(data, keep_attrs=keep_attrs)\n\n\ndef mask_invalid_data(data, keep_attrs=True):\n \"\"\"\n Sets all `nodata` values to ``nan``.\n\n This will convert converts numeric data to type `float`.\n\n :param Dataset or DataArray data:\n :param bool keep_attrs: If the attributes of the data should be included in the returned .\n :return: Dataset or DataArray\n \"\"\"\n if isinstance(data, Dataset):\n # Pass keep_attrs as a positional arg to the DataArray func\n return data.apply(mask_invalid_data, keep_attrs=keep_attrs, args=(keep_attrs,))\n\n if isinstance(data, DataArray):\n if 'nodata' not in data.attrs:\n return data\n out_data_array = data.where(data != data.nodata)\n if keep_attrs:\n out_data_array.attrs = data.attrs\n return out_data_array\n\n raise TypeError('mask_invalid_data not supported for type {}'.format(type(data)))\n\n\ndef create_mask_value(bits_def, **flags):\n mask = 0\n value = 0\n\n for flag_name, flag_ref in flags.items():\n defn = bits_def[flag_name]\n\n try:\n [flag_value] = (bit_val\n for bit_val, val_ref in defn['values'].items()\n if val_ref == flag_ref)\n flag_value = int(flag_value) # Might be string if coming from DB\n except ValueError:\n raise ValueError('Unknown value %s specified for flag %s' %\n (flag_ref, flag_name))\n\n if isinstance(defn['bits'], collections.Iterable): # Multi-bit flag\n # Set mask\n for bit in defn['bits']:\n mask = set_value_at_index(mask, bit, True)\n\n shift = min(defn['bits'])\n real_val = flag_value << shift\n\n value |= real_val\n\n else:\n bit = defn['bits']\n mask = set_value_at_index(mask, bit, True)\n value = set_value_at_index(value, bit, flag_value)\n\n return mask, value\n\n\ndef mask_to_dict(bits_def, mask_value):\n \"\"\"\n Describes which flags are set for a mask value\n\n :param bits_def:\n :param mask_value:\n :return: Mapping of flag_name -> set_value\n :rtype: dict\n \"\"\"\n return_dict = {}\n for flag_name, flag_defn in bits_def.items():\n\n # Make bits a list, even if there is only one\n flag_bits = flag_defn['bits']\n if not isinstance(flag_defn['bits'], list):\n flag_bits = [flag_bits]\n\n # The amount to shift flag_value to line up with mask_value\n flag_shift = min(flag_bits)\n\n # Mask our mask_value, we are only interested in the bits for this flag\n flag_mask = 0\n for i in flag_bits:\n flag_mask |= (1 << i)\n masked_mask_value = mask_value & flag_mask\n\n for flag_value, value in flag_defn['values'].items():\n shifted_value = int(flag_value) << flag_shift\n if shifted_value == masked_mask_value:\n assert flag_name not in return_dict\n return_dict[flag_name] = value\n return return_dict\n\n\ndef _get_minimum_bit(bit_or_bits):\n try:\n return min(bit_or_bits)\n except TypeError:\n return bit_or_bits\n\n\ndef get_flags_def(variable):\n try:\n return getattr(variable, FLAGS_ATTR_NAME)\n except AttributeError:\n # Maybe we have a DataSet, not a DataArray\n for var in variable.data_vars.values():\n if _is_data_var(var):\n try:\n return getattr(var, FLAGS_ATTR_NAME)\n except AttributeError:\n pass\n\n raise ValueError('No masking variable found')\n\n\ndef _is_data_var(variable):\n return variable.name != 'crs' and len(variable.coords) > 1\n\n\ndef set_value_at_index(bitmask, index, value):\n \"\"\"\n Set a bit value onto an integer bitmask\n\n eg. 
set bits 2 and 4 to True\n >>> mask = 0\n >>> mask = set_value_at_index(mask, 2, True)\n >>> mask = set_value_at_index(mask, 4, True)\n >>> print(bin(mask))\n 0b10100\n >>> mask = set_value_at_index(mask, 2, False)\n >>> print(bin(mask))\n 0b10000\n\n :param bitmask: existing int bitmask to alter\n :type bitmask: int\n :type index: int\n :type value: bool\n \"\"\"\n bit_val = 2 ** index\n if value:\n bitmask |= bit_val\n else:\n bitmask &= (~bit_val)\n return bitmask\n", "path": "datacube/storage/masking.py"}]} | 3,471 | 459 |
gh_patches_debug_43309 | rasdani/github-patches | git_diff | carpentries__amy-1858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Memberships: prevent changing consortium to non-consortium if there are >1 members assigned
Current implementation allows for membership to be changed from consortium (multiple members) to non-consortium (only one member), but a validation should be implemented to prevent that change when more than one member exists.
@maneesha may provide additional logic for the change, for example _the remaining member should receive a different membership role_.
</issue>
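The merged change (see the golden diff for this record) implements the simplest interpretation: the switch is blocked outright while more than one member is attached. A minimal sketch of that check, assuming it lives in `MembershipForm.clean()` alongside the existing agreement-date validation and that `member_set` is the reverse relation from `Membership` to `Member`:

```python
from django import forms
from django.core.exceptions import ValidationError


class MembershipForm(forms.ModelForm):
    # ... Meta and the rest of the form unchanged ...

    def clean(self):
        super().clean()
        errors = {}

        # (existing agreement_start / agreement_end validation elided)

        # Disallow switching back to non-consortium while more than one
        # member organisation is still attached to this membership.
        new_consortium = self.cleaned_data.get("consortium")
        if not new_consortium and self.instance.member_set.count() > 1:
            errors["consortium"] = ValidationError(
                "Cannot change to non-consortium when there are multiple "
                "members assigned. Remove the members so that at most 1 is left."
            )

        if errors:
            raise ValidationError(errors)
```

The role-reassignment idea mentioned above (give the remaining member a different membership role) is left out of that patch.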
<code>
[start of amy/fiscal/forms.py]
1 from crispy_forms.layout import Div, HTML, Field
2 from django import forms
3 from django.core.exceptions import ValidationError
4 from django.core.validators import RegexValidator
5 from django.dispatch import receiver
6 from django.urls import reverse
7 from markdownx.fields import MarkdownxFormField
8
9 from fiscal.models import MembershipTask
10 from workshops.forms import (
11 BootstrapHelper,
12 form_saved_add_comment,
13 )
14 from workshops.models import (
15 Organization,
16 Member,
17 Membership,
18 )
19
20 # this is used instead of Django Autocomplete Light widgets
21 # see issue #1330: https://github.com/swcarpentry/amy/issues/1330
22 from workshops.fields import (
23 ModelSelect2Widget,
24 )
25 from workshops.signals import create_comment_signal
26
27
28 # settings for Select2
29 # this makes it possible for autocomplete widget to fit in low-width sidebar
30 SIDEBAR_DAL_WIDTH = {
31 "data-width": "100%",
32 "width": "style",
33 }
34
35
36 class OrganizationForm(forms.ModelForm):
37 domain = forms.CharField(
38 max_length=Organization._meta.get_field("domain").max_length,
39 validators=[
40 RegexValidator(
41 r"[^\w\.-]+",
42 inverse_match=True,
43 message='Please enter only the domain (such as "math.esu.edu")'
44 ' without a leading "http://" or a trailing "/".',
45 )
46 ],
47 )
48
49 helper = BootstrapHelper(add_cancel_button=False, duplicate_buttons_on_top=True)
50
51 class Meta:
52 model = Organization
53 fields = ["domain", "fullname", "country", "latitude", "longitude"]
54
55
56 class OrganizationCreateForm(OrganizationForm):
57 comment = MarkdownxFormField(
58 label="Comment",
59 help_text="This will be added to comments after the organization "
60 "is created.",
61 widget=forms.Textarea,
62 required=False,
63 )
64
65 class Meta(OrganizationForm.Meta):
66 fields = OrganizationForm.Meta.fields.copy()
67 fields.append("comment")
68
69 def save(self, *args, **kwargs):
70 res = super().save(*args, **kwargs)
71
72 create_comment_signal.send(
73 sender=self.__class__,
74 content_object=res,
75 comment=self.cleaned_data["comment"],
76 timestamp=None,
77 )
78
79 return res
80
81
82 class MembershipForm(forms.ModelForm):
83 helper = BootstrapHelper(add_cancel_button=False)
84
85 class Meta:
86 model = Membership
87 fields = [
88 "name",
89 "consortium",
90 "public_status",
91 "variant",
92 "agreement_start",
93 "agreement_end",
94 "contribution_type",
95 "registration_code",
96 "agreement_link",
97 "workshops_without_admin_fee_per_agreement",
98 "public_instructor_training_seats",
99 "additional_public_instructor_training_seats",
100 "inhouse_instructor_training_seats",
101 "additional_inhouse_instructor_training_seats",
102 "emergency_contact",
103 ]
104
105 def __init__(self, *args, **kwargs):
106 super().__init__(*args, **kwargs)
107
108 # set up a layout object for the helper
109 self.helper.layout = self.helper.build_default_layout(self)
110
111 # add warning alert for dates falling within next 2-3 months
112 INVALID_AGREEMENT_DURATION_WARNING = (
113 "The selected agreement dates fall out of the typical 1-year long duration."
114 )
115 pos_index = self.helper.layout.fields.index("agreement_end")
116 self.helper.layout.insert(
117 pos_index + 1,
118 Div(
119 Div(
120 HTML(INVALID_AGREEMENT_DURATION_WARNING),
121 css_class="alert alert-warning offset-lg-2 col-lg-8 col-12",
122 ),
123 id="agreement_duration_warning",
124 css_class="form-group row d-none",
125 ),
126 )
127
128 def clean(self):
129 super().clean()
130 errors = dict()
131
132 # validate agreement end date is no sooner than start date
133 agreement_start = self.cleaned_data.get("agreement_start")
134 agreement_end = self.cleaned_data.get("agreement_end")
135 try:
136 if agreement_end < agreement_start:
137 errors["agreement_end"] = ValidationError(
138 "Agreement end date can't be sooner than the start date."
139 )
140 except TypeError:
141 pass
142
143 if errors:
144 raise ValidationError(errors)
145
146
147 class MembershipCreateForm(MembershipForm):
148 comment = MarkdownxFormField(
149 label="Comment",
150 help_text="This will be added to comments after the membership is created.",
151 widget=forms.Textarea,
152 required=False,
153 )
154
155 helper = BootstrapHelper(add_cancel_button=True)
156
157 main_organization = forms.ModelChoiceField(
158 Organization.objects.all(),
159 label="Main organisation",
160 required=True,
161 widget=ModelSelect2Widget(data_view="organization-lookup"),
162 help_text="Select main organisation (e.g. Signatory in case of consortium).",
163 )
164
165 class Meta(MembershipForm.Meta):
166 fields = MembershipForm.Meta.fields.copy()
167 fields.insert(0, "main_organization")
168 fields.append("comment")
169
170 class Media:
171 js = ("membership_create.js",)
172
173 def __init__(self, *args, **kwargs):
174 super().__init__(*args, **kwargs)
175
176 self.fields["consortium"].help_text += (
177 "<br>If you select this option, you'll be taken to the next screen to "
178 "select organisations engaged in consortium. You must create the "
179 "organisation (<a href='{}'>here</a>) before applying it to this "
180 "membership."
181 ).format(reverse("organization_add"))
182
183 def save(self, *args, **kwargs):
184 res = super().save(*args, **kwargs)
185
186 create_comment_signal.send(
187 sender=self.__class__,
188 content_object=res,
189 comment=self.cleaned_data["comment"],
190 timestamp=None,
191 )
192
193 return res
194
195
196 class MembershipRollOverForm(MembershipCreateForm):
197 main_organization = None # remove the additional field
198
199 class Meta(MembershipCreateForm.Meta):
200 fields = [
201 "name",
202 "consortium",
203 "public_status",
204 "variant",
205 "agreement_start",
206 "agreement_end",
207 "contribution_type",
208 "registration_code",
209 "agreement_link",
210 "workshops_without_admin_fee_per_agreement",
211 "workshops_without_admin_fee_rolled_from_previous",
212 "public_instructor_training_seats",
213 "additional_public_instructor_training_seats",
214 "public_instructor_training_seats_rolled_from_previous",
215 "inhouse_instructor_training_seats",
216 "additional_inhouse_instructor_training_seats",
217 "inhouse_instructor_training_seats_rolled_from_previous",
218 "emergency_contact",
219 "comment",
220 ]
221
222 def __init__(self, *args, **kwargs):
223 super().__init__(*args, **kwargs)
224 self["workshops_without_admin_fee_rolled_from_previous"].field.disabled = True
225 self[
226 "public_instructor_training_seats_rolled_from_previous"
227 ].field.disabled = True
228 self[
229 "inhouse_instructor_training_seats_rolled_from_previous"
230 ].field.disabled = True
231
232
233 class MemberForm(forms.ModelForm):
234 """Form intended to use in formset for creating multiple membership members."""
235
236 helper = BootstrapHelper(
237 add_cancel_button=False, form_tag=False, add_submit_button=False
238 )
239 helper_empty_form = BootstrapHelper(
240 add_cancel_button=False, form_tag=False, add_submit_button=False
241 )
242
243 class Meta:
244 model = Member
245 fields = [
246 "organization",
247 "role",
248 ]
249 widgets = {
250 "organization": ModelSelect2Widget(data_view="organization-lookup"),
251 "role": ModelSelect2Widget(data_view="memberrole-lookup"),
252 }
253
254 def __init__(self, *args, **kwargs):
255 super().__init__(*args, **kwargs)
256
257 # set up layout objects for the helpers - they're identical except for
258 # visibility of the delete checkbox
259 self.helper.layout = self.helper.build_default_layout(self)
260 self.helper_empty_form.layout = self.helper.build_default_layout(self)
261 self.helper.layout.append(Field("id"))
262 self.helper.layout.append(Field("DELETE")) # visible; formset adds it
263 self.helper_empty_form.layout.append(Field("id"))
264 self.helper_empty_form.layout.append(
265 Div(Field("DELETE"), css_class="d-none") # hidden
266 )
267
268
269 class MembershipTaskForm(forms.ModelForm):
270 """Form intended to use in formset for creating multiple membership members."""
271
272 helper = BootstrapHelper(
273 add_cancel_button=False, form_tag=False, add_submit_button=False
274 )
275 helper_empty_form = BootstrapHelper(
276 add_cancel_button=False, form_tag=False, add_submit_button=False
277 )
278
279 class Meta:
280 model = MembershipTask
281 fields = [
282 "person",
283 "role",
284 ]
285 widgets = {
286 "person": ModelSelect2Widget(data_view="person-lookup"),
287 "role": ModelSelect2Widget(data_view="membershippersonrole-lookup"),
288 }
289
290 def __init__(self, *args, **kwargs):
291 super().__init__(*args, **kwargs)
292
293 # set up layout objects for the helpers - they're identical except for
294 # visibility of the delete checkbox
295 self.helper.layout = self.helper.build_default_layout(self)
296 self.helper_empty_form.layout = self.helper.build_default_layout(self)
297 self.helper.layout.append(Field("id"))
298 self.helper.layout.append(Field("DELETE")) # visible; formset adds it
299 self.helper_empty_form.layout.append(Field("id"))
300 self.helper_empty_form.layout.append(
301 Div(Field("DELETE"), css_class="d-none") # hidden
302 )
303
304
305 class MembershipExtensionForm(forms.Form):
306 agreement_start = forms.DateField(disabled=True, required=False)
307 agreement_end = forms.DateField(disabled=True, required=False)
308 extension = forms.IntegerField(
309 min_value=1,
310 required=True,
311 help_text="Number of days the agreement should be extended.",
312 )
313 new_agreement_end = forms.DateField(disabled=True, required=False)
314
315 helper = BootstrapHelper()
316
317 class Media:
318 js = ("membership_extend.js", "date_yyyymmdd.js")
319
320
321 # ----------------------------------------------------------
322 # Signals
323
324 # adding @receiver decorator to the function defined in `workshops.forms`
325 form_saved_add_comment = receiver(
326 create_comment_signal,
327 sender=OrganizationCreateForm,
328 )(form_saved_add_comment)
329
330 # adding @receiver decorator to the function defined in `workshops.forms`
331 form_saved_add_comment = receiver(
332 create_comment_signal,
333 sender=MembershipCreateForm,
334 )(form_saved_add_comment)
335
[end of amy/fiscal/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/fiscal/forms.py b/amy/fiscal/forms.py
--- a/amy/fiscal/forms.py
+++ b/amy/fiscal/forms.py
@@ -1,4 +1,4 @@
-from crispy_forms.layout import Div, HTML, Field
+from crispy_forms.layout import Div, HTML
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
@@ -140,6 +140,16 @@
except TypeError:
pass
+ # check if multiple members are assigned - then disallow changing to
+ # non-consortium
+ new_consortium = self.cleaned_data.get("consortium")
+ members_count = self.instance.member_set.count()
+ if not new_consortium and members_count > 1:
+ errors["consortium"] = ValidationError(
+ "Cannot change to non-consortium when there are multiple members "
+ "assigned. Remove the members so that at most 1 is left."
+ )
+
if errors:
raise ValidationError(errors)
@@ -230,7 +240,20 @@
].field.disabled = True
-class MemberForm(forms.ModelForm):
+class EditableFormsetFormMixin(forms.ModelForm):
+ EDITABLE = forms.BooleanField(
+ label="Change",
+ required=False,
+ widget=forms.CheckboxInput(attrs={"data-form-editable-check": ""}),
+ )
+
+ def clean(self):
+ if self.has_changed() and not self.cleaned_data["EDITABLE"]:
+ raise ValidationError("Form values weren't supposed to be changed.")
+ return super().clean()
+
+
+class MemberForm(EditableFormsetFormMixin, forms.ModelForm):
"""Form intended to use in formset for creating multiple membership members."""
helper = BootstrapHelper(
@@ -257,16 +280,19 @@
# set up layout objects for the helpers - they're identical except for
# visibility of the delete checkbox
self.helper.layout = self.helper.build_default_layout(self)
+ self.helper.layout.append("id")
+ self.helper.layout.append("DELETE") # visible; formset adds it
self.helper_empty_form.layout = self.helper.build_default_layout(self)
- self.helper.layout.append(Field("id"))
- self.helper.layout.append(Field("DELETE")) # visible; formset adds it
- self.helper_empty_form.layout.append(Field("id"))
+ self.helper_empty_form.layout.append("id")
self.helper_empty_form.layout.append(
- Div(Field("DELETE"), css_class="d-none") # hidden
+ Div("DELETE", css_class="d-none") # hidden
)
+ # remove EDITABLE checkbox from empty helper form
+ pos_index = self.helper_empty_form.layout.fields.index("EDITABLE")
+ self.helper_empty_form.layout.pop(pos_index)
-class MembershipTaskForm(forms.ModelForm):
+class MembershipTaskForm(EditableFormsetFormMixin, forms.ModelForm):
"""Form intended to use in formset for creating multiple membership members."""
helper = BootstrapHelper(
@@ -293,13 +319,16 @@
# set up layout objects for the helpers - they're identical except for
# visibility of the delete checkbox
self.helper.layout = self.helper.build_default_layout(self)
+ self.helper.layout.append("id")
+ self.helper.layout.append("DELETE") # visible; formset adds it
self.helper_empty_form.layout = self.helper.build_default_layout(self)
- self.helper.layout.append(Field("id"))
- self.helper.layout.append(Field("DELETE")) # visible; formset adds it
- self.helper_empty_form.layout.append(Field("id"))
+ self.helper_empty_form.layout.append("id")
self.helper_empty_form.layout.append(
- Div(Field("DELETE"), css_class="d-none") # hidden
+ Div("DELETE", css_class="d-none") # hidden
)
+ # remove EDITABLE checkbox from empty helper form
+ pos_index = self.helper_empty_form.layout.fields.index("EDITABLE")
+ self.helper_empty_form.layout.pop(pos_index)
class MembershipExtensionForm(forms.Form):
| {"golden_diff": "diff --git a/amy/fiscal/forms.py b/amy/fiscal/forms.py\n--- a/amy/fiscal/forms.py\n+++ b/amy/fiscal/forms.py\n@@ -1,4 +1,4 @@\n-from crispy_forms.layout import Div, HTML, Field\n+from crispy_forms.layout import Div, HTML\n from django import forms\n from django.core.exceptions import ValidationError\n from django.core.validators import RegexValidator\n@@ -140,6 +140,16 @@\n except TypeError:\n pass\n \n+ # check if multiple members are assigned - then disallow changing to\n+ # non-consortium\n+ new_consortium = self.cleaned_data.get(\"consortium\")\n+ members_count = self.instance.member_set.count()\n+ if not new_consortium and members_count > 1:\n+ errors[\"consortium\"] = ValidationError(\n+ \"Cannot change to non-consortium when there are multiple members \"\n+ \"assigned. Remove the members so that at most 1 is left.\"\n+ )\n+\n if errors:\n raise ValidationError(errors)\n \n@@ -230,7 +240,20 @@\n ].field.disabled = True\n \n \n-class MemberForm(forms.ModelForm):\n+class EditableFormsetFormMixin(forms.ModelForm):\n+ EDITABLE = forms.BooleanField(\n+ label=\"Change\",\n+ required=False,\n+ widget=forms.CheckboxInput(attrs={\"data-form-editable-check\": \"\"}),\n+ )\n+\n+ def clean(self):\n+ if self.has_changed() and not self.cleaned_data[\"EDITABLE\"]:\n+ raise ValidationError(\"Form values weren't supposed to be changed.\")\n+ return super().clean()\n+\n+\n+class MemberForm(EditableFormsetFormMixin, forms.ModelForm):\n \"\"\"Form intended to use in formset for creating multiple membership members.\"\"\"\n \n helper = BootstrapHelper(\n@@ -257,16 +280,19 @@\n # set up layout objects for the helpers - they're identical except for\n # visibility of the delete checkbox\n self.helper.layout = self.helper.build_default_layout(self)\n+ self.helper.layout.append(\"id\")\n+ self.helper.layout.append(\"DELETE\") # visible; formset adds it\n self.helper_empty_form.layout = self.helper.build_default_layout(self)\n- self.helper.layout.append(Field(\"id\"))\n- self.helper.layout.append(Field(\"DELETE\")) # visible; formset adds it\n- self.helper_empty_form.layout.append(Field(\"id\"))\n+ self.helper_empty_form.layout.append(\"id\")\n self.helper_empty_form.layout.append(\n- Div(Field(\"DELETE\"), css_class=\"d-none\") # hidden\n+ Div(\"DELETE\", css_class=\"d-none\") # hidden\n )\n+ # remove EDITABLE checkbox from empty helper form\n+ pos_index = self.helper_empty_form.layout.fields.index(\"EDITABLE\")\n+ self.helper_empty_form.layout.pop(pos_index)\n \n \n-class MembershipTaskForm(forms.ModelForm):\n+class MembershipTaskForm(EditableFormsetFormMixin, forms.ModelForm):\n \"\"\"Form intended to use in formset for creating multiple membership members.\"\"\"\n \n helper = BootstrapHelper(\n@@ -293,13 +319,16 @@\n # set up layout objects for the helpers - they're identical except for\n # visibility of the delete checkbox\n self.helper.layout = self.helper.build_default_layout(self)\n+ self.helper.layout.append(\"id\")\n+ self.helper.layout.append(\"DELETE\") # visible; formset adds it\n self.helper_empty_form.layout = self.helper.build_default_layout(self)\n- self.helper.layout.append(Field(\"id\"))\n- self.helper.layout.append(Field(\"DELETE\")) # visible; formset adds it\n- self.helper_empty_form.layout.append(Field(\"id\"))\n+ self.helper_empty_form.layout.append(\"id\")\n self.helper_empty_form.layout.append(\n- Div(Field(\"DELETE\"), css_class=\"d-none\") # hidden\n+ Div(\"DELETE\", css_class=\"d-none\") # hidden\n )\n+ # remove EDITABLE checkbox from empty helper form\n+ 
pos_index = self.helper_empty_form.layout.fields.index(\"EDITABLE\")\n+ self.helper_empty_form.layout.pop(pos_index)\n \n \n class MembershipExtensionForm(forms.Form):\n", "issue": "Memberships: prevent changing consortium to non-consortium if there are >1 members assigned\nCurrent implementation allows for membership to be changed from consortium (multiple members) to non-consortium (only one member), but a validation should be implemented to prevent that change when more than one member exists.\r\n\r\n@maneesha may provide additional logic for the change, for example _the remaining member should receive a different membership role_.\n", "before_files": [{"content": "from crispy_forms.layout import Div, HTML, Field\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import RegexValidator\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom markdownx.fields import MarkdownxFormField\n\nfrom fiscal.models import MembershipTask\nfrom workshops.forms import (\n BootstrapHelper,\n form_saved_add_comment,\n)\nfrom workshops.models import (\n Organization,\n Member,\n Membership,\n)\n\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n ModelSelect2Widget,\n)\nfrom workshops.signals import create_comment_signal\n\n\n# settings for Select2\n# this makes it possible for autocomplete widget to fit in low-width sidebar\nSIDEBAR_DAL_WIDTH = {\n \"data-width\": \"100%\",\n \"width\": \"style\",\n}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field(\"domain\").max_length,\n validators=[\n RegexValidator(\n r\"[^\\w\\.-]+\",\n inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".',\n )\n ],\n )\n\n helper = BootstrapHelper(add_cancel_button=False, duplicate_buttons_on_top=True)\n\n class Meta:\n model = Organization\n fields = [\"domain\", \"fullname\", \"country\", \"latitude\", \"longitude\"]\n\n\nclass OrganizationCreateForm(OrganizationForm):\n comment = MarkdownxFormField(\n label=\"Comment\",\n help_text=\"This will be added to comments after the organization \"\n \"is created.\",\n widget=forms.Textarea,\n required=False,\n )\n\n class Meta(OrganizationForm.Meta):\n fields = OrganizationForm.Meta.fields.copy()\n fields.append(\"comment\")\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(\n sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data[\"comment\"],\n timestamp=None,\n )\n\n return res\n\n\nclass MembershipForm(forms.ModelForm):\n helper = BootstrapHelper(add_cancel_button=False)\n\n class Meta:\n model = Membership\n fields = [\n \"name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"agreement_start\",\n \"agreement_end\",\n \"contribution_type\",\n \"registration_code\",\n \"agreement_link\",\n \"workshops_without_admin_fee_per_agreement\",\n \"public_instructor_training_seats\",\n \"additional_public_instructor_training_seats\",\n \"inhouse_instructor_training_seats\",\n \"additional_inhouse_instructor_training_seats\",\n \"emergency_contact\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up a layout object for the helper\n self.helper.layout = self.helper.build_default_layout(self)\n\n # add warning alert for dates 
falling within next 2-3 months\n INVALID_AGREEMENT_DURATION_WARNING = (\n \"The selected agreement dates fall out of the typical 1-year long duration.\"\n )\n pos_index = self.helper.layout.fields.index(\"agreement_end\")\n self.helper.layout.insert(\n pos_index + 1,\n Div(\n Div(\n HTML(INVALID_AGREEMENT_DURATION_WARNING),\n css_class=\"alert alert-warning offset-lg-2 col-lg-8 col-12\",\n ),\n id=\"agreement_duration_warning\",\n css_class=\"form-group row d-none\",\n ),\n )\n\n def clean(self):\n super().clean()\n errors = dict()\n\n # validate agreement end date is no sooner than start date\n agreement_start = self.cleaned_data.get(\"agreement_start\")\n agreement_end = self.cleaned_data.get(\"agreement_end\")\n try:\n if agreement_end < agreement_start:\n errors[\"agreement_end\"] = ValidationError(\n \"Agreement end date can't be sooner than the start date.\"\n )\n except TypeError:\n pass\n\n if errors:\n raise ValidationError(errors)\n\n\nclass MembershipCreateForm(MembershipForm):\n comment = MarkdownxFormField(\n label=\"Comment\",\n help_text=\"This will be added to comments after the membership is created.\",\n widget=forms.Textarea,\n required=False,\n )\n\n helper = BootstrapHelper(add_cancel_button=True)\n\n main_organization = forms.ModelChoiceField(\n Organization.objects.all(),\n label=\"Main organisation\",\n required=True,\n widget=ModelSelect2Widget(data_view=\"organization-lookup\"),\n help_text=\"Select main organisation (e.g. Signatory in case of consortium).\",\n )\n\n class Meta(MembershipForm.Meta):\n fields = MembershipForm.Meta.fields.copy()\n fields.insert(0, \"main_organization\")\n fields.append(\"comment\")\n\n class Media:\n js = (\"membership_create.js\",)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"consortium\"].help_text += (\n \"<br>If you select this option, you'll be taken to the next screen to \"\n \"select organisations engaged in consortium. 
You must create the \"\n \"organisation (<a href='{}'>here</a>) before applying it to this \"\n \"membership.\"\n ).format(reverse(\"organization_add\"))\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(\n sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data[\"comment\"],\n timestamp=None,\n )\n\n return res\n\n\nclass MembershipRollOverForm(MembershipCreateForm):\n main_organization = None # remove the additional field\n\n class Meta(MembershipCreateForm.Meta):\n fields = [\n \"name\",\n \"consortium\",\n \"public_status\",\n \"variant\",\n \"agreement_start\",\n \"agreement_end\",\n \"contribution_type\",\n \"registration_code\",\n \"agreement_link\",\n \"workshops_without_admin_fee_per_agreement\",\n \"workshops_without_admin_fee_rolled_from_previous\",\n \"public_instructor_training_seats\",\n \"additional_public_instructor_training_seats\",\n \"public_instructor_training_seats_rolled_from_previous\",\n \"inhouse_instructor_training_seats\",\n \"additional_inhouse_instructor_training_seats\",\n \"inhouse_instructor_training_seats_rolled_from_previous\",\n \"emergency_contact\",\n \"comment\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self[\"workshops_without_admin_fee_rolled_from_previous\"].field.disabled = True\n self[\n \"public_instructor_training_seats_rolled_from_previous\"\n ].field.disabled = True\n self[\n \"inhouse_instructor_training_seats_rolled_from_previous\"\n ].field.disabled = True\n\n\nclass MemberForm(forms.ModelForm):\n \"\"\"Form intended to use in formset for creating multiple membership members.\"\"\"\n\n helper = BootstrapHelper(\n add_cancel_button=False, form_tag=False, add_submit_button=False\n )\n helper_empty_form = BootstrapHelper(\n add_cancel_button=False, form_tag=False, add_submit_button=False\n )\n\n class Meta:\n model = Member\n fields = [\n \"organization\",\n \"role\",\n ]\n widgets = {\n \"organization\": ModelSelect2Widget(data_view=\"organization-lookup\"),\n \"role\": ModelSelect2Widget(data_view=\"memberrole-lookup\"),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up layout objects for the helpers - they're identical except for\n # visibility of the delete checkbox\n self.helper.layout = self.helper.build_default_layout(self)\n self.helper_empty_form.layout = self.helper.build_default_layout(self)\n self.helper.layout.append(Field(\"id\"))\n self.helper.layout.append(Field(\"DELETE\")) # visible; formset adds it\n self.helper_empty_form.layout.append(Field(\"id\"))\n self.helper_empty_form.layout.append(\n Div(Field(\"DELETE\"), css_class=\"d-none\") # hidden\n )\n\n\nclass MembershipTaskForm(forms.ModelForm):\n \"\"\"Form intended to use in formset for creating multiple membership members.\"\"\"\n\n helper = BootstrapHelper(\n add_cancel_button=False, form_tag=False, add_submit_button=False\n )\n helper_empty_form = BootstrapHelper(\n add_cancel_button=False, form_tag=False, add_submit_button=False\n )\n\n class Meta:\n model = MembershipTask\n fields = [\n \"person\",\n \"role\",\n ]\n widgets = {\n \"person\": ModelSelect2Widget(data_view=\"person-lookup\"),\n \"role\": ModelSelect2Widget(data_view=\"membershippersonrole-lookup\"),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set up layout objects for the helpers - they're identical except for\n # visibility of the delete checkbox\n self.helper.layout = 
self.helper.build_default_layout(self)\n self.helper_empty_form.layout = self.helper.build_default_layout(self)\n self.helper.layout.append(Field(\"id\"))\n self.helper.layout.append(Field(\"DELETE\")) # visible; formset adds it\n self.helper_empty_form.layout.append(Field(\"id\"))\n self.helper_empty_form.layout.append(\n Div(Field(\"DELETE\"), css_class=\"d-none\") # hidden\n )\n\n\nclass MembershipExtensionForm(forms.Form):\n agreement_start = forms.DateField(disabled=True, required=False)\n agreement_end = forms.DateField(disabled=True, required=False)\n extension = forms.IntegerField(\n min_value=1,\n required=True,\n help_text=\"Number of days the agreement should be extended.\",\n )\n new_agreement_end = forms.DateField(disabled=True, required=False)\n\n helper = BootstrapHelper()\n\n class Media:\n js = (\"membership_extend.js\", \"date_yyyymmdd.js\")\n\n\n# ----------------------------------------------------------\n# Signals\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=OrganizationCreateForm,\n)(form_saved_add_comment)\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=MembershipCreateForm,\n)(form_saved_add_comment)\n", "path": "amy/fiscal/forms.py"}]} | 3,773 | 901 |
gh_patches_debug_19377 | rasdani/github-patches | git_diff | scrapy__scrapy-5526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Response.headers loses data on multiple values
https://github.com/scrapy/scrapy/issues/1262 reported that by default `response.headers` would only expose the first value of a header, e.g. when cast as a `dict`, acknowledging that `response.headers.getlist` could be used instead to get all values.
I have just found out that the latter is not true:
```python
>>> from scrapy.http import Response
>>> response = Response("https://example.com", headers=(("a", "b"), ("a", "c")))
>>> response.headers.getlist("a")
[b'c']
```
I could verify the issue happening as far back as Scrapy 1.6, so it does not look like a recent bug.
</issue>
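The intended behaviour is easier to see with a toy version of the problem: when several pairs share a key, each normalised value has to be appended to the stored list rather than replacing it. The sketch below is only an illustration of that idea — the class and method names are made up and it is not Scrapy's `Headers`:

```python
# Toy illustration: accumulate repeated header values instead of overwriting.
class ToyHeaders(dict):
    def update_pairs(self, pairs):
        for key, value in pairs:
            # setdefault keeps earlier values; plain assignment would drop them
            self.setdefault(key.title(), []).append(value)

h = ToyHeaders()
h.update_pairs((("a", "b"), ("a", "c")))
print(h["A"])  # ['b', 'c'] -- both values survive
```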
<code>
[start of scrapy/http/headers.py]
1 from w3lib.http import headers_dict_to_raw
2 from scrapy.utils.datatypes import CaselessDict
3 from scrapy.utils.python import to_unicode
4
5
6 class Headers(CaselessDict):
7 """Case insensitive http headers dictionary"""
8
9 def __init__(self, seq=None, encoding='utf-8'):
10 self.encoding = encoding
11 super().__init__(seq)
12
13 def normkey(self, key):
14 """Normalize key to bytes"""
15 return self._tobytes(key.title())
16
17 def normvalue(self, value):
18 """Normalize values to bytes"""
19 if value is None:
20 value = []
21 elif isinstance(value, (str, bytes)):
22 value = [value]
23 elif not hasattr(value, '__iter__'):
24 value = [value]
25
26 return [self._tobytes(x) for x in value]
27
28 def _tobytes(self, x):
29 if isinstance(x, bytes):
30 return x
31 elif isinstance(x, str):
32 return x.encode(self.encoding)
33 elif isinstance(x, int):
34 return str(x).encode(self.encoding)
35 else:
36 raise TypeError(f'Unsupported value type: {type(x)}')
37
38 def __getitem__(self, key):
39 try:
40 return super().__getitem__(key)[-1]
41 except IndexError:
42 return None
43
44 def get(self, key, def_val=None):
45 try:
46 return super().get(key, def_val)[-1]
47 except IndexError:
48 return None
49
50 def getlist(self, key, def_val=None):
51 try:
52 return super().__getitem__(key)
53 except KeyError:
54 if def_val is not None:
55 return self.normvalue(def_val)
56 return []
57
58 def setlist(self, key, list_):
59 self[key] = list_
60
61 def setlistdefault(self, key, default_list=()):
62 return self.setdefault(key, default_list)
63
64 def appendlist(self, key, value):
65 lst = self.getlist(key)
66 lst.extend(self.normvalue(value))
67 self[key] = lst
68
69 def items(self):
70 return ((k, self.getlist(k)) for k in self.keys())
71
72 def values(self):
73 return [self[k] for k in self.keys()]
74
75 def to_string(self):
76 return headers_dict_to_raw(self)
77
78 def to_unicode_dict(self):
79 """ Return headers as a CaselessDict with unicode keys
80 and unicode values. Multiple values are joined with ','.
81 """
82 return CaselessDict(
83 (to_unicode(key, encoding=self.encoding),
84 to_unicode(b','.join(value), encoding=self.encoding))
85 for key, value in self.items())
86
87 def __copy__(self):
88 return self.__class__(self)
89 copy = __copy__
90
[end of scrapy/http/headers.py]
</code>
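For reference, the asymmetry can be reproduced directly against the class listed above (assuming Scrapy is importable): `appendlist` accumulates values, while passing repeated keys to the constructor goes through `update`/`normvalue` and keeps only the last one.

```python
# Assumes the Headers class shown above (scrapy/http/headers.py).
from scrapy.http.headers import Headers

h = Headers()
h.appendlist("a", "b")
h.appendlist("a", "c")
print(h.getlist("a"))                                  # [b'b', b'c']
print(Headers((("a", "b"), ("a", "c"))).getlist("a"))  # [b'c'] -- values lost with the current code
```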
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py
--- a/scrapy/http/headers.py
+++ b/scrapy/http/headers.py
@@ -1,3 +1,5 @@
+from collections.abc import Mapping
+
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaselessDict
from scrapy.utils.python import to_unicode
@@ -10,6 +12,13 @@
self.encoding = encoding
super().__init__(seq)
+ def update(self, seq):
+ seq = seq.items() if isinstance(seq, Mapping) else seq
+ iseq = {}
+ for k, v in seq:
+ iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))
+ super().update(iseq)
+
def normkey(self, key):
"""Normalize key to bytes"""
return self._tobytes(key.title())
@@ -86,4 +95,5 @@
def __copy__(self):
return self.__class__(self)
+
copy = __copy__
| {"golden_diff": "diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py\n--- a/scrapy/http/headers.py\n+++ b/scrapy/http/headers.py\n@@ -1,3 +1,5 @@\n+from collections.abc import Mapping\n+\n from w3lib.http import headers_dict_to_raw\n from scrapy.utils.datatypes import CaselessDict\n from scrapy.utils.python import to_unicode\n@@ -10,6 +12,13 @@\n self.encoding = encoding\n super().__init__(seq)\n \n+ def update(self, seq):\n+ seq = seq.items() if isinstance(seq, Mapping) else seq\n+ iseq = {}\n+ for k, v in seq:\n+ iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))\n+ super().update(iseq)\n+\n def normkey(self, key):\n \"\"\"Normalize key to bytes\"\"\"\n return self._tobytes(key.title())\n@@ -86,4 +95,5 @@\n \n def __copy__(self):\n return self.__class__(self)\n+\n copy = __copy__\n", "issue": "Response.headers loses data on multiple values\nhttps://github.com/scrapy/scrapy/issues/1262 reported that by default `response.headers` would only expose the first value of a header e.g. when casted as a `dict`, acknowledging that `response.headers.getlist` could be used instead to get all values.\r\n\r\nI have just found out that the latter is not true:\r\n\r\n```python\r\n>>> from scrapy.http import Response\r\n>>> response = Response(\"https://example.com\", headers=((\"a\", \"b\"), (\"a\", \"c\")))\r\n>>> response.headers.getlist(\"a\")\r\n[b'c']\r\n```\r\n\r\nI could verify the issue happening as far back as Scrapy 1.6, so it does not look like a recent bug.\n", "before_files": [{"content": "from w3lib.http import headers_dict_to_raw\nfrom scrapy.utils.datatypes import CaselessDict\nfrom scrapy.utils.python import to_unicode\n\n\nclass Headers(CaselessDict):\n \"\"\"Case insensitive http headers dictionary\"\"\"\n\n def __init__(self, seq=None, encoding='utf-8'):\n self.encoding = encoding\n super().__init__(seq)\n\n def normkey(self, key):\n \"\"\"Normalize key to bytes\"\"\"\n return self._tobytes(key.title())\n\n def normvalue(self, value):\n \"\"\"Normalize values to bytes\"\"\"\n if value is None:\n value = []\n elif isinstance(value, (str, bytes)):\n value = [value]\n elif not hasattr(value, '__iter__'):\n value = [value]\n\n return [self._tobytes(x) for x in value]\n\n def _tobytes(self, x):\n if isinstance(x, bytes):\n return x\n elif isinstance(x, str):\n return x.encode(self.encoding)\n elif isinstance(x, int):\n return str(x).encode(self.encoding)\n else:\n raise TypeError(f'Unsupported value type: {type(x)}')\n\n def __getitem__(self, key):\n try:\n return super().__getitem__(key)[-1]\n except IndexError:\n return None\n\n def get(self, key, def_val=None):\n try:\n return super().get(key, def_val)[-1]\n except IndexError:\n return None\n\n def getlist(self, key, def_val=None):\n try:\n return super().__getitem__(key)\n except KeyError:\n if def_val is not None:\n return self.normvalue(def_val)\n return []\n\n def setlist(self, key, list_):\n self[key] = list_\n\n def setlistdefault(self, key, default_list=()):\n return self.setdefault(key, default_list)\n\n def appendlist(self, key, value):\n lst = self.getlist(key)\n lst.extend(self.normvalue(value))\n self[key] = lst\n\n def items(self):\n return ((k, self.getlist(k)) for k in self.keys())\n\n def values(self):\n return [self[k] for k in self.keys()]\n\n def to_string(self):\n return headers_dict_to_raw(self)\n\n def to_unicode_dict(self):\n \"\"\" Return headers as a CaselessDict with unicode keys\n and unicode values. 
Multiple values are joined with ','.\n \"\"\"\n return CaselessDict(\n (to_unicode(key, encoding=self.encoding),\n to_unicode(b','.join(value), encoding=self.encoding))\n for key, value in self.items())\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n", "path": "scrapy/http/headers.py"}]} | 1,444 | 237 |
gh_patches_debug_424 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[V3] Error in command repl
### Type:
- [ ] Suggestion
- [x] Bug
### Brief description of the problem
So I tried the core repl function on V3 for the first time, set a very basic value, and got an error.
### Steps to reproduce
1. `=repl`
2. `level = 1`

Full error:
```Py
Exception in command 'repl'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 62, in wrapped
ret = yield from coro(*args, **kwargs)
File "/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py", line 273, in repl
msg = self.sanitize_output(ctx, msg)
File "/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py", line 64, in sanitize_output
result = input_.replace(token, r)
AttributeError: 'NoneType' object has no attribute 'replace'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/bot.py", line 886, in invoke
yield from ctx.command.invoke(ctx)
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 489, in invoke
yield from injected(*ctx.args, **ctx.kwargs)
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 71, in wrapped
raise CommandInvokeError(e) from e
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: AttributeError: 'NoneType' object has no attribute 'replace'
```
</issue>
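The traceback shows that `msg` reaches `sanitize_output` as `None` when the evaluated statement produces neither stdout output nor a result. Below is a minimal, standalone sketch of that failure mode and of a defensive guard; the function is a simplified stand-in, not the actual Red-DiscordBot code:

```python
def sanitize_output(token: str, text) -> str:
    # Guard: a statement like `level = 1` prints nothing and returns None,
    # so `text` can arrive here as None.
    if text is None:
        text = ""
    return text.replace(token, "[EXPUNGED]")

print(repr(sanitize_output("secret-token", None)))              # '' instead of AttributeError
print(sanitize_output("secret-token", "my secret-token here"))  # 'my [EXPUNGED] here'
```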
<code>
[start of redbot/core/dev_commands.py]
1 import asyncio
2 import inspect
3 import io
4 import textwrap
5 import traceback
6 from contextlib import redirect_stdout
7 from copy import copy
8
9 import discord
10 from discord.ext import commands
11 from . import checks
12 from .i18n import CogI18n
13 from .utils.chat_formatting import box, pagify
14 """
15 Notice:
16
17 95% of the below code came from R.Danny which can be found here:
18
19 https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
20 """
21
22 _ = CogI18n("Dev", __file__)
23
24
25 class Dev:
26 """Various development focused utilities."""
27
28 def __init__(self):
29 self._last_result = None
30 self.sessions = set()
31
32 @staticmethod
33 def cleanup_code(content):
34 """Automatically removes code blocks from the code."""
35 # remove ```py\n```
36 if content.startswith('```') and content.endswith('```'):
37 return '\n'.join(content.split('\n')[1:-1])
38
39 # remove `foo`
40 return content.strip('` \n')
41
42 @staticmethod
43 def get_syntax_error(e):
44 """Format a syntax error to send to the user.
45
46 Returns a string representation of the error formatted as a codeblock.
47 """
48 if e.text is None:
49 return box('{0.__class__.__name__}: {0}'.format(e), lang="py")
50 return box(
51 '{0.text}{1:>{0.offset}}\n{2}: {0}'
52 ''.format(e, '^', type(e).__name__),
53 lang="py")
54
55 @staticmethod
56 def get_pages(msg: str):
57 """Pagify the given message for output to the user."""
58 return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
59
60 @staticmethod
61 def sanitize_output(ctx: commands.Context, input_: str) -> str:
62 """Hides the bot's token from a string."""
63 token = ctx.bot.http.token
64 r = "[EXPUNGED]"
65 result = input_.replace(token, r)
66 result = result.replace(token.lower(), r)
67 result = result.replace(token.upper(), r)
68 return result
69
70 @commands.command()
71 @checks.is_owner()
72 async def debug(self, ctx, *, code):
73 """Evaluate a statement of python code.
74
75 The bot will always respond with the return value of the code.
76 If the return value of the code is a coroutine, it will be awaited,
77 and the result of that will be the bot's response.
78
79 Note: Only one statement may be evaluated. Using await, yield or
80 similar restricted keywords will result in a syntax error. For multiple
81 lines or asynchronous code, see [p]repl or [p]eval.
82
83 Environment Variables:
84 ctx - command invokation context
85 bot - bot object
86 channel - the current channel object
87 author - command author's member object
88 message - the command's message object
89 discord - discord.py library
90 commands - discord.py commands extension
91 _ - The result of the last dev command.
92 """
93 env = {
94 'bot': ctx.bot,
95 'ctx': ctx,
96 'channel': ctx.channel,
97 'author': ctx.author,
98 'guild': ctx.guild,
99 'message': ctx.message,
100 'discord': discord,
101 'commands': commands,
102 '_': self._last_result
103 }
104
105 code = self.cleanup_code(code)
106
107 try:
108 result = eval(code, env)
109 except SyntaxError as e:
110 await ctx.send(self.get_syntax_error(e))
111 return
112 except Exception as e:
113 await ctx.send(
114 box('{}: {!s}'.format(type(e).__name__, e), lang='py'))
115 return
116
117 if asyncio.iscoroutine(result):
118 result = await result
119
120 self._last_result = result
121
122 result = self.sanitize_output(ctx, str(result))
123
124 await ctx.send_interactive(self.get_pages(result), box_lang="py")
125
126 @commands.command(name='eval')
127 @checks.is_owner()
128 async def _eval(self, ctx, *, body: str):
129 """Execute asynchronous code.
130
131 This command wraps code into the body of an async function and then
132 calls and awaits it. The bot will respond with anything printed to
133 stdout, as well as the return value of the function.
134
135 The code can be within a codeblock, inline code or neither, as long
136 as they are not mixed and they are formatted correctly.
137
138 Environment Variables:
139 ctx - command invokation context
140 bot - bot object
141 channel - the current channel object
142 author - command author's member object
143 message - the command's message object
144 discord - discord.py library
145 commands - discord.py commands extension
146 _ - The result of the last dev command.
147 """
148 env = {
149 'bot': ctx.bot,
150 'ctx': ctx,
151 'channel': ctx.channel,
152 'author': ctx.author,
153 'guild': ctx.guild,
154 'message': ctx.message,
155 'discord': discord,
156 'commands': commands,
157 '_': self._last_result
158 }
159
160 body = self.cleanup_code(body)
161 stdout = io.StringIO()
162
163 to_compile = 'async def func():\n%s' % textwrap.indent(body, ' ')
164
165 try:
166 exec(to_compile, env)
167 except SyntaxError as e:
168 return await ctx.send(self.get_syntax_error(e))
169
170 func = env['func']
171 result = None
172 try:
173 with redirect_stdout(stdout):
174 result = await func()
175 except:
176 printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
177 else:
178 printed = stdout.getvalue()
179 await ctx.tick()
180
181 if result is not None:
182 self._last_result = result
183 msg = "{}{}".format(printed, result)
184 else:
185 msg = printed
186 msg = self.sanitize_output(ctx, msg)
187
188 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
189
190 @commands.command()
191 @checks.is_owner()
192 async def repl(self, ctx):
193 """Open an interactive REPL.
194
195 The REPL will only recognise code as messages which start with a
196 backtick. This includes codeblocks, and as such multiple lines can be
197 evaluated.
198
199 You may not await any code in this REPL unless you define it inside an
200 async function.
201 """
202 variables = {
203 'ctx': ctx,
204 'bot': ctx.bot,
205 'message': ctx.message,
206 'guild': ctx.guild,
207 'channel': ctx.channel,
208 'author': ctx.author,
209 '_': None,
210 }
211
212 if ctx.channel.id in self.sessions:
213 await ctx.send(_('Already running a REPL session in this channel. '
214 'Exit it with `quit`.'))
215 return
216
217 self.sessions.add(ctx.channel.id)
218 await ctx.send(_('Enter code to execute or evaluate.'
219 ' `exit()` or `quit` to exit.'))
220
221 msg_check = lambda m: (m.author == ctx.author and
222 m.channel == ctx.channel and
223 m.content.startswith('`'))
224
225 while True:
226 response = await ctx.bot.wait_for("message", check=msg_check)
227
228 cleaned = self.cleanup_code(response.content)
229
230 if cleaned in ('quit', 'exit', 'exit()'):
231 await ctx.send('Exiting.')
232 self.sessions.remove(ctx.channel.id)
233 return
234
235 executor = exec
236 if cleaned.count('\n') == 0:
237 # single statement, potentially 'eval'
238 try:
239 code = compile(cleaned, '<repl session>', 'eval')
240 except SyntaxError:
241 pass
242 else:
243 executor = eval
244
245 if executor is exec:
246 try:
247 code = compile(cleaned, '<repl session>', 'exec')
248 except SyntaxError as e:
249 await ctx.send(self.get_syntax_error(e))
250 continue
251
252 variables['message'] = response
253
254 stdout = io.StringIO()
255
256 msg = None
257
258 try:
259 with redirect_stdout(stdout):
260 result = executor(code, variables)
261 if inspect.isawaitable(result):
262 result = await result
263 except:
264 value = stdout.getvalue()
265 msg = "{}{}".format(value, traceback.format_exc())
266 else:
267 value = stdout.getvalue()
268 if result is not None:
269 msg = "{}{}".format(value, result)
270 variables['_'] = result
271 elif value:
272 msg = "{}".format(value)
273
274 msg = self.sanitize_output(ctx, msg)
275
276 try:
277 await ctx.send_interactive(self.get_pages(msg), box_lang="py")
278 except discord.Forbidden:
279 pass
280 except discord.HTTPException as e:
281 await ctx.send(_('Unexpected error: `{}`').format(e))
282
283 @commands.command()
284 @checks.is_owner()
285 async def mock(self, ctx, user: discord.Member, *, command):
286 """Mock another user invoking a command.
287
288 The prefix must not be entered.
289 """
290 msg = copy(ctx.message)
291 msg.author = user
292 msg.content = ctx.prefix + command
293
294 ctx.bot.dispatch('message', msg)
295
296 @commands.command(name="mockmsg")
297 @checks.is_owner()
298 async def mock_msg(self, ctx, user: discord.Member, *, content: str):
299 """Dispatch a message event as if it were sent by a different user.
300
301 Only reads the raw content of the message. Attachments, embeds etc. are
302 ignored.
303 """
304 old_author = ctx.author
305 old_content = ctx.message.content
306 ctx.message.author = user
307 ctx.message.content = content
308
309 ctx.bot.dispatch("message", ctx.message)
310
311 # If we change the author and content back too quickly,
312 # the bot won't process the mocked message in time.
313 await asyncio.sleep(2)
314 ctx.message.author = old_author
315 ctx.message.content = old_content
316
[end of redbot/core/dev_commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py
--- a/redbot/core/dev_commands.py
+++ b/redbot/core/dev_commands.py
@@ -253,7 +253,7 @@
stdout = io.StringIO()
- msg = None
+ msg = ""
try:
with redirect_stdout(stdout):
| {"golden_diff": "diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py\n--- a/redbot/core/dev_commands.py\n+++ b/redbot/core/dev_commands.py\n@@ -253,7 +253,7 @@\n \n stdout = io.StringIO()\n \n- msg = None\n+ msg = \"\"\n \n try:\n with redirect_stdout(stdout):\n", "issue": "[V3] Error in command repl\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\n\r\nSo I first tried to use the core repl function on V3 for the first time. I set a very basic value and got an error\r\n\r\n### Steps to reproduce\r\n\r\n1. `=repl`\r\n2. `level = 1`\r\n\r\n\r\n\r\nFull error:\r\n\r\n```Py\r\nException in command 'repl'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py\", line 62, in wrapped\r\n ret = yield from coro(*args, **kwargs)\r\n File \"/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py\", line 273, in repl\r\n msg = self.sanitize_output(ctx, msg)\r\n File \"/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py\", line 64, in sanitize_output\r\n result = input_.replace(token, r)\r\nAttributeError: 'NoneType' object has no attribute 'replace'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/discord/ext/commands/bot.py\", line 886, in invoke\r\n yield from ctx.command.invoke(ctx)\r\n File \"/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py\", line 489, in invoke\r\n yield from injected(*ctx.args, **ctx.kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py\", line 71, in wrapped\r\n raise CommandInvokeError(e) from e\r\ndiscord.ext.commands.errors.CommandInvokeError: Command raised an exception: AttributeError: 'NoneType' object has no attribute 'replace'\r\n```\n", "before_files": [{"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\nfrom discord.ext import commands\nfrom . 
import checks\nfrom .i18n import CogI18n\nfrom .utils.chat_formatting import box, pagify\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = CogI18n(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box('{0.__class__.__name__}: {0}'.format(e), lang=\"py\")\n return box(\n '{0.text}{1:>{0.offset}}\\n{2}: {0}'\n ''.format(e, '^', type(e).__name__),\n lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(\n box('{}: {!s}'.format(type(e).__name__, e), lang='py'))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name='eval')\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. 
The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = 'async def func():\\n%s' % textwrap.indent(body, ' ')\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env['func']\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n 'ctx': ctx,\n 'bot': ctx.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(_('Already running a REPL session in this channel. 
'\n 'Exit it with `quit`.'))\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_('Enter code to execute or evaluate.'\n ' `exit()` or `quit` to exit.'))\n\n msg_check = lambda m: (m.author == ctx.author and\n m.channel == ctx.channel and\n m.content.startswith('`'))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n stdout = io.StringIO()\n\n msg = None\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables['_'] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_('Unexpected error: `{}`').format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch('message', msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n", "path": "redbot/core/dev_commands.py"}]} | 4,071 | 80 |
gh_patches_debug_17818 | rasdani/github-patches | git_diff | oobabooga__text-generation-webui-5722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`user_bio` is None by default, causing an error when replacing the character names
### Describe the bug
Calling the `/v1/chat/completions` API (non-stream mode) without `user_bio`.
----
The new `user_bio` parameter in API chat mode raises an error because it defaults to `None`.
https://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/extensions/openai/typing.py#L110
----
As a result, `chat.py` cannot replace the character names correctly.
https://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/modules/chat.py#L97
We get this error:

-----
In the web UI, the default is an empty string.
https://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/modules/shared.py#L60
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Any request in `/v1/chat/completions`
### Screenshot
_No response_
### Logs
```shell
text = text.replace('{{user}}', name1).replace('{{char}}', name2)
AttributeError: 'NoneType' object has no attribute 'replace'
```
### System Info
```shell
None
```
</issue>
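The failing line quoted in the log is a plain `str.replace` chain, so any `None` coming from the API blows up before the template is rendered. A small standalone sketch of a downstream guard (the helper name is hypothetical; coalescing to an empty string mirrors the web UI default mentioned above):

```python
def replace_character_names(text, name1: str, name2: str) -> str:
    text = text or ""  # user_bio can arrive as None from the API
    return text.replace("{{user}}", name1).replace("{{char}}", name2)

print(repr(replace_character_names(None, "You", "Assistant")))                 # ''
print(replace_character_names("{{user}} likes {{char}}", "You", "Assistant"))  # 'You likes Assistant'
```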
<code>
[start of extensions/openai/typing.py]
1 import json
2 import time
3 from typing import Dict, List
4
5 from pydantic import BaseModel, Field
6
7
8 class GenerationOptions(BaseModel):
9 preset: str | None = Field(default=None, description="The name of a file under text-generation-webui/presets (without the .yaml extension). The sampling parameters that get overwritten by this option are the keys in the default_preset() function in modules/presets.py.")
10 min_p: float = 0
11 dynamic_temperature: bool = False
12 dynatemp_low: float = 1
13 dynatemp_high: float = 1
14 dynatemp_exponent: float = 1
15 smoothing_factor: float = 0
16 smoothing_curve: float = 1
17 top_k: int = 0
18 repetition_penalty: float = 1
19 repetition_penalty_range: int = 1024
20 typical_p: float = 1
21 tfs: float = 1
22 top_a: float = 0
23 epsilon_cutoff: float = 0
24 eta_cutoff: float = 0
25 guidance_scale: float = 1
26 negative_prompt: str = ''
27 penalty_alpha: float = 0
28 mirostat_mode: int = 0
29 mirostat_tau: float = 5
30 mirostat_eta: float = 0.1
31 temperature_last: bool = False
32 do_sample: bool = True
33 seed: int = -1
34 encoder_repetition_penalty: float = 1
35 no_repeat_ngram_size: int = 0
36 min_length: int = 0
37 num_beams: int = 1
38 length_penalty: float = 1
39 early_stopping: bool = False
40 truncation_length: int = 0
41 max_tokens_second: int = 0
42 prompt_lookup_num_tokens: int = 0
43 custom_token_bans: str = ""
44 sampler_priority: List[str] | str | None = Field(default=None, description="List of samplers where the first items will appear first in the stack. Example: [\"top_k\", \"temperature\", \"top_p\"].")
45 auto_max_new_tokens: bool = False
46 ban_eos_token: bool = False
47 add_bos_token: bool = True
48 skip_special_tokens: bool = True
49 grammar_string: str = ""
50
51
52 class CompletionRequestParams(BaseModel):
53 model: str | None = Field(default=None, description="Unused parameter. To change the model, use the /v1/internal/model/load endpoint.")
54 prompt: str | List[str]
55 best_of: int | None = Field(default=1, description="Unused parameter.")
56 echo: bool | None = False
57 frequency_penalty: float | None = 0
58 logit_bias: dict | None = None
59 logprobs: int | None = None
60 max_tokens: int | None = 16
61 n: int | None = Field(default=1, description="Unused parameter.")
62 presence_penalty: float | None = 0
63 stop: str | List[str] | None = None
64 stream: bool | None = False
65 suffix: str | None = None
66 temperature: float | None = 1
67 top_p: float | None = 1
68 user: str | None = Field(default=None, description="Unused parameter.")
69
70
71 class CompletionRequest(GenerationOptions, CompletionRequestParams):
72 pass
73
74
75 class CompletionResponse(BaseModel):
76 id: str
77 choices: List[dict]
78 created: int = int(time.time())
79 model: str
80 object: str = "text_completion"
81 usage: dict
82
83
84 class ChatCompletionRequestParams(BaseModel):
85 messages: List[dict]
86 model: str | None = Field(default=None, description="Unused parameter. To change the model, use the /v1/internal/model/load endpoint.")
87 frequency_penalty: float | None = 0
88 function_call: str | dict | None = Field(default=None, description="Unused parameter.")
89 functions: List[dict] | None = Field(default=None, description="Unused parameter.")
90 logit_bias: dict | None = None
91 max_tokens: int | None = None
92 n: int | None = Field(default=1, description="Unused parameter.")
93 presence_penalty: float | None = 0
94 stop: str | List[str] | None = None
95 stream: bool | None = False
96 temperature: float | None = 1
97 top_p: float | None = 1
98 user: str | None = Field(default=None, description="Unused parameter.")
99
100 mode: str = Field(default='instruct', description="Valid options: instruct, chat, chat-instruct.")
101
102 instruction_template: str | None = Field(default=None, description="An instruction template defined under text-generation-webui/instruction-templates. If not set, the correct template will be automatically obtained from the model metadata.")
103 instruction_template_str: str | None = Field(default=None, description="A Jinja2 instruction template. If set, will take precedence over everything else.")
104
105 character: str | None = Field(default=None, description="A character defined under text-generation-webui/characters. If not set, the default \"Assistant\" character will be used.")
106 bot_name: str | None = Field(default=None, description="Overwrites the value set by character field.", alias="name2")
107 context: str | None = Field(default=None, description="Overwrites the value set by character field.")
108 greeting: str | None = Field(default=None, description="Overwrites the value set by character field.")
109 user_name: str | None = Field(default=None, description="Your name (the user). By default, it's \"You\".", alias="name1")
110 user_bio: str | None = Field(default=None, description="The user description/personality.")
111 chat_template_str: str | None = Field(default=None, description="Jinja2 template for chat.")
112
113 chat_instruct_command: str | None = None
114
115 continue_: bool = Field(default=False, description="Makes the last bot message in the history be continued instead of starting a new message.")
116
117
118 class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):
119 pass
120
121
122 class ChatCompletionResponse(BaseModel):
123 id: str
124 choices: List[dict]
125 created: int = int(time.time())
126 model: str
127 object: str = "chat.completion"
128 usage: dict
129
130
131 class EmbeddingsRequest(BaseModel):
132 input: str | List[str] | List[int] | List[List[int]]
133 model: str | None = Field(default=None, description="Unused parameter. To change the model, set the OPENEDAI_EMBEDDING_MODEL and OPENEDAI_EMBEDDING_DEVICE environment variables before starting the server.")
134 encoding_format: str = Field(default="float", description="Can be float or base64.")
135 user: str | None = Field(default=None, description="Unused parameter.")
136
137
138 class EmbeddingsResponse(BaseModel):
139 index: int
140 embedding: List[float]
141 object: str = "embedding"
142
143
144 class EncodeRequest(BaseModel):
145 text: str
146
147
148 class EncodeResponse(BaseModel):
149 tokens: List[int]
150 length: int
151
152
153 class DecodeRequest(BaseModel):
154 tokens: List[int]
155
156
157 class DecodeResponse(BaseModel):
158 text: str
159
160
161 class TokenCountResponse(BaseModel):
162 length: int
163
164
165 class LogitsRequestParams(BaseModel):
166 prompt: str
167 use_samplers: bool = False
168 top_logits: int | None = 50
169 frequency_penalty: float | None = 0
170 max_tokens: int | None = 16
171 presence_penalty: float | None = 0
172 temperature: float | None = 1
173 top_p: float | None = 1
174
175
176 class LogitsRequest(GenerationOptions, LogitsRequestParams):
177 pass
178
179
180 class LogitsResponse(BaseModel):
181 logits: Dict[str, float]
182
183
184 class ModelInfoResponse(BaseModel):
185 model_name: str
186 lora_names: List[str]
187
188
189 class ModelListResponse(BaseModel):
190 model_names: List[str]
191
192
193 class LoadModelRequest(BaseModel):
194 model_name: str
195 args: dict | None = None
196 settings: dict | None = None
197
198
199 class LoraListResponse(BaseModel):
200 lora_names: List[str]
201
202
203 class LoadLorasRequest(BaseModel):
204 lora_names: List[str]
205
206
207 def to_json(obj):
208 return json.dumps(obj.__dict__, indent=4)
209
210
211 def to_dict(obj):
212 return obj.__dict__
213
[end of extensions/openai/typing.py]
</code>
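The practical difference between the two defaults is easy to demonstrate with a small pydantic model; this is illustrative only, `Bio` is not a class from this project:

```python
from pydantic import BaseModel, Field

class Bio(BaseModel):
    user_bio: str | None = Field(default='')        # safe: .replace() works downstream
    user_bio_old: str | None = Field(default=None)  # current behaviour in typing.py

b = Bio()
print(repr(b.user_bio), repr(b.user_bio_old))  # '' None
```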
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/openai/typing.py b/extensions/openai/typing.py
--- a/extensions/openai/typing.py
+++ b/extensions/openai/typing.py
@@ -107,7 +107,7 @@
context: str | None = Field(default=None, description="Overwrites the value set by character field.")
greeting: str | None = Field(default=None, description="Overwrites the value set by character field.")
user_name: str | None = Field(default=None, description="Your name (the user). By default, it's \"You\".", alias="name1")
- user_bio: str | None = Field(default=None, description="The user description/personality.")
+ user_bio: str | None = Field(default='', description="The user description/personality.")
chat_template_str: str | None = Field(default=None, description="Jinja2 template for chat.")
chat_instruct_command: str | None = None
| {"golden_diff": "diff --git a/extensions/openai/typing.py b/extensions/openai/typing.py\n--- a/extensions/openai/typing.py\n+++ b/extensions/openai/typing.py\n@@ -107,7 +107,7 @@\n context: str | None = Field(default=None, description=\"Overwrites the value set by character field.\")\n greeting: str | None = Field(default=None, description=\"Overwrites the value set by character field.\")\n user_name: str | None = Field(default=None, description=\"Your name (the user). By default, it's \\\"You\\\".\", alias=\"name1\")\n- user_bio: str | None = Field(default=None, description=\"The user description/personality.\")\n+ user_bio: str | None = Field(default='', description=\"The user description/personality.\")\n chat_template_str: str | None = Field(default=None, description=\"Jinja2 template for chat.\")\n \n chat_instruct_command: str | None = None\n", "issue": "`user_bio` is None by default, get an error when replacing the character names\n### Describe the bug\r\nUsing API `/v1/chat/completions` (None stream mode) without `user_bio`.\r\n\r\n----\r\n\r\nThe new parameter `user_bio` in API chat mode raises an error because it's `None` as default.\r\nhttps://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/extensions/openai/typing.py#L110\r\n\r\n----\r\n\r\nThen, in `chat.py` can't replace the names correctly.\r\nhttps://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/modules/chat.py#L97\r\n\r\nget this error\r\n\r\n\r\n-----\r\n\r\nAn empty string as default in webui.\r\nhttps://github.com/oobabooga/text-generation-webui/blob/7cf1402bde48fd76af501d5efecb34227bf4d082/modules/shared.py#L60\r\n\r\n\r\n\r\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Reproduction\r\n\r\nAny request in `/v1/chat/completions`\r\n\r\n### Screenshot\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n```shell\r\n text = text.replace('{{user}}', name1).replace('{{char}}', name2)\r\nAttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\n\r\n### System Info\r\n\r\n```shell\r\nNone\r\n```\r\n\n", "before_files": [{"content": "import json\nimport time\nfrom typing import Dict, List\n\nfrom pydantic import BaseModel, Field\n\n\nclass GenerationOptions(BaseModel):\n preset: str | None = Field(default=None, description=\"The name of a file under text-generation-webui/presets (without the .yaml extension). 
The sampling parameters that get overwritten by this option are the keys in the default_preset() function in modules/presets.py.\")\n min_p: float = 0\n dynamic_temperature: bool = False\n dynatemp_low: float = 1\n dynatemp_high: float = 1\n dynatemp_exponent: float = 1\n smoothing_factor: float = 0\n smoothing_curve: float = 1\n top_k: int = 0\n repetition_penalty: float = 1\n repetition_penalty_range: int = 1024\n typical_p: float = 1\n tfs: float = 1\n top_a: float = 0\n epsilon_cutoff: float = 0\n eta_cutoff: float = 0\n guidance_scale: float = 1\n negative_prompt: str = ''\n penalty_alpha: float = 0\n mirostat_mode: int = 0\n mirostat_tau: float = 5\n mirostat_eta: float = 0.1\n temperature_last: bool = False\n do_sample: bool = True\n seed: int = -1\n encoder_repetition_penalty: float = 1\n no_repeat_ngram_size: int = 0\n min_length: int = 0\n num_beams: int = 1\n length_penalty: float = 1\n early_stopping: bool = False\n truncation_length: int = 0\n max_tokens_second: int = 0\n prompt_lookup_num_tokens: int = 0\n custom_token_bans: str = \"\"\n sampler_priority: List[str] | str | None = Field(default=None, description=\"List of samplers where the first items will appear first in the stack. Example: [\\\"top_k\\\", \\\"temperature\\\", \\\"top_p\\\"].\")\n auto_max_new_tokens: bool = False\n ban_eos_token: bool = False\n add_bos_token: bool = True\n skip_special_tokens: bool = True\n grammar_string: str = \"\"\n\n\nclass CompletionRequestParams(BaseModel):\n model: str | None = Field(default=None, description=\"Unused parameter. To change the model, use the /v1/internal/model/load endpoint.\")\n prompt: str | List[str]\n best_of: int | None = Field(default=1, description=\"Unused parameter.\")\n echo: bool | None = False\n frequency_penalty: float | None = 0\n logit_bias: dict | None = None\n logprobs: int | None = None\n max_tokens: int | None = 16\n n: int | None = Field(default=1, description=\"Unused parameter.\")\n presence_penalty: float | None = 0\n stop: str | List[str] | None = None\n stream: bool | None = False\n suffix: str | None = None\n temperature: float | None = 1\n top_p: float | None = 1\n user: str | None = Field(default=None, description=\"Unused parameter.\")\n\n\nclass CompletionRequest(GenerationOptions, CompletionRequestParams):\n pass\n\n\nclass CompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"text_completion\"\n usage: dict\n\n\nclass ChatCompletionRequestParams(BaseModel):\n messages: List[dict]\n model: str | None = Field(default=None, description=\"Unused parameter. To change the model, use the /v1/internal/model/load endpoint.\")\n frequency_penalty: float | None = 0\n function_call: str | dict | None = Field(default=None, description=\"Unused parameter.\")\n functions: List[dict] | None = Field(default=None, description=\"Unused parameter.\")\n logit_bias: dict | None = None\n max_tokens: int | None = None\n n: int | None = Field(default=1, description=\"Unused parameter.\")\n presence_penalty: float | None = 0\n stop: str | List[str] | None = None\n stream: bool | None = False\n temperature: float | None = 1\n top_p: float | None = 1\n user: str | None = Field(default=None, description=\"Unused parameter.\")\n\n mode: str = Field(default='instruct', description=\"Valid options: instruct, chat, chat-instruct.\")\n\n instruction_template: str | None = Field(default=None, description=\"An instruction template defined under text-generation-webui/instruction-templates. 
If not set, the correct template will be automatically obtained from the model metadata.\")\n instruction_template_str: str | None = Field(default=None, description=\"A Jinja2 instruction template. If set, will take precedence over everything else.\")\n\n character: str | None = Field(default=None, description=\"A character defined under text-generation-webui/characters. If not set, the default \\\"Assistant\\\" character will be used.\")\n bot_name: str | None = Field(default=None, description=\"Overwrites the value set by character field.\", alias=\"name2\")\n context: str | None = Field(default=None, description=\"Overwrites the value set by character field.\")\n greeting: str | None = Field(default=None, description=\"Overwrites the value set by character field.\")\n user_name: str | None = Field(default=None, description=\"Your name (the user). By default, it's \\\"You\\\".\", alias=\"name1\")\n user_bio: str | None = Field(default=None, description=\"The user description/personality.\")\n chat_template_str: str | None = Field(default=None, description=\"Jinja2 template for chat.\")\n\n chat_instruct_command: str | None = None\n\n continue_: bool = Field(default=False, description=\"Makes the last bot message in the history be continued instead of starting a new message.\")\n\n\nclass ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):\n pass\n\n\nclass ChatCompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"chat.completion\"\n usage: dict\n\n\nclass EmbeddingsRequest(BaseModel):\n input: str | List[str] | List[int] | List[List[int]]\n model: str | None = Field(default=None, description=\"Unused parameter. To change the model, set the OPENEDAI_EMBEDDING_MODEL and OPENEDAI_EMBEDDING_DEVICE environment variables before starting the server.\")\n encoding_format: str = Field(default=\"float\", description=\"Can be float or base64.\")\n user: str | None = Field(default=None, description=\"Unused parameter.\")\n\n\nclass EmbeddingsResponse(BaseModel):\n index: int\n embedding: List[float]\n object: str = \"embedding\"\n\n\nclass EncodeRequest(BaseModel):\n text: str\n\n\nclass EncodeResponse(BaseModel):\n tokens: List[int]\n length: int\n\n\nclass DecodeRequest(BaseModel):\n tokens: List[int]\n\n\nclass DecodeResponse(BaseModel):\n text: str\n\n\nclass TokenCountResponse(BaseModel):\n length: int\n\n\nclass LogitsRequestParams(BaseModel):\n prompt: str\n use_samplers: bool = False\n top_logits: int | None = 50\n frequency_penalty: float | None = 0\n max_tokens: int | None = 16\n presence_penalty: float | None = 0\n temperature: float | None = 1\n top_p: float | None = 1\n\n\nclass LogitsRequest(GenerationOptions, LogitsRequestParams):\n pass\n\n\nclass LogitsResponse(BaseModel):\n logits: Dict[str, float]\n\n\nclass ModelInfoResponse(BaseModel):\n model_name: str\n lora_names: List[str]\n\n\nclass ModelListResponse(BaseModel):\n model_names: List[str]\n\n\nclass LoadModelRequest(BaseModel):\n model_name: str\n args: dict | None = None\n settings: dict | None = None\n\n\nclass LoraListResponse(BaseModel):\n lora_names: List[str]\n\n\nclass LoadLorasRequest(BaseModel):\n lora_names: List[str]\n\n\ndef to_json(obj):\n return json.dumps(obj.__dict__, indent=4)\n\n\ndef to_dict(obj):\n return obj.__dict__\n", "path": "extensions/openai/typing.py"}]} | 3,371 | 204 |
gh_patches_debug_25871 | rasdani/github-patches | git_diff | apache__airflow-11720 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow for specification of cipher in SftpHook
**Description**
I want to specify the cipher to use when creating an SftpHook. Currently this is not possible because there is no way to propagate the value to CnOpts. 
https://github.com/apache/airflow/blob/master/airflow/providers/sftp/hooks/sftp.py
https://pysftp.readthedocs.io/en/release_0.2.9/pysftp.html?#pysftp.CnOpts
https://pysftp.readthedocs.io/en/release_0.2.9/cookbook.html
**Use case / motivation**
I want to be able to access SFTP servers that have disabled access from connections that use weak ciphers.
</issue>
<code>
[start of airflow/providers/sftp/hooks/sftp.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """This module contains SFTP hook."""
19 import datetime
20 import stat
21 from typing import Dict, List, Optional, Tuple
22
23 import pysftp
24
25 from airflow.providers.ssh.hooks.ssh import SSHHook
26
27
28 class SFTPHook(SSHHook):
29 """
30 This hook is inherited from SSH hook. Please refer to SSH hook for the input
31 arguments.
32
33 Interact with SFTP. Aims to be interchangeable with FTPHook.
34
35 :Pitfalls::
36
37 - In contrast with FTPHook describe_directory only returns size, type and
38 modify. It doesn't return unix.owner, unix.mode, perm, unix.group and
39 unique.
40 - retrieve_file and store_file only take a local full path and not a
41 buffer.
42 - If no mode is passed to create_directory it will be created with 777
43 permissions.
44
45 Errors that may occur throughout but should be handled downstream.
46 """
47
48 def __init__(self, ftp_conn_id: str = 'sftp_default', *args, **kwargs) -> None:
49 kwargs['ssh_conn_id'] = ftp_conn_id
50 super().__init__(*args, **kwargs)
51
52 self.conn = None
53 self.private_key_pass = None
54
55 # Fail for unverified hosts, unless this is explicitly allowed
56 self.no_host_key_check = False
57
58 if self.ssh_conn_id is not None:
59 conn = self.get_connection(self.ssh_conn_id)
60 if conn.extra is not None:
61 extra_options = conn.extra_dejson
62 if 'private_key_pass' in extra_options:
63 self.private_key_pass = extra_options.get('private_key_pass')
64
65 # For backward compatibility
66 # TODO: remove in Airflow 2.1
67 import warnings
68
69 if 'ignore_hostkey_verification' in extra_options:
70 warnings.warn(
71 'Extra option `ignore_hostkey_verification` is deprecated.'
72 'Please use `no_host_key_check` instead.'
73 'This option will be removed in Airflow 2.1',
74 DeprecationWarning,
75 stacklevel=2,
76 )
77 self.no_host_key_check = (
78 str(extra_options['ignore_hostkey_verification']).lower() == 'true'
79 )
80
81 if 'no_host_key_check' in extra_options:
82 self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'
83
84 if 'private_key' in extra_options:
85 warnings.warn(
86 'Extra option `private_key` is deprecated.'
87 'Please use `key_file` instead.'
88 'This option will be removed in Airflow 2.1',
89 DeprecationWarning,
90 stacklevel=2,
91 )
92 self.key_file = extra_options.get('private_key')
93
94 def get_conn(self) -> pysftp.Connection:
95 """Returns an SFTP connection object"""
96 if self.conn is None:
97 cnopts = pysftp.CnOpts()
98 if self.no_host_key_check:
99 cnopts.hostkeys = None
100 cnopts.compression = self.compress
101 conn_params = {
102 'host': self.remote_host,
103 'port': self.port,
104 'username': self.username,
105 'cnopts': cnopts,
106 }
107 if self.password and self.password.strip():
108 conn_params['password'] = self.password
109 if self.key_file:
110 conn_params['private_key'] = self.key_file
111 if self.private_key_pass:
112 conn_params['private_key_pass'] = self.private_key_pass
113
114 self.conn = pysftp.Connection(**conn_params)
115 return self.conn
116
117 def close_conn(self) -> None:
118 """Closes the connection"""
119 if self.conn is not None:
120 self.conn.close()
121 self.conn = None
122
123 def describe_directory(self, path: str) -> Dict[str, Dict[str, str]]:
124 """
125 Returns a dictionary of {filename: {attributes}} for all files
126 on the remote system (where the MLSD command is supported).
127
128 :param path: full path to the remote directory
129 :type path: str
130 """
131 conn = self.get_conn()
132 flist = conn.listdir_attr(path)
133 files = {}
134 for f in flist:
135 modify = datetime.datetime.fromtimestamp(f.st_mtime).strftime('%Y%m%d%H%M%S')
136 files[f.filename] = {
137 'size': f.st_size,
138 'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
139 'modify': modify,
140 }
141 return files
142
143 def list_directory(self, path: str) -> List[str]:
144 """
145 Returns a list of files on the remote system.
146
147 :param path: full path to the remote directory to list
148 :type path: str
149 """
150 conn = self.get_conn()
151 files = conn.listdir(path)
152 return files
153
154 def create_directory(self, path: str, mode: int = 777) -> None:
155 """
156 Creates a directory on the remote system.
157
158 :param path: full path to the remote directory to create
159 :type path: str
160 :param mode: int representation of octal mode for directory
161 """
162 conn = self.get_conn()
163 conn.makedirs(path, mode)
164
165 def delete_directory(self, path: str) -> None:
166 """
167 Deletes a directory on the remote system.
168
169 :param path: full path to the remote directory to delete
170 :type path: str
171 """
172 conn = self.get_conn()
173 conn.rmdir(path)
174
175 def retrieve_file(self, remote_full_path: str, local_full_path: str) -> None:
176 """
177 Transfers the remote file to a local location.
178 If local_full_path is a string path, the file will be put
179 at that location
180
181 :param remote_full_path: full path to the remote file
182 :type remote_full_path: str
183 :param local_full_path: full path to the local file
184 :type local_full_path: str
185 """
186 conn = self.get_conn()
187 self.log.info('Retrieving file from FTP: %s', remote_full_path)
188 conn.get(remote_full_path, local_full_path)
189 self.log.info('Finished retrieving file from FTP: %s', remote_full_path)
190
191 def store_file(self, remote_full_path: str, local_full_path: str) -> None:
192 """
193 Transfers a local file to the remote location.
194 If local_full_path_or_buffer is a string path, the file will be read
195 from that location
196
197 :param remote_full_path: full path to the remote file
198 :type remote_full_path: str
199 :param local_full_path: full path to the local file
200 :type local_full_path: str
201 """
202 conn = self.get_conn()
203 conn.put(local_full_path, remote_full_path)
204
205 def delete_file(self, path: str) -> None:
206 """
207 Removes a file on the FTP Server
208
209 :param path: full path to the remote file
210 :type path: str
211 """
212 conn = self.get_conn()
213 conn.remove(path)
214
215 def get_mod_time(self, path: str) -> str:
216 """
217 Returns modification time.
218
219 :param path: full path to the remote file
220 :type path: str
221 """
222 conn = self.get_conn()
223 ftp_mdtm = conn.stat(path).st_mtime
224 return datetime.datetime.fromtimestamp(ftp_mdtm).strftime('%Y%m%d%H%M%S')
225
226 def path_exists(self, path: str) -> bool:
227 """
228 Returns True if a remote entity exists
229
230 :param path: full path to the remote file or directory
231 :type path: str
232 """
233 conn = self.get_conn()
234 return conn.exists(path)
235
236 @staticmethod
237 def _is_path_match(path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None) -> bool:
238 """
239 Return True if given path starts with prefix (if set) and ends with delimiter (if set).
240
241 :param path: path to be checked
242 :type path: str
243 :param prefix: if set path will be checked is starting with prefix
244 :type prefix: str
245 :param delimiter: if set path will be checked is ending with suffix
246 :type delimiter: str
247 :return: bool
248 """
249 if prefix is not None and not path.startswith(prefix):
250 return False
251 if delimiter is not None and not path.endswith(delimiter):
252 return False
253 return True
254
255 def get_tree_map(
256 self, path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None
257 ) -> Tuple[List[str], List[str], List[str]]:
258 """
259 Return tuple with recursive lists of files, directories and unknown paths from given path.
260 It is possible to filter results by giving prefix and/or delimiter parameters.
261
262 :param path: path from which tree will be built
263 :type path: str
264 :param prefix: if set paths will be added if start with prefix
265 :type prefix: str
266 :param delimiter: if set paths will be added if end with delimiter
267 :type delimiter: str
268 :return: tuple with list of files, dirs and unknown items
269 :rtype: Tuple[List[str], List[str], List[str]]
270 """
271 conn = self.get_conn()
272 files, dirs, unknowns = [], [], [] # type: List[str], List[str], List[str]
273
274 def append_matching_path_callback(list_):
275 return lambda item: list_.append(item) if self._is_path_match(item, prefix, delimiter) else None
276
277 conn.walktree(
278 remotepath=path,
279 fcallback=append_matching_path_callback(files),
280 dcallback=append_matching_path_callback(dirs),
281 ucallback=append_matching_path_callback(unknowns),
282 recurse=True,
283 )
284
285 return files, dirs, unknowns
286
[end of airflow/providers/sftp/hooks/sftp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/providers/sftp/hooks/sftp.py b/airflow/providers/sftp/hooks/sftp.py
--- a/airflow/providers/sftp/hooks/sftp.py
+++ b/airflow/providers/sftp/hooks/sftp.py
@@ -51,6 +51,7 @@
self.conn = None
self.private_key_pass = None
+ self.ciphers = None
# Fail for unverified hosts, unless this is explicitly allowed
self.no_host_key_check = False
@@ -80,6 +81,9 @@
if 'no_host_key_check' in extra_options:
self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'
+
+ if 'ciphers' in extra_options:
+ self.ciphers = extra_options['ciphers']
if 'private_key' in extra_options:
warnings.warn(
@@ -98,6 +102,7 @@
if self.no_host_key_check:
cnopts.hostkeys = None
cnopts.compression = self.compress
+ cnopts.ciphers = self.ciphers
conn_params = {
'host': self.remote_host,
'port': self.port,
| {"golden_diff": "diff --git a/airflow/providers/sftp/hooks/sftp.py b/airflow/providers/sftp/hooks/sftp.py\n--- a/airflow/providers/sftp/hooks/sftp.py\n+++ b/airflow/providers/sftp/hooks/sftp.py\n@@ -51,6 +51,7 @@\n \n self.conn = None\n self.private_key_pass = None\n+ self.ciphers = None\n \n # Fail for unverified hosts, unless this is explicitly allowed\n self.no_host_key_check = False\n@@ -80,6 +81,9 @@\n \n if 'no_host_key_check' in extra_options:\n self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'\n+ \n+ if 'ciphers' in extra_options:\n+ self.ciphers = extra_options['ciphers']\n \n if 'private_key' in extra_options:\n warnings.warn(\n@@ -98,6 +102,7 @@\n if self.no_host_key_check:\n cnopts.hostkeys = None\n cnopts.compression = self.compress\n+ cnopts.ciphers = self.ciphers\n conn_params = {\n 'host': self.remote_host,\n 'port': self.port,\n", "issue": "Allow for specification of cipher in SftpHook\n<!--\r\n\r\nWelcome to Apache Airflow! For a smooth issue process, try to answer the following questions.\r\nDon't worry if they're not all applicable; just try to include what you can :-)\r\n\r\nIf you need to include code snippets or logs, please put them in fenced code\r\nblocks. If they're super-long, please use the details tag like\r\n<details><summary>super-long log</summary> lots of stuff </details>\r\n\r\nPlease delete these comment blocks before submitting the issue.\r\n\r\n-->\r\n\r\n**Description**\r\n\r\nI want to specify the cipher to use when creating a SftpHook. Currently this is not possible because there is not a way to propagate the value to CnOpts. \r\n\r\nhttps://github.com/apache/airflow/blob/master/airflow/providers/sftp/hooks/sftp.py\r\nhttps://pysftp.readthedocs.io/en/release_0.2.9/pysftp.html?#pysftp.CnOpts\r\nhttps://pysftp.readthedocs.io/en/release_0.2.9/cookbook.html\r\n\r\n**Use case / motivation**\r\n\r\n<!-- What do you want to happen?\r\n\r\nRather than telling us how you might implement this solution, try to take a\r\nstep back and describe what you are trying to achieve.\r\n\r\n-->\r\n\r\nI want to be able to access SFTP servers that have disabled access from connection with weak ciphers.\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains SFTP hook.\"\"\"\nimport datetime\nimport stat\nfrom typing import Dict, List, Optional, Tuple\n\nimport pysftp\n\nfrom airflow.providers.ssh.hooks.ssh import SSHHook\n\n\nclass SFTPHook(SSHHook):\n \"\"\"\n This hook is inherited from SSH hook. Please refer to SSH hook for the input\n arguments.\n\n Interact with SFTP. Aims to be interchangeable with FTPHook.\n\n :Pitfalls::\n\n - In contrast with FTPHook describe_directory only returns size, type and\n modify. 
It doesn't return unix.owner, unix.mode, perm, unix.group and\n unique.\n - retrieve_file and store_file only take a local full path and not a\n buffer.\n - If no mode is passed to create_directory it will be created with 777\n permissions.\n\n Errors that may occur throughout but should be handled downstream.\n \"\"\"\n\n def __init__(self, ftp_conn_id: str = 'sftp_default', *args, **kwargs) -> None:\n kwargs['ssh_conn_id'] = ftp_conn_id\n super().__init__(*args, **kwargs)\n\n self.conn = None\n self.private_key_pass = None\n\n # Fail for unverified hosts, unless this is explicitly allowed\n self.no_host_key_check = False\n\n if self.ssh_conn_id is not None:\n conn = self.get_connection(self.ssh_conn_id)\n if conn.extra is not None:\n extra_options = conn.extra_dejson\n if 'private_key_pass' in extra_options:\n self.private_key_pass = extra_options.get('private_key_pass')\n\n # For backward compatibility\n # TODO: remove in Airflow 2.1\n import warnings\n\n if 'ignore_hostkey_verification' in extra_options:\n warnings.warn(\n 'Extra option `ignore_hostkey_verification` is deprecated.'\n 'Please use `no_host_key_check` instead.'\n 'This option will be removed in Airflow 2.1',\n DeprecationWarning,\n stacklevel=2,\n )\n self.no_host_key_check = (\n str(extra_options['ignore_hostkey_verification']).lower() == 'true'\n )\n\n if 'no_host_key_check' in extra_options:\n self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'\n\n if 'private_key' in extra_options:\n warnings.warn(\n 'Extra option `private_key` is deprecated.'\n 'Please use `key_file` instead.'\n 'This option will be removed in Airflow 2.1',\n DeprecationWarning,\n stacklevel=2,\n )\n self.key_file = extra_options.get('private_key')\n\n def get_conn(self) -> pysftp.Connection:\n \"\"\"Returns an SFTP connection object\"\"\"\n if self.conn is None:\n cnopts = pysftp.CnOpts()\n if self.no_host_key_check:\n cnopts.hostkeys = None\n cnopts.compression = self.compress\n conn_params = {\n 'host': self.remote_host,\n 'port': self.port,\n 'username': self.username,\n 'cnopts': cnopts,\n }\n if self.password and self.password.strip():\n conn_params['password'] = self.password\n if self.key_file:\n conn_params['private_key'] = self.key_file\n if self.private_key_pass:\n conn_params['private_key_pass'] = self.private_key_pass\n\n self.conn = pysftp.Connection(**conn_params)\n return self.conn\n\n def close_conn(self) -> None:\n \"\"\"Closes the connection\"\"\"\n if self.conn is not None:\n self.conn.close()\n self.conn = None\n\n def describe_directory(self, path: str) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Returns a dictionary of {filename: {attributes}} for all files\n on the remote system (where the MLSD command is supported).\n\n :param path: full path to the remote directory\n :type path: str\n \"\"\"\n conn = self.get_conn()\n flist = conn.listdir_attr(path)\n files = {}\n for f in flist:\n modify = datetime.datetime.fromtimestamp(f.st_mtime).strftime('%Y%m%d%H%M%S')\n files[f.filename] = {\n 'size': f.st_size,\n 'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',\n 'modify': modify,\n }\n return files\n\n def list_directory(self, path: str) -> List[str]:\n \"\"\"\n Returns a list of files on the remote system.\n\n :param path: full path to the remote directory to list\n :type path: str\n \"\"\"\n conn = self.get_conn()\n files = conn.listdir(path)\n return files\n\n def create_directory(self, path: str, mode: int = 777) -> None:\n \"\"\"\n Creates a directory on the remote system.\n\n :param path: full 
path to the remote directory to create\n :type path: str\n :param mode: int representation of octal mode for directory\n \"\"\"\n conn = self.get_conn()\n conn.makedirs(path, mode)\n\n def delete_directory(self, path: str) -> None:\n \"\"\"\n Deletes a directory on the remote system.\n\n :param path: full path to the remote directory to delete\n :type path: str\n \"\"\"\n conn = self.get_conn()\n conn.rmdir(path)\n\n def retrieve_file(self, remote_full_path: str, local_full_path: str) -> None:\n \"\"\"\n Transfers the remote file to a local location.\n If local_full_path is a string path, the file will be put\n at that location\n\n :param remote_full_path: full path to the remote file\n :type remote_full_path: str\n :param local_full_path: full path to the local file\n :type local_full_path: str\n \"\"\"\n conn = self.get_conn()\n self.log.info('Retrieving file from FTP: %s', remote_full_path)\n conn.get(remote_full_path, local_full_path)\n self.log.info('Finished retrieving file from FTP: %s', remote_full_path)\n\n def store_file(self, remote_full_path: str, local_full_path: str) -> None:\n \"\"\"\n Transfers a local file to the remote location.\n If local_full_path_or_buffer is a string path, the file will be read\n from that location\n\n :param remote_full_path: full path to the remote file\n :type remote_full_path: str\n :param local_full_path: full path to the local file\n :type local_full_path: str\n \"\"\"\n conn = self.get_conn()\n conn.put(local_full_path, remote_full_path)\n\n def delete_file(self, path: str) -> None:\n \"\"\"\n Removes a file on the FTP Server\n\n :param path: full path to the remote file\n :type path: str\n \"\"\"\n conn = self.get_conn()\n conn.remove(path)\n\n def get_mod_time(self, path: str) -> str:\n \"\"\"\n Returns modification time.\n\n :param path: full path to the remote file\n :type path: str\n \"\"\"\n conn = self.get_conn()\n ftp_mdtm = conn.stat(path).st_mtime\n return datetime.datetime.fromtimestamp(ftp_mdtm).strftime('%Y%m%d%H%M%S')\n\n def path_exists(self, path: str) -> bool:\n \"\"\"\n Returns True if a remote entity exists\n\n :param path: full path to the remote file or directory\n :type path: str\n \"\"\"\n conn = self.get_conn()\n return conn.exists(path)\n\n @staticmethod\n def _is_path_match(path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None) -> bool:\n \"\"\"\n Return True if given path starts with prefix (if set) and ends with delimiter (if set).\n\n :param path: path to be checked\n :type path: str\n :param prefix: if set path will be checked is starting with prefix\n :type prefix: str\n :param delimiter: if set path will be checked is ending with suffix\n :type delimiter: str\n :return: bool\n \"\"\"\n if prefix is not None and not path.startswith(prefix):\n return False\n if delimiter is not None and not path.endswith(delimiter):\n return False\n return True\n\n def get_tree_map(\n self, path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None\n ) -> Tuple[List[str], List[str], List[str]]:\n \"\"\"\n Return tuple with recursive lists of files, directories and unknown paths from given path.\n It is possible to filter results by giving prefix and/or delimiter parameters.\n\n :param path: path from which tree will be built\n :type path: str\n :param prefix: if set paths will be added if start with prefix\n :type prefix: str\n :param delimiter: if set paths will be added if end with delimiter\n :type delimiter: str\n :return: tuple with list of files, dirs and unknown items\n :rtype: 
Tuple[List[str], List[str], List[str]]\n \"\"\"\n conn = self.get_conn()\n files, dirs, unknowns = [], [], [] # type: List[str], List[str], List[str]\n\n def append_matching_path_callback(list_):\n return lambda item: list_.append(item) if self._is_path_match(item, prefix, delimiter) else None\n\n conn.walktree(\n remotepath=path,\n fcallback=append_matching_path_callback(files),\n dcallback=append_matching_path_callback(dirs),\n ucallback=append_matching_path_callback(unknowns),\n recurse=True,\n )\n\n return files, dirs, unknowns\n", "path": "airflow/providers/sftp/hooks/sftp.py"}]} | 3,906 | 269 |
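
With a change along the lines of the diff above, the cipher restriction is driven entirely by the connection's `extra` field. A minimal sketch of that flow outside Airflow follows; the extra-field layout and cipher names are illustrative assumptions, and the exact value format that `CnOpts.ciphers` accepts should be checked against the pysftp documentation linked in the issue.

```python
import json

import pysftp

# Assumed layout of the SFTP connection's "extra" field (illustrative only):
extra = json.loads('{"no_host_key_check": "true", "ciphers": ["aes256-ctr", "aes192-ctr"]}')

cnopts = pysftp.CnOpts()
if str(extra.get("no_host_key_check", "false")).lower() == "true":
    cnopts.hostkeys = None                 # skip host key verification, as the hook already does
cnopts.ciphers = extra.get("ciphers")      # offer only the listed ciphers during the handshake

# cnopts would then be passed to pysftp.Connection(host=..., username=..., cnopts=cnopts)
```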
gh_patches_debug_60360 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-3774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: the CSV export of interventions does not apply the filters selected in the table
**Context:**
Bug in the intervention module
**Expected result:**
When I filter the table listing the objects in the module and export the result as CSV, the CSV must contain only the filtered results
**Observed result (bug):**
The exported CSV contains every intervention, unfiltered, ignoring any filters the user selected in the interface.
</issue>
<code>
[start of geotrek/maintenance/views.py]
1 import logging
2 import re
3
4 from django.conf import settings
5 from django.db.models import Subquery, OuterRef, Sum
6 from django.db.models.expressions import Value
7 from django.utils.translation import gettext_lazy as _
8 from mapentity.views import (MapEntityList, MapEntityFormat, MapEntityDetail, MapEntityDocument,
9 MapEntityCreate, MapEntityUpdate, MapEntityDelete)
10
11 from geotrek.altimetry.models import AltimetryMixin
12 from geotrek.authent.decorators import same_structure_required
13 from geotrek.common.mixins.forms import FormsetMixin
14 from geotrek.common.mixins.views import CustomColumnsMixin
15 from geotrek.common.viewsets import GeotrekMapentityViewSet
16 from .filters import InterventionFilterSet, ProjectFilterSet
17 from .forms import (InterventionForm, ProjectForm,
18 FundingFormSet, ManDayFormSet)
19 from .models import Intervention, Project, ManDay
20 from .serializers import (InterventionSerializer, ProjectSerializer,
21 InterventionGeojsonSerializer, ProjectGeojsonSerializer)
22
23 logger = logging.getLogger(__name__)
24
25
26 ANNOTATION_FORBIDDEN_CHARS = re.compile(r"['`\"\]\[;\s]|--|/\*|\*/")
27 REPLACEMENT_CHAR = "_"
28
29
30 def _normalize_annotation_column_name(col_name):
31 return ANNOTATION_FORBIDDEN_CHARS.sub(repl=REPLACEMENT_CHAR, string=col_name)
32
33
34 class InterventionList(CustomColumnsMixin, MapEntityList):
35 queryset = Intervention.objects.existing()
36 filterform = InterventionFilterSet
37 mandatory_columns = ['id', 'name']
38 default_extra_columns = ['date', 'type', 'target', 'status', 'stake']
39 searchable_columns = ['id', 'name']
40 unorderable_columns = ['target']
41
42
43 class InterventionFormatList(MapEntityFormat, InterventionList):
44
45 @classmethod
46 def build_cost_column_name(cls, job_name):
47 return _normalize_annotation_column_name(f"{_('Cost')} {job_name}")
48
49 def get_queryset(self):
50 """Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention"""
51
52 queryset = Intervention.objects.existing()
53
54 if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
55
56 # Get all jobs that are used in interventions, as unique names, ids and costs
57 all_mandays = ManDay.objects.all()
58 jobs_used_in_interventions = list(
59 set(all_mandays.values_list("job__job", "job_id", "job__cost"))
60 )
61
62 # Iter over unique jobs
63 for job_name, job_id, job_cost in jobs_used_in_interventions:
64
65 # Create column name for current job cost
66 column_name = self.build_cost_column_name(job_name)
67
68 # Create subquery to retrieve total cost of mandays for a given intervention and a given job
69 mandays_query = (
70 ManDay.objects.filter(intervention=OuterRef("pk"), job_id=job_id) # Extract all mandays for a given intervention and a given job
71 .values("job_id") # Group by job
72 .annotate(total_days=Sum("nb_days")) # Select number of days worked
73 .values("total_days") # Rename result as total_days
74 )
75
76 # Use total_days and job cost to calculate total cost for a given intervention and a given job
77 job_cost_query = Subquery(mandays_query) * Value(job_cost)
78
79 # Annotate queryset with this cost query
80 params = {column_name: job_cost_query}
81 queryset = queryset.annotate(**params)
82 return queryset
83
84 @classmethod
85 def get_mandatory_columns(cls):
86 mandatory_columns = ['id']
87 if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
88 all_mandays = ManDay.objects.all() # Used to find all jobs that ARE USED in interventions
89 # Get all jobs that are used in interventions, as unique names
90 jobs_as_names = list(
91 set(all_mandays.values_list("job__job", flat=True))
92 )
93 # Create column names for each unique job cost
94 cost_column_names = list(map(cls.build_cost_column_name, jobs_as_names))
95 # Add these column names to export
96 mandatory_columns = mandatory_columns + cost_column_names
97 return mandatory_columns
98
99 default_extra_columns = [
100 'name', 'date', 'type', 'target', 'status', 'stake',
101 'disorders', 'total_manday', 'project', 'subcontracting',
102 'width', 'height', 'area', 'structure',
103 'description', 'date_insert', 'date_update',
104 'material_cost', 'heliport_cost', 'subcontract_cost',
105 'total_cost_mandays', 'total_cost',
106 'cities', 'districts', 'areas',
107 ] + AltimetryMixin.COLUMNS
108
109
110 class InterventionDetail(MapEntityDetail):
111 queryset = Intervention.objects.existing()
112
113 def get_context_data(self, *args, **kwargs):
114 context = super().get_context_data(*args, **kwargs)
115 context['can_edit'] = self.get_object().same_structure(self.request.user)
116 return context
117
118
119 class InterventionDocument(MapEntityDocument):
120 model = Intervention
121
122
123 class ManDayFormsetMixin(FormsetMixin):
124 context_name = 'manday_formset'
125 formset_class = ManDayFormSet
126
127
128 class InterventionCreate(ManDayFormsetMixin, MapEntityCreate):
129 model = Intervention
130 form_class = InterventionForm
131
132 def get_form_kwargs(self):
133 kwargs = super().get_form_kwargs()
134 if 'target_id' in self.request.GET and 'target_type' in self.request.GET:
135 # Create intervention on an existing infrastructure
136 kwargs['target_id'] = self.request.GET['target_id']
137 kwargs['target_type'] = self.request.GET['target_type']
138 return kwargs
139
140
141 class InterventionUpdate(ManDayFormsetMixin, MapEntityUpdate):
142 queryset = Intervention.objects.existing()
143 form_class = InterventionForm
144
145 @same_structure_required('maintenance:intervention_detail')
146 def dispatch(self, *args, **kwargs):
147 return super().dispatch(*args, **kwargs)
148
149 def get_form_kwargs(self):
150 kwargs = super().get_form_kwargs()
151 # If deletion is allowed
152 if kwargs['can_delete']:
153 intervention = self.get_object()
154 # Disallow deletion if this intervention is part of Suricate Workflow at the moment
155 not_workflow = not settings.SURICATE_WORKFLOW_ENABLED
156 is_report = intervention.target and intervention.target.__class__.__name__ == "Report"
157 report_is_closed = False
158 if is_report:
159 report_is_closed = (intervention.target.status.identifier == 'solved')
160 kwargs["can_delete"] = not_workflow or (not is_report) or report_is_closed
161 return kwargs
162
163
164 class InterventionDelete(MapEntityDelete):
165 model = Intervention
166
167 @same_structure_required('maintenance:intervention_detail')
168 def dispatch(self, *args, **kwargs):
169 return super().dispatch(*args, **kwargs)
170
171
172 class InterventionViewSet(GeotrekMapentityViewSet):
173 model = Intervention
174 serializer_class = InterventionSerializer
175 geojson_serializer_class = InterventionGeojsonSerializer
176 filterset_class = InterventionFilterSet
177 mapentity_list_class = InterventionList
178
179 def get_queryset(self):
180 qs = self.model.objects.existing()
181 if self.format_kwarg == 'geojson':
182 qs = qs.only('id', 'name')
183 else:
184 qs = qs.select_related("stake", "status", "type", "target_type").prefetch_related('target')
185 return qs
186
187
188 class ProjectList(CustomColumnsMixin, MapEntityList):
189 queryset = Project.objects.existing()
190 filterform = ProjectFilterSet
191 mandatory_columns = ['id', 'name']
192 default_extra_columns = ['period', 'type', 'domain']
193 searchable_columns = ['id', 'name']
194 unorderable_columns = ['period', ]
195
196
197 class ProjectFormatList(MapEntityFormat, ProjectList):
198 mandatory_columns = ['id']
199 default_extra_columns = [
200 'structure', 'name', 'period', 'type', 'domain', 'constraint', 'global_cost',
201 'interventions', 'interventions_total_cost', 'comments', 'contractors',
202 'project_owner', 'project_manager', 'founders',
203 'date_insert', 'date_update',
204 'cities', 'districts', 'areas',
205 ]
206
207
208 class ProjectDetail(MapEntityDetail):
209 queryset = Project.objects.existing()
210
211 def get_context_data(self, *args, **kwargs):
212 context = super().get_context_data(*args, **kwargs)
213 context['can_edit'] = self.get_object().same_structure(self.request.user)
214 context['empty_map_message'] = _("No intervention related.")
215 return context
216
217
218 class ProjectDocument(MapEntityDocument):
219 model = Project
220
221
222 class FundingFormsetMixin(FormsetMixin):
223 context_name = 'funding_formset'
224 formset_class = FundingFormSet
225
226
227 class ProjectCreate(FundingFormsetMixin, MapEntityCreate):
228 model = Project
229 form_class = ProjectForm
230
231
232 class ProjectUpdate(FundingFormsetMixin, MapEntityUpdate):
233 queryset = Project.objects.existing()
234 form_class = ProjectForm
235
236 @same_structure_required('maintenance:project_detail')
237 def dispatch(self, *args, **kwargs):
238 return super().dispatch(*args, **kwargs)
239
240
241 class ProjectDelete(MapEntityDelete):
242 model = Project
243
244 @same_structure_required('maintenance:project_detail')
245 def dispatch(self, *args, **kwargs):
246 return super().dispatch(*args, **kwargs)
247
248
249 class ProjectViewSet(GeotrekMapentityViewSet):
250 model = Project
251 serializer_class = ProjectSerializer
252 geojson_serializer_class = ProjectGeojsonSerializer
253 filterset_class = ProjectFilterSet
254 mapentity_list_class = ProjectList
255
256 def get_queryset(self):
257 qs = self.model.objects.existing()
258 if self.format_kwarg == 'geojson':
259 non_empty_qs = Intervention.objects.existing().filter(project__isnull=False).values('project')
260 qs = qs.filter(pk__in=non_empty_qs)
261 qs = qs.only('id', 'name')
262 return qs
263
[end of geotrek/maintenance/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/maintenance/views.py b/geotrek/maintenance/views.py
--- a/geotrek/maintenance/views.py
+++ b/geotrek/maintenance/views.py
@@ -49,7 +49,7 @@
def get_queryset(self):
"""Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention"""
- queryset = Intervention.objects.existing()
+ queryset = super().get_queryset()
if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:
| {"golden_diff": "diff --git a/geotrek/maintenance/views.py b/geotrek/maintenance/views.py\n--- a/geotrek/maintenance/views.py\n+++ b/geotrek/maintenance/views.py\n@@ -49,7 +49,7 @@\n def get_queryset(self):\n \"\"\"Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention\"\"\"\n \n- queryset = Intervention.objects.existing()\n+ queryset = super().get_queryset()\n \n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n", "issue": "Bug : Export des interventions filtr\u00e9es dans la table n'est pas filtr\u00e9 dans le csv export\u00e9\n**Contexte :** \r\n\r\nBug dans le module intervention\r\n\r\n**R\u00e9sultat attendu :** \r\n\r\nLorsque je filtre la table qui liste les objets dans le module et que j'exporte le r\u00e9sultat au format CSV, le CSV ne doit contenir que les r\u00e9sultats filtr\u00e9s\r\n\r\n**R\u00e9sultat observ\u00e9 (bug) :** \r\n\r\nLa table CSV obtenue contient l'ensemble des interventions non filtr\u00e9es, sans tenir compte des \u00e9ventuels filtres choisis par l'utilisateur dans l'interface. \n", "before_files": [{"content": "import logging\nimport re\n\nfrom django.conf import settings\nfrom django.db.models import Subquery, OuterRef, Sum\nfrom django.db.models.expressions import Value\nfrom django.utils.translation import gettext_lazy as _\nfrom mapentity.views import (MapEntityList, MapEntityFormat, MapEntityDetail, MapEntityDocument,\n MapEntityCreate, MapEntityUpdate, MapEntityDelete)\n\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.mixins.forms import FormsetMixin\nfrom geotrek.common.mixins.views import CustomColumnsMixin\nfrom geotrek.common.viewsets import GeotrekMapentityViewSet\nfrom .filters import InterventionFilterSet, ProjectFilterSet\nfrom .forms import (InterventionForm, ProjectForm,\n FundingFormSet, ManDayFormSet)\nfrom .models import Intervention, Project, ManDay\nfrom .serializers import (InterventionSerializer, ProjectSerializer,\n InterventionGeojsonSerializer, ProjectGeojsonSerializer)\n\nlogger = logging.getLogger(__name__)\n\n\nANNOTATION_FORBIDDEN_CHARS = re.compile(r\"['`\\\"\\]\\[;\\s]|--|/\\*|\\*/\")\nREPLACEMENT_CHAR = \"_\"\n\n\ndef _normalize_annotation_column_name(col_name):\n return ANNOTATION_FORBIDDEN_CHARS.sub(repl=REPLACEMENT_CHAR, string=col_name)\n\n\nclass InterventionList(CustomColumnsMixin, MapEntityList):\n queryset = Intervention.objects.existing()\n filterform = InterventionFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['date', 'type', 'target', 'status', 'stake']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['target']\n\n\nclass InterventionFormatList(MapEntityFormat, InterventionList):\n\n @classmethod\n def build_cost_column_name(cls, job_name):\n return _normalize_annotation_column_name(f\"{_('Cost')} {job_name}\")\n\n def get_queryset(self):\n \"\"\"Returns all interventions joined with a new column for each job, to record the total cost of each job in each intervention\"\"\"\n\n queryset = Intervention.objects.existing()\n\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n\n # Get all jobs that are used in interventions, as unique names, ids and costs\n all_mandays = ManDay.objects.all()\n jobs_used_in_interventions = list(\n set(all_mandays.values_list(\"job__job\", \"job_id\", \"job__cost\"))\n )\n\n # Iter over unique jobs\n for job_name, job_id, job_cost in jobs_used_in_interventions:\n\n # Create column name for 
current job cost\n column_name = self.build_cost_column_name(job_name)\n\n # Create subquery to retrieve total cost of mandays for a given intervention and a given job\n mandays_query = (\n ManDay.objects.filter(intervention=OuterRef(\"pk\"), job_id=job_id) # Extract all mandays for a given intervention and a given job\n .values(\"job_id\") # Group by job\n .annotate(total_days=Sum(\"nb_days\")) # Select number of days worked\n .values(\"total_days\") # Rename result as total_days\n )\n\n # Use total_days and job cost to calculate total cost for a given intervention and a given job\n job_cost_query = Subquery(mandays_query) * Value(job_cost)\n\n # Annotate queryset with this cost query\n params = {column_name: job_cost_query}\n queryset = queryset.annotate(**params)\n return queryset\n\n @classmethod\n def get_mandatory_columns(cls):\n mandatory_columns = ['id']\n if settings.ENABLE_JOBS_COSTS_DETAILED_EXPORT:\n all_mandays = ManDay.objects.all() # Used to find all jobs that ARE USED in interventions\n # Get all jobs that are used in interventions, as unique names\n jobs_as_names = list(\n set(all_mandays.values_list(\"job__job\", flat=True))\n )\n # Create column names for each unique job cost\n cost_column_names = list(map(cls.build_cost_column_name, jobs_as_names))\n # Add these column names to export\n mandatory_columns = mandatory_columns + cost_column_names\n return mandatory_columns\n\n default_extra_columns = [\n 'name', 'date', 'type', 'target', 'status', 'stake',\n 'disorders', 'total_manday', 'project', 'subcontracting',\n 'width', 'height', 'area', 'structure',\n 'description', 'date_insert', 'date_update',\n 'material_cost', 'heliport_cost', 'subcontract_cost',\n 'total_cost_mandays', 'total_cost',\n 'cities', 'districts', 'areas',\n ] + AltimetryMixin.COLUMNS\n\n\nclass InterventionDetail(MapEntityDetail):\n queryset = Intervention.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n return context\n\n\nclass InterventionDocument(MapEntityDocument):\n model = Intervention\n\n\nclass ManDayFormsetMixin(FormsetMixin):\n context_name = 'manday_formset'\n formset_class = ManDayFormSet\n\n\nclass InterventionCreate(ManDayFormsetMixin, MapEntityCreate):\n model = Intervention\n form_class = InterventionForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n if 'target_id' in self.request.GET and 'target_type' in self.request.GET:\n # Create intervention on an existing infrastructure\n kwargs['target_id'] = self.request.GET['target_id']\n kwargs['target_type'] = self.request.GET['target_type']\n return kwargs\n\n\nclass InterventionUpdate(ManDayFormsetMixin, MapEntityUpdate):\n queryset = Intervention.objects.existing()\n form_class = InterventionForm\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # If deletion is allowed\n if kwargs['can_delete']:\n intervention = self.get_object()\n # Disallow deletion if this intervention is part of Suricate Workflow at the moment\n not_workflow = not settings.SURICATE_WORKFLOW_ENABLED\n is_report = intervention.target and intervention.target.__class__.__name__ == \"Report\"\n report_is_closed = False\n if is_report:\n report_is_closed = (intervention.target.status.identifier == 'solved')\n kwargs[\"can_delete\"] = 
not_workflow or (not is_report) or report_is_closed\n return kwargs\n\n\nclass InterventionDelete(MapEntityDelete):\n model = Intervention\n\n @same_structure_required('maintenance:intervention_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass InterventionViewSet(GeotrekMapentityViewSet):\n model = Intervention\n serializer_class = InterventionSerializer\n geojson_serializer_class = InterventionGeojsonSerializer\n filterset_class = InterventionFilterSet\n mapentity_list_class = InterventionList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n qs = qs.only('id', 'name')\n else:\n qs = qs.select_related(\"stake\", \"status\", \"type\", \"target_type\").prefetch_related('target')\n return qs\n\n\nclass ProjectList(CustomColumnsMixin, MapEntityList):\n queryset = Project.objects.existing()\n filterform = ProjectFilterSet\n mandatory_columns = ['id', 'name']\n default_extra_columns = ['period', 'type', 'domain']\n searchable_columns = ['id', 'name']\n unorderable_columns = ['period', ]\n\n\nclass ProjectFormatList(MapEntityFormat, ProjectList):\n mandatory_columns = ['id']\n default_extra_columns = [\n 'structure', 'name', 'period', 'type', 'domain', 'constraint', 'global_cost',\n 'interventions', 'interventions_total_cost', 'comments', 'contractors',\n 'project_owner', 'project_manager', 'founders',\n 'date_insert', 'date_update',\n 'cities', 'districts', 'areas',\n ]\n\n\nclass ProjectDetail(MapEntityDetail):\n queryset = Project.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.get_object().same_structure(self.request.user)\n context['empty_map_message'] = _(\"No intervention related.\")\n return context\n\n\nclass ProjectDocument(MapEntityDocument):\n model = Project\n\n\nclass FundingFormsetMixin(FormsetMixin):\n context_name = 'funding_formset'\n formset_class = FundingFormSet\n\n\nclass ProjectCreate(FundingFormsetMixin, MapEntityCreate):\n model = Project\n form_class = ProjectForm\n\n\nclass ProjectUpdate(FundingFormsetMixin, MapEntityUpdate):\n queryset = Project.objects.existing()\n form_class = ProjectForm\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectDelete(MapEntityDelete):\n model = Project\n\n @same_structure_required('maintenance:project_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass ProjectViewSet(GeotrekMapentityViewSet):\n model = Project\n serializer_class = ProjectSerializer\n geojson_serializer_class = ProjectGeojsonSerializer\n filterset_class = ProjectFilterSet\n mapentity_list_class = ProjectList\n\n def get_queryset(self):\n qs = self.model.objects.existing()\n if self.format_kwarg == 'geojson':\n non_empty_qs = Intervention.objects.existing().filter(project__isnull=False).values('project')\n qs = qs.filter(pk__in=non_empty_qs)\n qs = qs.only('id', 'name')\n return qs\n", "path": "geotrek/maintenance/views.py"}]} | 3,564 | 118 |
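
The one-line fix above is easy to miss, so here is the pattern reduced to a runnable toy; the class names and the list-based stand-in for a queryset are illustrative, not the real Django/MapEntity classes. The export view must build on the parent's already-filtered queryset instead of going back to the object manager.

```python
class FilteredListView:
    """Stand-in for the list view: it applies the user's filter choices."""

    def __init__(self, rows, wanted_status):
        self.rows = rows
        self.wanted_status = wanted_status

    def get_queryset(self):
        return [row for row in self.rows if row["status"] == self.wanted_status]


class BrokenExportView(FilteredListView):
    def get_queryset(self):
        return self.rows                   # bug: rebuilds from scratch, filters are lost


class FixedExportView(FilteredListView):
    def get_queryset(self):
        queryset = super().get_queryset()  # fix: start from the already-filtered queryset
        # ...per-job cost annotations would be added here, as in the patch...
        return queryset


rows = [{"id": 1, "status": "done"}, {"id": 2, "status": "planned"}]
print(len(BrokenExportView(rows, "done").get_queryset()))  # 2 -> everything is exported
print(len(FixedExportView(rows, "done").get_queryset()))   # 1 -> only the filtered rows
```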
gh_patches_debug_29067 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1008 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chunked uploads not attempting retries
I forgot to implement the actual retry-portion for the jQuery file uploading. As such, jQuery file upload will only try to upload a given chunk once. See here:
https://github.com/blueimp/jQuery-File-Upload/wiki/Chunked-file-uploads
</issue>
<code>
[start of app/grandchallenge/jqfileupload/views.py]
1 import re
2 from datetime import timedelta
3
4 from django.utils.timezone import now
5 from rest_framework import mixins
6 from rest_framework.parsers import FormParser, MultiPartParser
7 from rest_framework.response import Response
8 from rest_framework.status import HTTP_400_BAD_REQUEST
9 from rest_framework.viewsets import GenericViewSet
10 from rest_framework_guardian.filters import ObjectPermissionsFilter
11
12 from grandchallenge.core.permissions.rest_framework import (
13 DjangoObjectOnlyPermissions,
14 )
15 from grandchallenge.jqfileupload.models import StagedFile
16 from grandchallenge.jqfileupload.serializers import StagedFileSerializer
17
18
19 class StagedFileViewSet(
20 mixins.CreateModelMixin,
21 mixins.RetrieveModelMixin,
22 mixins.ListModelMixin,
23 GenericViewSet,
24 ):
25 serializer_class = StagedFileSerializer
26 queryset = StagedFile.objects.all()
27 parser_classes = (FormParser, MultiPartParser)
28 permission_classes = (DjangoObjectOnlyPermissions,)
29 filter_backends = (ObjectPermissionsFilter,)
30
31 def create(self, request, *args, **kwargs):
32 if "HTTP_CONTENT_RANGE" in self.request.META:
33 if not self.range_header or not self.range_match:
34 return Response(
35 {"status": "Client did not supply valid Content-Range"},
36 status=HTTP_400_BAD_REQUEST,
37 )
38
39 return super().create(request, *args, **kwargs)
40
41 def get_serializer(self, *args, **kwargs):
42 data = [
43 self._handle_file(uploaded_file)
44 for uploaded_file in self.request.FILES.values()
45 ]
46
47 if data:
48 kwargs.update({"many": True, "data": data})
49
50 return super().get_serializer(*args, **kwargs)
51
52 @property
53 def user_pk_str(self):
54 return str(self.request.user.pk)
55
56 @property
57 def client_id(self):
58 return self.request.POST.get("X-Upload-ID")
59
60 @property
61 def range_header(self):
62 return self.request.META.get("HTTP_CONTENT_RANGE")
63
64 @property
65 def range_match(self):
66 return re.match(
67 r"bytes (?P<start>[0-9]{1,32})-(?P<end>[0-9]{1,32})/(?P<length>\*|[0-9]{1,32})",
68 self.range_header,
69 )
70
71 def _handle_file(self, uploaded_file):
72 if "HTTP_CONTENT_RANGE" in self.request.META:
73 start_byte = int(self.range_match.group("start"))
74 end_byte = int(self.range_match.group("end"))
75 if (self.range_match.group("length") is None) or (
76 self.range_match.group("length") == "*"
77 ):
78 total_size = None
79 else:
80 total_size = int(self.range_match.group("length"))
81 else:
82 start_byte = 0
83 end_byte = uploaded_file.size - 1
84 total_size = uploaded_file.size
85
86 return {
87 "client_id": self.client_id,
88 "end_byte": end_byte,
89 "file": uploaded_file,
90 "filename": uploaded_file.name,
91 "start_byte": start_byte if start_byte is not None else 0,
92 "timeout": now() + timedelta(hours=6),
93 "total_size": total_size,
94 "user_pk_str": self.user_pk_str,
95 }
96
[end of app/grandchallenge/jqfileupload/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/jqfileupload/views.py b/app/grandchallenge/jqfileupload/views.py
--- a/app/grandchallenge/jqfileupload/views.py
+++ b/app/grandchallenge/jqfileupload/views.py
@@ -3,6 +3,7 @@
from django.utils.timezone import now
from rest_framework import mixins
+from rest_framework.decorators import action
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
@@ -35,7 +36,6 @@
{"status": "Client did not supply valid Content-Range"},
status=HTTP_400_BAD_REQUEST,
)
-
return super().create(request, *args, **kwargs)
def get_serializer(self, *args, **kwargs):
@@ -93,3 +93,21 @@
"total_size": total_size,
"user_pk_str": self.user_pk_str,
}
+
+ def _find_last_end_byte(self, files):
+ last_end_byte = -1
+ for file in files:
+ if file["start_byte"] != last_end_byte + 1:
+ return last_end_byte
+ last_end_byte = file["end_byte"]
+ return last_end_byte
+
+ @action(detail=False, methods=["get"])
+ def get_current_file_size(self, request):
+ client_id = request.GET.get("file", None)
+ files = (
+ StagedFile.objects.filter(client_id=client_id)
+ .order_by("start_byte")
+ .values("start_byte", "end_byte")
+ )
+ return Response({"current_size": self._find_last_end_byte(files)})
| {"golden_diff": "diff --git a/app/grandchallenge/jqfileupload/views.py b/app/grandchallenge/jqfileupload/views.py\n--- a/app/grandchallenge/jqfileupload/views.py\n+++ b/app/grandchallenge/jqfileupload/views.py\n@@ -3,6 +3,7 @@\n \n from django.utils.timezone import now\n from rest_framework import mixins\n+from rest_framework.decorators import action\n from rest_framework.parsers import FormParser, MultiPartParser\n from rest_framework.response import Response\n from rest_framework.status import HTTP_400_BAD_REQUEST\n@@ -35,7 +36,6 @@\n {\"status\": \"Client did not supply valid Content-Range\"},\n status=HTTP_400_BAD_REQUEST,\n )\n-\n return super().create(request, *args, **kwargs)\n \n def get_serializer(self, *args, **kwargs):\n@@ -93,3 +93,21 @@\n \"total_size\": total_size,\n \"user_pk_str\": self.user_pk_str,\n }\n+\n+ def _find_last_end_byte(self, files):\n+ last_end_byte = -1\n+ for file in files:\n+ if file[\"start_byte\"] != last_end_byte + 1:\n+ return last_end_byte\n+ last_end_byte = file[\"end_byte\"]\n+ return last_end_byte\n+\n+ @action(detail=False, methods=[\"get\"])\n+ def get_current_file_size(self, request):\n+ client_id = request.GET.get(\"file\", None)\n+ files = (\n+ StagedFile.objects.filter(client_id=client_id)\n+ .order_by(\"start_byte\")\n+ .values(\"start_byte\", \"end_byte\")\n+ )\n+ return Response({\"current_size\": self._find_last_end_byte(files)})\n", "issue": "Chunked uploads not attempting retries\nI forgot to implement the actual retry-portion for the jQuery file uploading. As such, jQuery file upload will only try to upload a given chunk once. See here:\r\n\r\nhttps://github.com/blueimp/jQuery-File-Upload/wiki/Chunked-file-uploads\n", "before_files": [{"content": "import re\nfrom datetime import timedelta\n\nfrom django.utils.timezone import now\nfrom rest_framework import mixins\nfrom rest_framework.parsers import FormParser, MultiPartParser\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_400_BAD_REQUEST\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyPermissions,\n)\nfrom grandchallenge.jqfileupload.models import StagedFile\nfrom grandchallenge.jqfileupload.serializers import StagedFileSerializer\n\n\nclass StagedFileViewSet(\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n GenericViewSet,\n):\n serializer_class = StagedFileSerializer\n queryset = StagedFile.objects.all()\n parser_classes = (FormParser, MultiPartParser)\n permission_classes = (DjangoObjectOnlyPermissions,)\n filter_backends = (ObjectPermissionsFilter,)\n\n def create(self, request, *args, **kwargs):\n if \"HTTP_CONTENT_RANGE\" in self.request.META:\n if not self.range_header or not self.range_match:\n return Response(\n {\"status\": \"Client did not supply valid Content-Range\"},\n status=HTTP_400_BAD_REQUEST,\n )\n\n return super().create(request, *args, **kwargs)\n\n def get_serializer(self, *args, **kwargs):\n data = [\n self._handle_file(uploaded_file)\n for uploaded_file in self.request.FILES.values()\n ]\n\n if data:\n kwargs.update({\"many\": True, \"data\": data})\n\n return super().get_serializer(*args, **kwargs)\n\n @property\n def user_pk_str(self):\n return str(self.request.user.pk)\n\n @property\n def client_id(self):\n return self.request.POST.get(\"X-Upload-ID\")\n\n @property\n def range_header(self):\n return 
self.request.META.get(\"HTTP_CONTENT_RANGE\")\n\n @property\n def range_match(self):\n return re.match(\n r\"bytes (?P<start>[0-9]{1,32})-(?P<end>[0-9]{1,32})/(?P<length>\\*|[0-9]{1,32})\",\n self.range_header,\n )\n\n def _handle_file(self, uploaded_file):\n if \"HTTP_CONTENT_RANGE\" in self.request.META:\n start_byte = int(self.range_match.group(\"start\"))\n end_byte = int(self.range_match.group(\"end\"))\n if (self.range_match.group(\"length\") is None) or (\n self.range_match.group(\"length\") == \"*\"\n ):\n total_size = None\n else:\n total_size = int(self.range_match.group(\"length\"))\n else:\n start_byte = 0\n end_byte = uploaded_file.size - 1\n total_size = uploaded_file.size\n\n return {\n \"client_id\": self.client_id,\n \"end_byte\": end_byte,\n \"file\": uploaded_file,\n \"filename\": uploaded_file.name,\n \"start_byte\": start_byte if start_byte is not None else 0,\n \"timeout\": now() + timedelta(hours=6),\n \"total_size\": total_size,\n \"user_pk_str\": self.user_pk_str,\n }\n", "path": "app/grandchallenge/jqfileupload/views.py"}]} | 1,494 | 380 |
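
Per the blueimp wiki linked in the issue, the client-side retry flow asks the server how many contiguous bytes have already arrived and sets `data.uploadedBytes` before resending; the `get_current_file_size` action added in the diff above answers that question. A standalone sketch of the helper it relies on, using toy chunk dictionaries in place of the `StagedFile` queryset values:

```python
def find_last_end_byte(files):
    """Return the end byte of the last gap-free chunk, or -1 if nothing usable arrived."""
    last_end_byte = -1
    for file in files:
        if file["start_byte"] != last_end_byte + 1:
            return last_end_byte
        last_end_byte = file["end_byte"]
    return last_end_byte


chunks = [
    {"start_byte": 0, "end_byte": 9_999_999},
    {"start_byte": 10_000_000, "end_byte": 19_999_999},
    {"start_byte": 30_000_000, "end_byte": 39_999_999},  # chunk 3 never arrived: gap
]
print(find_last_end_byte(chunks))  # 19999999 -> the client resumes after this byte
```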
gh_patches_debug_8114 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] all: "Access is denied" in os.rename() on Windows
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **almost all packages affected**
* Operating System+version: **Windows 10**
* Compiler+version: **MSVC 16**
* Conan version: **conan 1.35.2**
* Python version: **Python 3.8.7**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os_build=Windows
os=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
compiler.runtime=MD
build_type=Release
```
### Steps to reproduce (Include if Applicable)
This is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774
However most recipes still use `os.rename()` and not `tools.rename()`.
### Log
```
b2/4.2.0: Configuring sources in C:\Users\xxx\.conan\data\b2\4.2.0\_\_\source
ERROR: b2/4.2.0: Error in source() method, line 58
os.rename(extracted_dir, "source")
PermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'
```
</issue>
<code>
[start of recipes/sqlite3/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import textwrap
5
6 required_conan_version = ">=1.33.0"
7
8
9 class ConanSqlite3(ConanFile):
10 name = "sqlite3"
11 description = "Self-contained, serverless, in-process SQL database engine."
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "https://www.sqlite.org"
14 topics = ("conan", "sqlite", "database", "sql", "serverless")
15 license = "Unlicense"
16 generators = "cmake"
17 settings = "os", "compiler", "arch", "build_type"
18 exports_sources = ["CMakeLists.txt"]
19 options = {
20 "shared": [True, False],
21 "fPIC": [True, False],
22 "threadsafe": [0, 1, 2],
23 "enable_column_metadata": [True, False],
24 "enable_dbstat_vtab": [True, False],
25 "enable_explain_comments": [True, False],
26 "enable_fts3": [True, False],
27 "enable_fts3_parenthesis": [True, False],
28 "enable_fts4": [True, False],
29 "enable_fts5": [True, False],
30 "enable_json1": [True, False],
31 "enable_soundex": [True, False],
32 "enable_preupdate_hook": [True, False],
33 "enable_rtree": [True, False],
34 "use_alloca": [True, False],
35 "omit_load_extension": [True, False],
36 "enable_math_functions": [True, False],
37 "enable_unlock_notify": [True, False],
38 "enable_default_secure_delete": [True, False],
39 "disable_gethostuuid": [True, False],
40 "max_blob_size": "ANY",
41 "build_executable": [True, False],
42 "enable_default_vfs": [True, False],
43 }
44 default_options = {
45 "shared": False,
46 "fPIC": True,
47 "threadsafe": 1,
48 "enable_column_metadata": True,
49 "enable_dbstat_vtab": False,
50 "enable_explain_comments": False,
51 "enable_fts3": False,
52 "enable_fts3_parenthesis": False,
53 "enable_fts4": False,
54 "enable_fts5": False,
55 "enable_json1": False,
56 "enable_soundex": False,
57 "enable_preupdate_hook": False,
58 "enable_rtree": True,
59 "use_alloca": False,
60 "omit_load_extension": False,
61 "enable_math_functions": True,
62 "enable_unlock_notify": True,
63 "enable_default_secure_delete": False,
64 "disable_gethostuuid": False,
65 "max_blob_size": 1000000000,
66 "build_executable": True,
67 "enable_default_vfs": True,
68 }
69
70 _cmake = None
71
72 @property
73 def _source_subfolder(self):
74 return "source_subfolder"
75
76 @property
77 def _has_enable_math_function_option(self):
78 return tools.Version(self.version) >= "3.35.0"
79
80 def config_options(self):
81 if self.settings.os == "Windows":
82 del self.options.fPIC
83 if not self._has_enable_math_function_option:
84 del self.options.enable_math_functions
85
86 def configure(self):
87 if self.options.shared:
88 del self.options.fPIC
89 del self.settings.compiler.libcxx
90 del self.settings.compiler.cppstd
91
92 def validate(self):
93 if not self.options.enable_default_vfs and self.options.build_executable:
94 # Need to provide custom VFS code: https://www.sqlite.org/custombuild.html
95 raise ConanInvalidConfiguration("build_executable=True cannot be combined with enable_default_vfs=False")
96
97 def source(self):
98 tools.get(**self.conan_data["sources"][self.version])
99 url = self.conan_data["sources"][self.version]["url"]
100 archive_name = os.path.basename(url)
101 archive_name = os.path.splitext(archive_name)[0]
102 os.rename(archive_name, self._source_subfolder)
103
104 def _configure_cmake(self):
105 if self._cmake:
106 return self._cmake
107 self._cmake = CMake(self)
108 self._cmake.definitions["SQLITE3_VERSION"] = self.version
109 self._cmake.definitions["SQLITE3_BUILD_EXECUTABLE"] = self.options.build_executable
110 self._cmake.definitions["THREADSAFE"] = self.options.threadsafe
111 self._cmake.definitions["ENABLE_COLUMN_METADATA"] = self.options.enable_column_metadata
112 self._cmake.definitions["ENABLE_DBSTAT_VTAB"] = self.options.enable_dbstat_vtab
113 self._cmake.definitions["ENABLE_EXPLAIN_COMMENTS"] = self.options.enable_explain_comments
114 self._cmake.definitions["ENABLE_FTS3"] = self.options.enable_fts3
115 self._cmake.definitions["ENABLE_FTS3_PARENTHESIS"] = self.options.enable_fts3_parenthesis
116 self._cmake.definitions["ENABLE_FTS4"] = self.options.enable_fts4
117 self._cmake.definitions["ENABLE_FTS5"] = self.options.enable_fts5
118 self._cmake.definitions["ENABLE_JSON1"] = self.options.enable_json1
119 self._cmake.definitions["ENABLE_PREUPDATE_HOOK"] = self.options.enable_preupdate_hook
120 self._cmake.definitions["ENABLE_SOUNDEX"] = self.options.enable_soundex
121 self._cmake.definitions["ENABLE_RTREE"] = self.options.enable_rtree
122 self._cmake.definitions["ENABLE_UNLOCK_NOTIFY"] = self.options.enable_unlock_notify
123 self._cmake.definitions["ENABLE_DEFAULT_SECURE_DELETE"] = self.options.enable_default_secure_delete
124 self._cmake.definitions["USE_ALLOCA"] = self.options.use_alloca
125 self._cmake.definitions["OMIT_LOAD_EXTENSION"] = self.options.omit_load_extension
126 if self._has_enable_math_function_option:
127 self._cmake.definitions["ENABLE_MATH_FUNCTIONS"] = self.options.enable_math_functions
128 self._cmake.definitions["HAVE_FDATASYNC"] = True
129 self._cmake.definitions["HAVE_GMTIME_R"] = True
130 self._cmake.definitions["HAVE_LOCALTIME_R"] = self.settings.os != "Windows"
131 self._cmake.definitions["HAVE_POSIX_FALLOCATE"] = not (self.settings.os in ["Windows", "Android"] or tools.is_apple_os(self.settings.os))
132 self._cmake.definitions["HAVE_STRERROR_R"] = True
133 self._cmake.definitions["HAVE_USLEEP"] = True
134 self._cmake.definitions["DISABLE_GETHOSTUUID"] = self.options.disable_gethostuuid
135 self._cmake.definitions["MAX_BLOB_SIZE"] = self.options.max_blob_size
136 self._cmake.definitions["DISABLE_DEFAULT_VFS"] = not self.options.enable_default_vfs
137 self._cmake.configure()
138 return self._cmake
139
140 def build(self):
141 cmake = self._configure_cmake()
142 cmake.build()
143
144 def package(self):
145 header = tools.load(os.path.join(self._source_subfolder, "sqlite3.h"))
146 license_content = header[3:header.find("***", 1)]
147 tools.save(os.path.join(self.package_folder, "licenses", "LICENSE"), license_content)
148 cmake = self._configure_cmake()
149 cmake.install()
150 self._create_cmake_module_variables(
151 os.path.join(self.package_folder, self._module_file_rel_path)
152 )
153
154 @staticmethod
155 def _create_cmake_module_variables(module_file):
156 content = textwrap.dedent("""\
157 if(DEFINED SQLite_INCLUDE_DIRS)
158 set(SQLite3_INCLUDE_DIRS ${SQLite_INCLUDE_DIRS})
159 endif()
160 if(DEFINED SQLite_LIBRARIES)
161 set(SQLite3_LIBRARIES ${SQLite_LIBRARIES})
162 endif()
163 """)
164 tools.save(module_file, content)
165
166 @property
167 def _module_subfolder(self):
168 return os.path.join("lib", "cmake")
169
170 @property
171 def _module_file_rel_path(self):
172 return os.path.join(self._module_subfolder,
173 "conan-official-{}-variables.cmake".format(self.name))
174
175 def package_info(self):
176 self.cpp_info.filenames["cmake_find_package"] = "SQLite3"
177 self.cpp_info.filenames["cmake_find_package_multi"] = "SQLite3"
178 self.cpp_info.names["cmake_find_package"] = "SQLite"
179 self.cpp_info.names["cmake_find_package_multi"] = "SQLite"
180 self.cpp_info.components["sqlite"].names["cmake_find_package"] = "SQLite3"
181 self.cpp_info.components["sqlite"].names["cmake_find_package_multi"] = "SQLite3"
182 self.cpp_info.components["sqlite"].builddirs.append(self._module_subfolder)
183 self.cpp_info.components["sqlite"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
184 self.cpp_info.components["sqlite"].libs = tools.collect_libs(self)
185 if self.settings.os in ["Linux", "FreeBSD"]:
186 if self.options.threadsafe:
187 self.cpp_info.components["sqlite"].system_libs.append("pthread")
188 if not self.options.omit_load_extension:
189 self.cpp_info.components["sqlite"].system_libs.append("dl")
190 if self.options.enable_fts5 or self.options.get_safe("enable_math_functions"):
191 self.cpp_info.components["sqlite"].system_libs.append("m")
192
193 if self.options.build_executable:
194 bin_path = os.path.join(self.package_folder, "bin")
195 self.output.info("Appending PATH env var with : {}".format(bin_path))
196 self.env_info.PATH.append(bin_path)
197
[end of recipes/sqlite3/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py
--- a/recipes/sqlite3/all/conanfile.py
+++ b/recipes/sqlite3/all/conanfile.py
@@ -87,11 +87,7 @@
del self.settings.compiler.cppstd
def source(self):
- tools.get(**self.conan_data["sources"][self.version])
- url = self.conan_data["sources"][self.version]["url"]
- archive_name = os.path.basename(url)
- archive_name = os.path.splitext(archive_name)[0]
- os.rename(archive_name, self._source_subfolder)
+ tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
| {"golden_diff": "diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py\n--- a/recipes/sqlite3/all/conanfile.py\n+++ b/recipes/sqlite3/all/conanfile.py\n@@ -87,11 +87,7 @@\n del self.settings.compiler.cppstd\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- url = self.conan_data[\"sources\"][self.version][\"url\"]\n- archive_name = os.path.basename(url)\n- archive_name = os.path.splitext(archive_name)[0]\n- os.rename(archive_name, self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n \n def _configure_cmake(self):\n if self._cmake:\n", "issue": "[package] all: \"Access is denied\" in os.rename() on Windows\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **almost all packages affected**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC 16**\r\n * Conan version: **conan 1.35.2**\r\n * Python version: **Python 3.8.7**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos_build=Windows\r\nos=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\ncompiler.runtime=MD\r\nbuild_type=Release\r\n```\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThis is a known issue. Solution provided by https://github.com/conan-io/conan/pull/6774\r\nHowever most recipes still use `os.rename()` and not `tools.rename()`. \r\n\r\n### Log\r\n```\r\nb2/4.2.0: Configuring sources in C:\\Users\\xxx\\.conan\\data\\b2\\4.2.0\\_\\_\\source\r\nERROR: b2/4.2.0: Error in source() method, line 58\r\nos.rename(extracted_dir, \"source\")\r\nPermissionError: [WinError 5] Access is denied: 'build-4.2.0' -> 'source'\r\n```\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass ConanSqlite3(ConanFile):\n name = \"sqlite3\"\n description = \"Self-contained, serverless, in-process SQL database engine.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.sqlite.org\"\n topics = (\"conan\", \"sqlite\", \"database\", \"sql\", \"serverless\")\n license = \"Unlicense\"\n generators = \"cmake\"\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n exports_sources = [\"CMakeLists.txt\"]\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"threadsafe\": [0, 1, 2],\n \"enable_column_metadata\": [True, False],\n \"enable_dbstat_vtab\": [True, False],\n \"enable_explain_comments\": [True, False],\n \"enable_fts3\": [True, False],\n \"enable_fts3_parenthesis\": [True, False],\n \"enable_fts4\": [True, False],\n \"enable_fts5\": [True, False],\n \"enable_json1\": [True, False],\n \"enable_soundex\": [True, False],\n \"enable_preupdate_hook\": [True, False],\n \"enable_rtree\": [True, False],\n \"use_alloca\": [True, False],\n \"omit_load_extension\": [True, False],\n \"enable_math_functions\": [True, False],\n \"enable_unlock_notify\": [True, False],\n \"enable_default_secure_delete\": [True, False],\n \"disable_gethostuuid\": [True, False],\n \"max_blob_size\": \"ANY\",\n \"build_executable\": [True, False],\n \"enable_default_vfs\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"threadsafe\": 1,\n 
\"enable_column_metadata\": True,\n \"enable_dbstat_vtab\": False,\n \"enable_explain_comments\": False,\n \"enable_fts3\": False,\n \"enable_fts3_parenthesis\": False,\n \"enable_fts4\": False,\n \"enable_fts5\": False,\n \"enable_json1\": False,\n \"enable_soundex\": False,\n \"enable_preupdate_hook\": False,\n \"enable_rtree\": True,\n \"use_alloca\": False,\n \"omit_load_extension\": False,\n \"enable_math_functions\": True,\n \"enable_unlock_notify\": True,\n \"enable_default_secure_delete\": False,\n \"disable_gethostuuid\": False,\n \"max_blob_size\": 1000000000,\n \"build_executable\": True,\n \"enable_default_vfs\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _has_enable_math_function_option(self):\n return tools.Version(self.version) >= \"3.35.0\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if not self._has_enable_math_function_option:\n del self.options.enable_math_functions\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def validate(self):\n if not self.options.enable_default_vfs and self.options.build_executable:\n # Need to provide custom VFS code: https://www.sqlite.org/custombuild.html\n raise ConanInvalidConfiguration(\"build_executable=True cannot be combined with enable_default_vfs=False\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n url = self.conan_data[\"sources\"][self.version][\"url\"]\n archive_name = os.path.basename(url)\n archive_name = os.path.splitext(archive_name)[0]\n os.rename(archive_name, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"SQLITE3_VERSION\"] = self.version\n self._cmake.definitions[\"SQLITE3_BUILD_EXECUTABLE\"] = self.options.build_executable\n self._cmake.definitions[\"THREADSAFE\"] = self.options.threadsafe\n self._cmake.definitions[\"ENABLE_COLUMN_METADATA\"] = self.options.enable_column_metadata\n self._cmake.definitions[\"ENABLE_DBSTAT_VTAB\"] = self.options.enable_dbstat_vtab\n self._cmake.definitions[\"ENABLE_EXPLAIN_COMMENTS\"] = self.options.enable_explain_comments\n self._cmake.definitions[\"ENABLE_FTS3\"] = self.options.enable_fts3\n self._cmake.definitions[\"ENABLE_FTS3_PARENTHESIS\"] = self.options.enable_fts3_parenthesis\n self._cmake.definitions[\"ENABLE_FTS4\"] = self.options.enable_fts4\n self._cmake.definitions[\"ENABLE_FTS5\"] = self.options.enable_fts5\n self._cmake.definitions[\"ENABLE_JSON1\"] = self.options.enable_json1\n self._cmake.definitions[\"ENABLE_PREUPDATE_HOOK\"] = self.options.enable_preupdate_hook\n self._cmake.definitions[\"ENABLE_SOUNDEX\"] = self.options.enable_soundex\n self._cmake.definitions[\"ENABLE_RTREE\"] = self.options.enable_rtree\n self._cmake.definitions[\"ENABLE_UNLOCK_NOTIFY\"] = self.options.enable_unlock_notify\n self._cmake.definitions[\"ENABLE_DEFAULT_SECURE_DELETE\"] = self.options.enable_default_secure_delete\n self._cmake.definitions[\"USE_ALLOCA\"] = self.options.use_alloca\n self._cmake.definitions[\"OMIT_LOAD_EXTENSION\"] = self.options.omit_load_extension\n if self._has_enable_math_function_option:\n self._cmake.definitions[\"ENABLE_MATH_FUNCTIONS\"] = self.options.enable_math_functions\n self._cmake.definitions[\"HAVE_FDATASYNC\"] = True\n self._cmake.definitions[\"HAVE_GMTIME_R\"] = True\n 
self._cmake.definitions[\"HAVE_LOCALTIME_R\"] = self.settings.os != \"Windows\"\n self._cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = not (self.settings.os in [\"Windows\", \"Android\"] or tools.is_apple_os(self.settings.os))\n self._cmake.definitions[\"HAVE_STRERROR_R\"] = True\n self._cmake.definitions[\"HAVE_USLEEP\"] = True\n self._cmake.definitions[\"DISABLE_GETHOSTUUID\"] = self.options.disable_gethostuuid\n self._cmake.definitions[\"MAX_BLOB_SIZE\"] = self.options.max_blob_size\n self._cmake.definitions[\"DISABLE_DEFAULT_VFS\"] = not self.options.enable_default_vfs\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n header = tools.load(os.path.join(self._source_subfolder, \"sqlite3.h\"))\n license_content = header[3:header.find(\"***\", 1)]\n tools.save(os.path.join(self.package_folder, \"licenses\", \"LICENSE\"), license_content)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_file_rel_path)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED SQLite_INCLUDE_DIRS)\n set(SQLite3_INCLUDE_DIRS ${SQLite_INCLUDE_DIRS})\n endif()\n if(DEFINED SQLite_LIBRARIES)\n set(SQLite3_LIBRARIES ${SQLite_LIBRARIES})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(self._module_subfolder,\n \"conan-official-{}-variables.cmake\".format(self.name))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"SQLite3\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"SQLite3\"\n self.cpp_info.names[\"cmake_find_package\"] = \"SQLite\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SQLite\"\n self.cpp_info.components[\"sqlite\"].names[\"cmake_find_package\"] = \"SQLite3\"\n self.cpp_info.components[\"sqlite\"].names[\"cmake_find_package_multi\"] = \"SQLite3\"\n self.cpp_info.components[\"sqlite\"].builddirs.append(self._module_subfolder)\n self.cpp_info.components[\"sqlite\"].build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.components[\"sqlite\"].libs = tools.collect_libs(self)\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n if self.options.threadsafe:\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"pthread\")\n if not self.options.omit_load_extension:\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"dl\")\n if self.options.enable_fts5 or self.options.get_safe(\"enable_math_functions\"):\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"m\")\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/sqlite3/all/conanfile.py"}]} | 3,499 | 189 |
gh_patches_debug_38769 | rasdani/github-patches | git_diff | ansible-collections__community.aws-872 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add partition strategy to placement groups module
### Summary
Add partition as a strategy for the community.aws.ec2_placement_group module.
Also add an option to choose the actual number of partitions (minimum 2, which is the default, and a maximum of 7). This option would only be taken into account when the strategy is set to partition.
### Issue Type
Feature Idea
### Component Name
ec2_placement_group
### Additional Information
Possible module definition
```yaml
- name: Create a Partition placement group.
community.aws.ec2_placement_group:
name: my-cluster
state: present
strategy: partition
partition_number: 4
```
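For reference, the underlying EC2 API already supports this strategy; below is a hedged sketch of the boto3 call such an option would map to (the `Strategy` and `PartitionCount` parameter names are assumptions to be verified against the EC2 `CreatePlacementGroup` documentation):

```python
# Sketch only: the boto3 call the module would need to issue for a partition group.
import boto3

ec2 = boto3.client("ec2")
ec2.create_placement_group(
    GroupName="my-cluster",
    Strategy="partition",   # alongside the existing "cluster" and "spread" choices
    PartitionCount=4,       # only meaningful when Strategy is "partition"
)
```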
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/ec2_placement_group.py]
1 #!/usr/bin/python
2 # Copyright (c) 2017 Ansible Project
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4
5 from __future__ import absolute_import, division, print_function
6 __metaclass__ = type
7
8
9 DOCUMENTATION = '''
10 ---
11 module: ec2_placement_group
12 version_added: 1.0.0
13 short_description: Create or delete an EC2 Placement Group
14 description:
15 - Create an EC2 Placement Group; if the placement group already exists,
16 nothing is done. Or, delete an existing placement group. If the placement
17 group is absent, do nothing. See also
18 U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)
19 author: "Brad Macpherson (@iiibrad)"
20 options:
21 name:
22 description:
23 - The name for the placement group.
24 required: true
25 type: str
26 state:
27 description:
28 - Create or delete placement group.
29 default: present
30 choices: [ 'present', 'absent' ]
31 type: str
32 strategy:
33 description:
34 - Placement group strategy. Cluster will cluster instances into a
35 low-latency group in a single Availability Zone, while Spread spreads
36 instances across underlying hardware.
37 default: cluster
38 choices: [ 'cluster', 'spread' ]
39 type: str
40 extends_documentation_fragment:
41 - amazon.aws.aws
42 - amazon.aws.ec2
43
44 '''
45
46 EXAMPLES = '''
47 # Note: These examples do not set authentication details, see the AWS Guide
48 # for details.
49
50 - name: Create a placement group.
51 community.aws.ec2_placement_group:
52 name: my-cluster
53 state: present
54
55 - name: Create a Spread placement group.
56 community.aws.ec2_placement_group:
57 name: my-cluster
58 state: present
59 strategy: spread
60
61 - name: Delete a placement group.
62 community.aws.ec2_placement_group:
63 name: my-cluster
64 state: absent
65
66 '''
67
68
69 RETURN = '''
70 placement_group:
71 description: Placement group attributes
72 returned: when state != absent
73 type: complex
74 contains:
75 name:
76 description: PG name
77 type: str
78 sample: my-cluster
79 state:
80 description: PG state
81 type: str
82 sample: "available"
83 strategy:
84 description: PG strategy
85 type: str
86 sample: "cluster"
87
88 '''
89
90 try:
91 import botocore
92 except ImportError:
93 pass # caught by AnsibleAWSModule
94
95 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
96 from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
97 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
98
99
100 @AWSRetry.exponential_backoff()
101 def get_placement_group_details(connection, module):
102 name = module.params.get("name")
103 try:
104 response = connection.describe_placement_groups(
105 Filters=[{
106 "Name": "group-name",
107 "Values": [name]
108 }])
109 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
110 module.fail_json_aws(
111 e,
112 msg="Couldn't find placement group named [%s]" % name)
113
114 if len(response['PlacementGroups']) != 1:
115 return None
116 else:
117 placement_group = response['PlacementGroups'][0]
118 return {
119 "name": placement_group['GroupName'],
120 "state": placement_group['State'],
121 "strategy": placement_group['Strategy'],
122 }
123
124
125 @AWSRetry.exponential_backoff()
126 def create_placement_group(connection, module):
127 name = module.params.get("name")
128 strategy = module.params.get("strategy")
129
130 try:
131 connection.create_placement_group(
132 GroupName=name, Strategy=strategy, DryRun=module.check_mode)
133 except is_boto3_error_code('DryRunOperation'):
134 module.exit_json(changed=True, placement_group={
135 "name": name,
136 "state": 'DryRun',
137 "strategy": strategy,
138 })
139 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
140 module.fail_json_aws(
141 e,
142 msg="Couldn't create placement group [%s]" % name)
143
144 module.exit_json(changed=True,
145 placement_group=get_placement_group_details(
146 connection, module
147 ))
148
149
150 @AWSRetry.exponential_backoff()
151 def delete_placement_group(connection, module):
152 name = module.params.get("name")
153
154 try:
155 connection.delete_placement_group(
156 GroupName=name, DryRun=module.check_mode)
157 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
158 module.fail_json_aws(
159 e,
160 msg="Couldn't delete placement group [%s]" % name)
161
162 module.exit_json(changed=True)
163
164
165 def main():
166 argument_spec = dict(
167 name=dict(required=True, type='str'),
168 state=dict(default='present', choices=['present', 'absent']),
169 strategy=dict(default='cluster', choices=['cluster', 'spread'])
170 )
171
172 module = AnsibleAWSModule(
173 argument_spec=argument_spec,
174 supports_check_mode=True
175 )
176
177 connection = module.client('ec2')
178
179 state = module.params.get("state")
180
181 if state == 'present':
182 placement_group = get_placement_group_details(connection, module)
183 if placement_group is None:
184 create_placement_group(connection, module)
185 else:
186 strategy = module.params.get("strategy")
187 if placement_group['strategy'] == strategy:
188 module.exit_json(
189 changed=False, placement_group=placement_group)
190 else:
191 name = module.params.get("name")
192 module.fail_json(
193 msg=("Placement group '{}' exists, can't change strategy" +
194 " from '{}' to '{}'").format(
195 name,
196 placement_group['strategy'],
197 strategy))
198
199 elif state == 'absent':
200 placement_group = get_placement_group_details(connection, module)
201 if placement_group is None:
202 module.exit_json(changed=False)
203 else:
204 delete_placement_group(connection, module)
205
206
207 if __name__ == '__main__':
208 main()
209
[end of plugins/modules/ec2_placement_group.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py
--- a/plugins/modules/ec2_placement_group.py
+++ b/plugins/modules/ec2_placement_group.py
@@ -23,6 +23,13 @@
- The name for the placement group.
required: true
type: str
+ partition_count:
+ description:
+ - The number of partitions.
+ - Valid only when I(Strategy) is set to C(partition).
+ - Must be a value between C(1) and C(7).
+ type: int
+ version_added: 3.1.0
state:
description:
- Create or delete placement group.
@@ -35,7 +42,7 @@
low-latency group in a single Availability Zone, while Spread spreads
instances across underlying hardware.
default: cluster
- choices: [ 'cluster', 'spread' ]
+ choices: [ 'cluster', 'spread', 'partition' ]
type: str
extends_documentation_fragment:
- amazon.aws.aws
@@ -58,6 +65,13 @@
state: present
strategy: spread
+- name: Create a Partition strategy placement group.
+ community.aws.ec2_placement_group:
+ name: my-cluster
+ state: present
+ strategy: partition
+ partition_count: 3
+
- name: Delete a placement group.
community.aws.ec2_placement_group:
name: my-cluster
@@ -126,10 +140,21 @@
def create_placement_group(connection, module):
name = module.params.get("name")
strategy = module.params.get("strategy")
+ partition_count = module.params.get("partition_count")
+
+ if strategy != 'partition' and partition_count:
+ module.fail_json(
+ msg="'partition_count' can only be set when strategy is set to 'partition'.")
+
+ params = {}
+ params['GroupName'] = name
+ params['Strategy'] = strategy
+ if partition_count:
+ params['PartitionCount'] = partition_count
+ params['DryRun'] = module.check_mode
try:
- connection.create_placement_group(
- GroupName=name, Strategy=strategy, DryRun=module.check_mode)
+ connection.create_placement_group(**params)
except is_boto3_error_code('DryRunOperation'):
module.exit_json(changed=True, placement_group={
"name": name,
@@ -165,8 +190,9 @@
def main():
argument_spec = dict(
name=dict(required=True, type='str'),
+ partition_count=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
- strategy=dict(default='cluster', choices=['cluster', 'spread'])
+ strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition'])
)
module = AnsibleAWSModule(
| {"golden_diff": "diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py\n--- a/plugins/modules/ec2_placement_group.py\n+++ b/plugins/modules/ec2_placement_group.py\n@@ -23,6 +23,13 @@\n - The name for the placement group.\n required: true\n type: str\n+ partition_count:\n+ description:\n+ - The number of partitions.\n+ - Valid only when I(Strategy) is set to C(partition).\n+ - Must be a value between C(1) and C(7).\n+ type: int\n+ version_added: 3.1.0\n state:\n description:\n - Create or delete placement group.\n@@ -35,7 +42,7 @@\n low-latency group in a single Availability Zone, while Spread spreads\n instances across underlying hardware.\n default: cluster\n- choices: [ 'cluster', 'spread' ]\n+ choices: [ 'cluster', 'spread', 'partition' ]\n type: str\n extends_documentation_fragment:\n - amazon.aws.aws\n@@ -58,6 +65,13 @@\n state: present\n strategy: spread\n \n+- name: Create a Partition strategy placement group.\n+ community.aws.ec2_placement_group:\n+ name: my-cluster\n+ state: present\n+ strategy: partition\n+ partition_count: 3\n+\n - name: Delete a placement group.\n community.aws.ec2_placement_group:\n name: my-cluster\n@@ -126,10 +140,21 @@\n def create_placement_group(connection, module):\n name = module.params.get(\"name\")\n strategy = module.params.get(\"strategy\")\n+ partition_count = module.params.get(\"partition_count\")\n+\n+ if strategy != 'partition' and partition_count:\n+ module.fail_json(\n+ msg=\"'partition_count' can only be set when strategy is set to 'partition'.\")\n+\n+ params = {}\n+ params['GroupName'] = name\n+ params['Strategy'] = strategy\n+ if partition_count:\n+ params['PartitionCount'] = partition_count\n+ params['DryRun'] = module.check_mode\n \n try:\n- connection.create_placement_group(\n- GroupName=name, Strategy=strategy, DryRun=module.check_mode)\n+ connection.create_placement_group(**params)\n except is_boto3_error_code('DryRunOperation'):\n module.exit_json(changed=True, placement_group={\n \"name\": name,\n@@ -165,8 +190,9 @@\n def main():\n argument_spec = dict(\n name=dict(required=True, type='str'),\n+ partition_count=dict(type='int'),\n state=dict(default='present', choices=['present', 'absent']),\n- strategy=dict(default='cluster', choices=['cluster', 'spread'])\n+ strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition'])\n )\n \n module = AnsibleAWSModule(\n", "issue": "Add partition strategy to placement groups module\n### Summary\n\nAdd partition as a strategy for the community.aws.ec2_placement_group module.\r\n\r\nAlso add an option to choose the actual number of partitions (min 2 which is the default and a max of 7). 
This option would be taken into account when the strategy is set to partition.\n\n### Issue Type\n\nFeature Idea\n\n### Component Name\n\nec2_placement_group\n\n### Additional Information\n\nPossible module definition\r\n```yaml (paste below)\r\n- name: Create a Spread placement group.\r\n community.aws.ec2_placement_group:\r\n name: my-cluster\r\n state: present\r\n strategy: partition\r\n partition_number: 4\r\n```\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# Copyright (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_placement_group\nversion_added: 1.0.0\nshort_description: Create or delete an EC2 Placement Group\ndescription:\n - Create an EC2 Placement Group; if the placement group already exists,\n nothing is done. Or, delete an existing placement group. If the placement\n group is absent, do nothing. See also\n U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html)\nauthor: \"Brad Macpherson (@iiibrad)\"\noptions:\n name:\n description:\n - The name for the placement group.\n required: true\n type: str\n state:\n description:\n - Create or delete placement group.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\n strategy:\n description:\n - Placement group strategy. Cluster will cluster instances into a\n low-latency group in a single Availability Zone, while Spread spreads\n instances across underlying hardware.\n default: cluster\n choices: [ 'cluster', 'spread' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide\n# for details.\n\n- name: Create a placement group.\n community.aws.ec2_placement_group:\n name: my-cluster\n state: present\n\n- name: Create a Spread placement group.\n community.aws.ec2_placement_group:\n name: my-cluster\n state: present\n strategy: spread\n\n- name: Delete a placement group.\n community.aws.ec2_placement_group:\n name: my-cluster\n state: absent\n\n'''\n\n\nRETURN = '''\nplacement_group:\n description: Placement group attributes\n returned: when state != absent\n type: complex\n contains:\n name:\n description: PG name\n type: str\n sample: my-cluster\n state:\n description: PG state\n type: str\n sample: \"available\"\n strategy:\n description: PG strategy\n type: str\n sample: \"cluster\"\n\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # caught by AnsibleAWSModule\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\[email protected]_backoff()\ndef get_placement_group_details(connection, module):\n name = module.params.get(\"name\")\n try:\n response = connection.describe_placement_groups(\n Filters=[{\n \"Name\": \"group-name\",\n \"Values\": [name]\n }])\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(\n e,\n msg=\"Couldn't find placement group named [%s]\" % name)\n\n if len(response['PlacementGroups']) != 1:\n return None\n else:\n placement_group = response['PlacementGroups'][0]\n return {\n \"name\": 
placement_group['GroupName'],\n \"state\": placement_group['State'],\n \"strategy\": placement_group['Strategy'],\n }\n\n\[email protected]_backoff()\ndef create_placement_group(connection, module):\n name = module.params.get(\"name\")\n strategy = module.params.get(\"strategy\")\n\n try:\n connection.create_placement_group(\n GroupName=name, Strategy=strategy, DryRun=module.check_mode)\n except is_boto3_error_code('DryRunOperation'):\n module.exit_json(changed=True, placement_group={\n \"name\": name,\n \"state\": 'DryRun',\n \"strategy\": strategy,\n })\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(\n e,\n msg=\"Couldn't create placement group [%s]\" % name)\n\n module.exit_json(changed=True,\n placement_group=get_placement_group_details(\n connection, module\n ))\n\n\[email protected]_backoff()\ndef delete_placement_group(connection, module):\n name = module.params.get(\"name\")\n\n try:\n connection.delete_placement_group(\n GroupName=name, DryRun=module.check_mode)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(\n e,\n msg=\"Couldn't delete placement group [%s]\" % name)\n\n module.exit_json(changed=True)\n\n\ndef main():\n argument_spec = dict(\n name=dict(required=True, type='str'),\n state=dict(default='present', choices=['present', 'absent']),\n strategy=dict(default='cluster', choices=['cluster', 'spread'])\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True\n )\n\n connection = module.client('ec2')\n\n state = module.params.get(\"state\")\n\n if state == 'present':\n placement_group = get_placement_group_details(connection, module)\n if placement_group is None:\n create_placement_group(connection, module)\n else:\n strategy = module.params.get(\"strategy\")\n if placement_group['strategy'] == strategy:\n module.exit_json(\n changed=False, placement_group=placement_group)\n else:\n name = module.params.get(\"name\")\n module.fail_json(\n msg=(\"Placement group '{}' exists, can't change strategy\" +\n \" from '{}' to '{}'\").format(\n name,\n placement_group['strategy'],\n strategy))\n\n elif state == 'absent':\n placement_group = get_placement_group_details(connection, module)\n if placement_group is None:\n module.exit_json(changed=False)\n else:\n delete_placement_group(connection, module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_placement_group.py"}]} | 2,564 | 640 |
gh_patches_debug_42476 | rasdani/github-patches | git_diff | ibis-project__ibis-2719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement .insert() for SQLite
Some backends implement the `.insert()` method to insert rows into tables, but SQLite doesn't. Besides being useful for users, it would be good to have it working and to add an example in tutorial 5, so it can serve as a reference for other backends too.
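A hedged sketch of the usage this would enable, modeled on the `.insert()` other backends already expose (the database path and table name are made up, and the exact signature is an assumption until implemented):

```python
# Illustrative only: the desired API, not current SQLite backend behaviour.
import pandas as pd
import ibis

con = ibis.sqlite.connect("example.db")                # hypothetical database file
df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})

con.insert("my_table", df)                   # append rows from a DataFrame
con.insert("my_table", df, overwrite=True)   # replace the table's existing contents
```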
</issue>
<code>
[start of ibis/backends/base/sql/alchemy/client.py]
1 import contextlib
2 import functools
3 from typing import List, Optional
4
5 import pandas as pd
6 import sqlalchemy as sa
7 from pkg_resources import parse_version
8
9 import ibis
10 import ibis.expr.datatypes as dt
11 import ibis.expr.schema as sch
12 import ibis.util as util
13 from ibis.backends.base.sql.compiler import Dialect
14 from ibis.client import Query, SQLClient
15
16 from .datatypes import to_sqla_type
17 from .geospatial import geospatial_supported
18 from .query_builder import build_ast
19 from .translator import AlchemyExprTranslator
20
21 if geospatial_supported:
22 import geoalchemy2.shape as shape
23 import geopandas
24
25
26 class _AlchemyProxy:
27 """
28 Wraps a SQLAlchemy ResultProxy and ensures that .close() is called on
29 garbage collection
30 """
31
32 def __init__(self, proxy):
33 self.proxy = proxy
34
35 def __del__(self):
36 self._close_cursor()
37
38 def _close_cursor(self):
39 self.proxy.close()
40
41 def __enter__(self):
42 return self
43
44 def __exit__(self, type, value, tb):
45 self._close_cursor()
46
47 def fetchall(self):
48 return self.proxy.fetchall()
49
50
51 def _invalidates_reflection_cache(f):
52 """Invalidate the SQLAlchemy reflection cache if `f` performs an operation
53 that mutates database or table metadata such as ``CREATE TABLE``,
54 ``DROP TABLE``, etc.
55
56 Parameters
57 ----------
58 f : callable
59 A method on :class:`ibis.sql.alchemy.AlchemyClient`
60 """
61
62 @functools.wraps(f)
63 def wrapped(self, *args, **kwargs):
64 result = f(self, *args, **kwargs)
65
66 # only invalidate the cache after we've succesfully called the wrapped
67 # function
68 self._reflection_cache_is_dirty = True
69 return result
70
71 return wrapped
72
73
74 def _maybe_to_geodataframe(df, schema):
75 """
76 If the required libraries for geospatial support are installed, and if a
77 geospatial column is present in the dataframe, convert it to a
78 GeoDataFrame.
79 """
80
81 def to_shapely(row, name):
82 return shape.to_shape(row[name]) if row[name] is not None else None
83
84 if len(df) and geospatial_supported:
85 geom_col = None
86 for name, dtype in schema.items():
87 if isinstance(dtype, dt.GeoSpatial):
88 geom_col = geom_col or name
89 df[name] = df.apply(lambda x: to_shapely(x, name), axis=1)
90 if geom_col:
91 df = geopandas.GeoDataFrame(df, geometry=geom_col)
92 return df
93
94
95 class AlchemyQuery(Query):
96 def _fetch(self, cursor):
97 df = pd.DataFrame.from_records(
98 cursor.proxy.fetchall(),
99 columns=cursor.proxy.keys(),
100 coerce_float=True,
101 )
102 schema = self.schema()
103 return _maybe_to_geodataframe(schema.apply_to(df), schema)
104
105
106 class AlchemyDialect(Dialect):
107
108 translator = AlchemyExprTranslator
109
110
111 class AlchemyClient(SQLClient):
112
113 dialect = AlchemyDialect
114 query_class = AlchemyQuery
115 has_attachment = False
116
117 def __init__(self, con: sa.engine.Engine) -> None:
118 super().__init__()
119 self.con = con
120 self.meta = sa.MetaData(bind=con)
121 self._inspector = sa.inspect(con)
122 self._reflection_cache_is_dirty = False
123 self._schemas = {}
124
125 @property
126 def inspector(self):
127 if self._reflection_cache_is_dirty:
128 self._inspector.info_cache.clear()
129 return self._inspector
130
131 @contextlib.contextmanager
132 def begin(self):
133 with self.con.begin() as bind:
134 yield bind
135
136 @_invalidates_reflection_cache
137 def create_table(self, name, expr=None, schema=None, database=None):
138 if database == self.database_name:
139 # avoid fully qualified name
140 database = None
141
142 if database is not None:
143 raise NotImplementedError(
144 'Creating tables from a different database is not yet '
145 'implemented'
146 )
147
148 if expr is None and schema is None:
149 raise ValueError('You must pass either an expression or a schema')
150
151 if expr is not None and schema is not None:
152 if not expr.schema().equals(ibis.schema(schema)):
153 raise TypeError(
154 'Expression schema is not equal to passed schema. '
155 'Try passing the expression without the schema'
156 )
157 if schema is None:
158 schema = expr.schema()
159
160 self._schemas[self._fully_qualified_name(name, database)] = schema
161 t = self._table_from_schema(
162 name, schema, database=database or self.current_database
163 )
164
165 with self.begin() as bind:
166 t.create(bind=bind)
167 if expr is not None:
168 bind.execute(
169 t.insert().from_select(list(expr.columns), expr.compile())
170 )
171
172 def _columns_from_schema(
173 self, name: str, schema: sch.Schema
174 ) -> List[sa.Column]:
175 return [
176 sa.Column(colname, to_sqla_type(dtype), nullable=dtype.nullable)
177 for colname, dtype in zip(schema.names, schema.types)
178 ]
179
180 def _table_from_schema(
181 self, name: str, schema: sch.Schema, database: Optional[str] = None
182 ) -> sa.Table:
183 columns = self._columns_from_schema(name, schema)
184 return sa.Table(name, self.meta, *columns)
185
186 @_invalidates_reflection_cache
187 def drop_table(
188 self,
189 table_name: str,
190 database: Optional[str] = None,
191 force: bool = False,
192 ) -> None:
193 if database == self.database_name:
194 # avoid fully qualified name
195 database = None
196
197 if database is not None:
198 raise NotImplementedError(
199 'Dropping tables from a different database is not yet '
200 'implemented'
201 )
202
203 t = self._get_sqla_table(table_name, schema=database, autoload=False)
204 t.drop(checkfirst=force)
205
206 assert (
207 not t.exists()
208 ), 'Something went wrong during DROP of table {!r}'.format(t.name)
209
210 self.meta.remove(t)
211
212 qualified_name = self._fully_qualified_name(table_name, database)
213
214 try:
215 del self._schemas[qualified_name]
216 except KeyError: # schemas won't be cached if created with raw_sql
217 pass
218
219 def load_data(
220 self,
221 table_name: str,
222 data: pd.DataFrame,
223 database: str = None,
224 if_exists: str = 'fail',
225 ):
226 """
227 Load data from a dataframe to the backend.
228
229 Parameters
230 ----------
231 table_name : string
232 data: pandas.DataFrame
233 database : string, optional
234 if_exists : string, optional, default 'fail'
235 The values available are: {‘fail’, ‘replace’, ‘append’}
236
237 Raises
238 ------
239 NotImplementedError
240 Loading data to a table from a different database is not
241 yet implemented
242 """
243 if database == self.database_name:
244 # avoid fully qualified name
245 database = None
246
247 if database is not None:
248 raise NotImplementedError(
249 'Loading data to a table from a different database is not '
250 'yet implemented'
251 )
252
253 params = {}
254 if self.has_attachment:
255 # for database with attachment
256 # see: https://github.com/ibis-project/ibis/issues/1930
257 params['schema'] = self.database_name
258
259 data.to_sql(
260 table_name,
261 con=self.con,
262 index=False,
263 if_exists=if_exists,
264 **params,
265 )
266
267 def truncate_table(
268 self, table_name: str, database: Optional[str] = None
269 ) -> None:
270 t = self._get_sqla_table(table_name, schema=database)
271 t.delete().execute()
272
273 def list_tables(
274 self,
275 like: Optional[str] = None,
276 database: Optional[str] = None,
277 schema: Optional[str] = None,
278 ) -> List[str]:
279 """List tables/views in the current or indicated database.
280
281 Parameters
282 ----------
283 like
284 Checks for this string contained in name
285 database
286 If not passed, uses the current database
287 schema
288 The schema namespace that tables should be listed from
289
290 Returns
291 -------
292 List[str]
293
294 """
295 inspector = self.inspector
296 # inspector returns a mutable version of its names, so make a copy.
297 names = inspector.get_table_names(schema=schema).copy()
298 names.extend(inspector.get_view_names(schema=schema))
299 if like is not None:
300 names = [x for x in names if like in x]
301 return sorted(names)
302
303 def _execute(self, query: str, results: bool = True):
304 return _AlchemyProxy(self.con.execute(query))
305
306 @_invalidates_reflection_cache
307 def raw_sql(self, query: str, results: bool = False):
308 return super().raw_sql(query, results=results)
309
310 def _build_ast(self, expr, context):
311 return build_ast(expr, context)
312
313 def _log(self, sql):
314 try:
315 query_str = str(sql)
316 except sa.exc.UnsupportedCompilationError:
317 pass
318 else:
319 util.log(query_str)
320
321 def _get_sqla_table(self, name, schema=None, autoload=True):
322 return sa.Table(name, self.meta, schema=schema, autoload=autoload)
323
324 def _sqla_table_to_expr(self, table):
325 node = self.table_class(table, self)
326 return self.table_expr_class(node)
327
328 @property
329 def version(self):
330 vstring = '.'.join(map(str, self.con.dialect.server_version_info))
331 return parse_version(vstring)
332
[end of ibis/backends/base/sql/alchemy/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/base/sql/alchemy/client.py b/ibis/backends/base/sql/alchemy/client.py
--- a/ibis/backends/base/sql/alchemy/client.py
+++ b/ibis/backends/base/sql/alchemy/client.py
@@ -1,6 +1,6 @@
import contextlib
import functools
-from typing import List, Optional
+from typing import List, Optional, Union
import pandas as pd
import sqlalchemy as sa
@@ -9,6 +9,7 @@
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
+import ibis.expr.types as ir
import ibis.util as util
from ibis.backends.base.sql.compiler import Dialect
from ibis.client import Query, SQLClient
@@ -229,7 +230,7 @@
Parameters
----------
table_name : string
- data: pandas.DataFrame
+ data : pandas.DataFrame
database : string, optional
if_exists : string, optional, default 'fail'
The values available are: {‘fail’, ‘replace’, ‘append’}
@@ -329,3 +330,93 @@
def version(self):
vstring = '.'.join(map(str, self.con.dialect.server_version_info))
return parse_version(vstring)
+
+ def insert(
+ self,
+ table_name: str,
+ obj: Union[pd.DataFrame, ir.TableExpr],
+ database: Optional[str] = None,
+ overwrite: Optional[bool] = False,
+ ) -> None:
+ """
+ Insert the given data to a table in backend.
+
+ Parameters
+ ----------
+ table_name : string
+ name of the table to which data needs to be inserted
+ obj : pandas DataFrame or ibis TableExpr
+ obj is either the dataframe (pd.DataFrame) containing data
+ which needs to be inserted to table_name or
+ the TableExpr type which ibis provides with data which needs
+ to be inserted to table_name
+ database : string, optional
+ name of the attached database that the table is located in.
+ overwrite : boolean, default False
+ If True, will replace existing contents of table else not
+
+ Raises
+ -------
+ NotImplementedError
+ Inserting data to a table from a different database is not
+ yet implemented
+
+ ValueError
+ No operation is being performed. Either the obj parameter
+ is not a pandas DataFrame or is not a ibis TableExpr.
+ The given obj is of type type(obj).__name__ .
+
+ """
+
+ if database == self.database_name:
+ # avoid fully qualified name
+ database = None
+
+ if database is not None:
+ raise NotImplementedError(
+ 'Inserting data to a table from a different database is not '
+ 'yet implemented'
+ )
+
+ params = {}
+ if self.has_attachment:
+ # for database with attachment
+ # see: https://github.com/ibis-project/ibis/issues/1930
+ params['schema'] = self.database_name
+
+ if isinstance(obj, pd.DataFrame):
+ obj.to_sql(
+ table_name,
+ self.con,
+ index=False,
+ if_exists='replace' if overwrite else 'append',
+ **params,
+ )
+ elif isinstance(obj, ir.TableExpr):
+ to_table_expr = self.table(table_name)
+ to_table_schema = to_table_expr.schema()
+
+ if overwrite:
+ self.drop_table(table_name, database=database)
+ self.create_table(
+ table_name, schema=to_table_schema, database=database,
+ )
+
+ to_table = self._get_sqla_table(table_name, schema=database)
+
+ from_table_expr = obj
+
+ with self.begin() as bind:
+ if from_table_expr is not None:
+ bind.execute(
+ to_table.insert().from_select(
+ list(from_table_expr.columns),
+ from_table_expr.compile(),
+ )
+ )
+ else:
+ raise ValueError(
+ "No operation is being performed. Either the obj parameter "
+ "is not a pandas DataFrame or is not a ibis TableExpr."
+ f"The given obj is of type {type(obj).__name__} ."
+ )
| {"golden_diff": "diff --git a/ibis/backends/base/sql/alchemy/client.py b/ibis/backends/base/sql/alchemy/client.py\n--- a/ibis/backends/base/sql/alchemy/client.py\n+++ b/ibis/backends/base/sql/alchemy/client.py\n@@ -1,6 +1,6 @@\n import contextlib\n import functools\n-from typing import List, Optional\n+from typing import List, Optional, Union\n \n import pandas as pd\n import sqlalchemy as sa\n@@ -9,6 +9,7 @@\n import ibis\n import ibis.expr.datatypes as dt\n import ibis.expr.schema as sch\n+import ibis.expr.types as ir\n import ibis.util as util\n from ibis.backends.base.sql.compiler import Dialect\n from ibis.client import Query, SQLClient\n@@ -229,7 +230,7 @@\n Parameters\n ----------\n table_name : string\n- data: pandas.DataFrame\n+ data : pandas.DataFrame\n database : string, optional\n if_exists : string, optional, default 'fail'\n The values available are: {\u2018fail\u2019, \u2018replace\u2019, \u2018append\u2019}\n@@ -329,3 +330,93 @@\n def version(self):\n vstring = '.'.join(map(str, self.con.dialect.server_version_info))\n return parse_version(vstring)\n+\n+ def insert(\n+ self,\n+ table_name: str,\n+ obj: Union[pd.DataFrame, ir.TableExpr],\n+ database: Optional[str] = None,\n+ overwrite: Optional[bool] = False,\n+ ) -> None:\n+ \"\"\"\n+ Insert the given data to a table in backend.\n+\n+ Parameters\n+ ----------\n+ table_name : string\n+ name of the table to which data needs to be inserted\n+ obj : pandas DataFrame or ibis TableExpr\n+ obj is either the dataframe (pd.DataFrame) containing data\n+ which needs to be inserted to table_name or\n+ the TableExpr type which ibis provides with data which needs\n+ to be inserted to table_name\n+ database : string, optional\n+ name of the attached database that the table is located in.\n+ overwrite : boolean, default False\n+ If True, will replace existing contents of table else not\n+\n+ Raises\n+ -------\n+ NotImplementedError\n+ Inserting data to a table from a different database is not\n+ yet implemented\n+\n+ ValueError\n+ No operation is being performed. Either the obj parameter\n+ is not a pandas DataFrame or is not a ibis TableExpr.\n+ The given obj is of type type(obj).__name__ .\n+\n+ \"\"\"\n+\n+ if database == self.database_name:\n+ # avoid fully qualified name\n+ database = None\n+\n+ if database is not None:\n+ raise NotImplementedError(\n+ 'Inserting data to a table from a different database is not '\n+ 'yet implemented'\n+ )\n+\n+ params = {}\n+ if self.has_attachment:\n+ # for database with attachment\n+ # see: https://github.com/ibis-project/ibis/issues/1930\n+ params['schema'] = self.database_name\n+\n+ if isinstance(obj, pd.DataFrame):\n+ obj.to_sql(\n+ table_name,\n+ self.con,\n+ index=False,\n+ if_exists='replace' if overwrite else 'append',\n+ **params,\n+ )\n+ elif isinstance(obj, ir.TableExpr):\n+ to_table_expr = self.table(table_name)\n+ to_table_schema = to_table_expr.schema()\n+\n+ if overwrite:\n+ self.drop_table(table_name, database=database)\n+ self.create_table(\n+ table_name, schema=to_table_schema, database=database,\n+ )\n+\n+ to_table = self._get_sqla_table(table_name, schema=database)\n+\n+ from_table_expr = obj\n+\n+ with self.begin() as bind:\n+ if from_table_expr is not None:\n+ bind.execute(\n+ to_table.insert().from_select(\n+ list(from_table_expr.columns),\n+ from_table_expr.compile(),\n+ )\n+ )\n+ else:\n+ raise ValueError(\n+ \"No operation is being performed. 
Either the obj parameter \"\n+ \"is not a pandas DataFrame or is not a ibis TableExpr.\"\n+ f\"The given obj is of type {type(obj).__name__} .\"\n+ )\n", "issue": "Implement .insert() for SQLite\nSome backends implement the `.insert()` method, to insert rows into tables. SQLite doesn't, and besides being useful for users, it'd be good to have it working, and add an example in the tutorial 5, so it's useful as a reference for other backends too.\n", "before_files": [{"content": "import contextlib\nimport functools\nfrom typing import List, Optional\n\nimport pandas as pd\nimport sqlalchemy as sa\nfrom pkg_resources import parse_version\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nimport ibis.util as util\nfrom ibis.backends.base.sql.compiler import Dialect\nfrom ibis.client import Query, SQLClient\n\nfrom .datatypes import to_sqla_type\nfrom .geospatial import geospatial_supported\nfrom .query_builder import build_ast\nfrom .translator import AlchemyExprTranslator\n\nif geospatial_supported:\n import geoalchemy2.shape as shape\n import geopandas\n\n\nclass _AlchemyProxy:\n \"\"\"\n Wraps a SQLAlchemy ResultProxy and ensures that .close() is called on\n garbage collection\n \"\"\"\n\n def __init__(self, proxy):\n self.proxy = proxy\n\n def __del__(self):\n self._close_cursor()\n\n def _close_cursor(self):\n self.proxy.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n self._close_cursor()\n\n def fetchall(self):\n return self.proxy.fetchall()\n\n\ndef _invalidates_reflection_cache(f):\n \"\"\"Invalidate the SQLAlchemy reflection cache if `f` performs an operation\n that mutates database or table metadata such as ``CREATE TABLE``,\n ``DROP TABLE``, etc.\n\n Parameters\n ----------\n f : callable\n A method on :class:`ibis.sql.alchemy.AlchemyClient`\n \"\"\"\n\n @functools.wraps(f)\n def wrapped(self, *args, **kwargs):\n result = f(self, *args, **kwargs)\n\n # only invalidate the cache after we've succesfully called the wrapped\n # function\n self._reflection_cache_is_dirty = True\n return result\n\n return wrapped\n\n\ndef _maybe_to_geodataframe(df, schema):\n \"\"\"\n If the required libraries for geospatial support are installed, and if a\n geospatial column is present in the dataframe, convert it to a\n GeoDataFrame.\n \"\"\"\n\n def to_shapely(row, name):\n return shape.to_shape(row[name]) if row[name] is not None else None\n\n if len(df) and geospatial_supported:\n geom_col = None\n for name, dtype in schema.items():\n if isinstance(dtype, dt.GeoSpatial):\n geom_col = geom_col or name\n df[name] = df.apply(lambda x: to_shapely(x, name), axis=1)\n if geom_col:\n df = geopandas.GeoDataFrame(df, geometry=geom_col)\n return df\n\n\nclass AlchemyQuery(Query):\n def _fetch(self, cursor):\n df = pd.DataFrame.from_records(\n cursor.proxy.fetchall(),\n columns=cursor.proxy.keys(),\n coerce_float=True,\n )\n schema = self.schema()\n return _maybe_to_geodataframe(schema.apply_to(df), schema)\n\n\nclass AlchemyDialect(Dialect):\n\n translator = AlchemyExprTranslator\n\n\nclass AlchemyClient(SQLClient):\n\n dialect = AlchemyDialect\n query_class = AlchemyQuery\n has_attachment = False\n\n def __init__(self, con: sa.engine.Engine) -> None:\n super().__init__()\n self.con = con\n self.meta = sa.MetaData(bind=con)\n self._inspector = sa.inspect(con)\n self._reflection_cache_is_dirty = False\n self._schemas = {}\n\n @property\n def inspector(self):\n if self._reflection_cache_is_dirty:\n self._inspector.info_cache.clear()\n 
return self._inspector\n\n @contextlib.contextmanager\n def begin(self):\n with self.con.begin() as bind:\n yield bind\n\n @_invalidates_reflection_cache\n def create_table(self, name, expr=None, schema=None, database=None):\n if database == self.database_name:\n # avoid fully qualified name\n database = None\n\n if database is not None:\n raise NotImplementedError(\n 'Creating tables from a different database is not yet '\n 'implemented'\n )\n\n if expr is None and schema is None:\n raise ValueError('You must pass either an expression or a schema')\n\n if expr is not None and schema is not None:\n if not expr.schema().equals(ibis.schema(schema)):\n raise TypeError(\n 'Expression schema is not equal to passed schema. '\n 'Try passing the expression without the schema'\n )\n if schema is None:\n schema = expr.schema()\n\n self._schemas[self._fully_qualified_name(name, database)] = schema\n t = self._table_from_schema(\n name, schema, database=database or self.current_database\n )\n\n with self.begin() as bind:\n t.create(bind=bind)\n if expr is not None:\n bind.execute(\n t.insert().from_select(list(expr.columns), expr.compile())\n )\n\n def _columns_from_schema(\n self, name: str, schema: sch.Schema\n ) -> List[sa.Column]:\n return [\n sa.Column(colname, to_sqla_type(dtype), nullable=dtype.nullable)\n for colname, dtype in zip(schema.names, schema.types)\n ]\n\n def _table_from_schema(\n self, name: str, schema: sch.Schema, database: Optional[str] = None\n ) -> sa.Table:\n columns = self._columns_from_schema(name, schema)\n return sa.Table(name, self.meta, *columns)\n\n @_invalidates_reflection_cache\n def drop_table(\n self,\n table_name: str,\n database: Optional[str] = None,\n force: bool = False,\n ) -> None:\n if database == self.database_name:\n # avoid fully qualified name\n database = None\n\n if database is not None:\n raise NotImplementedError(\n 'Dropping tables from a different database is not yet '\n 'implemented'\n )\n\n t = self._get_sqla_table(table_name, schema=database, autoload=False)\n t.drop(checkfirst=force)\n\n assert (\n not t.exists()\n ), 'Something went wrong during DROP of table {!r}'.format(t.name)\n\n self.meta.remove(t)\n\n qualified_name = self._fully_qualified_name(table_name, database)\n\n try:\n del self._schemas[qualified_name]\n except KeyError: # schemas won't be cached if created with raw_sql\n pass\n\n def load_data(\n self,\n table_name: str,\n data: pd.DataFrame,\n database: str = None,\n if_exists: str = 'fail',\n ):\n \"\"\"\n Load data from a dataframe to the backend.\n\n Parameters\n ----------\n table_name : string\n data: pandas.DataFrame\n database : string, optional\n if_exists : string, optional, default 'fail'\n The values available are: {\u2018fail\u2019, \u2018replace\u2019, \u2018append\u2019}\n\n Raises\n ------\n NotImplementedError\n Loading data to a table from a different database is not\n yet implemented\n \"\"\"\n if database == self.database_name:\n # avoid fully qualified name\n database = None\n\n if database is not None:\n raise NotImplementedError(\n 'Loading data to a table from a different database is not '\n 'yet implemented'\n )\n\n params = {}\n if self.has_attachment:\n # for database with attachment\n # see: https://github.com/ibis-project/ibis/issues/1930\n params['schema'] = self.database_name\n\n data.to_sql(\n table_name,\n con=self.con,\n index=False,\n if_exists=if_exists,\n **params,\n )\n\n def truncate_table(\n self, table_name: str, database: Optional[str] = None\n ) -> None:\n t = 
self._get_sqla_table(table_name, schema=database)\n t.delete().execute()\n\n def list_tables(\n self,\n like: Optional[str] = None,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n ) -> List[str]:\n \"\"\"List tables/views in the current or indicated database.\n\n Parameters\n ----------\n like\n Checks for this string contained in name\n database\n If not passed, uses the current database\n schema\n The schema namespace that tables should be listed from\n\n Returns\n -------\n List[str]\n\n \"\"\"\n inspector = self.inspector\n # inspector returns a mutable version of its names, so make a copy.\n names = inspector.get_table_names(schema=schema).copy()\n names.extend(inspector.get_view_names(schema=schema))\n if like is not None:\n names = [x for x in names if like in x]\n return sorted(names)\n\n def _execute(self, query: str, results: bool = True):\n return _AlchemyProxy(self.con.execute(query))\n\n @_invalidates_reflection_cache\n def raw_sql(self, query: str, results: bool = False):\n return super().raw_sql(query, results=results)\n\n def _build_ast(self, expr, context):\n return build_ast(expr, context)\n\n def _log(self, sql):\n try:\n query_str = str(sql)\n except sa.exc.UnsupportedCompilationError:\n pass\n else:\n util.log(query_str)\n\n def _get_sqla_table(self, name, schema=None, autoload=True):\n return sa.Table(name, self.meta, schema=schema, autoload=autoload)\n\n def _sqla_table_to_expr(self, table):\n node = self.table_class(table, self)\n return self.table_expr_class(node)\n\n @property\n def version(self):\n vstring = '.'.join(map(str, self.con.dialect.server_version_info))\n return parse_version(vstring)\n", "path": "ibis/backends/base/sql/alchemy/client.py"}]} | 3,634 | 968 |
gh_patches_debug_32003 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StratifiedKFold with shuffling in the decoding step?
Currently we create the cross-validation object in our decoding step (08) of the pipeline via:
https://github.com/mne-tools/mne-study-template/blob/b61a5ca66aaef1f631d7ce2def3b1cde5d611729/08-sliding_estimator.py#L80-L81
By default, `StratifiedKFold` does not shuffle, meaning that the passed `random_state` doesn't have any effect (it produces a warning, though).
So, should we enable shuffling? Intuitively I would say yes, but I want to hear your opinion, @agramfort
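
For illustration, here is a minimal sketch of the two behaviours under discussion, assuming scikit-learn's standard `StratifiedKFold` signature (the variable names are hypothetical):

```python
from sklearn.model_selection import StratifiedKFold

# Current behaviour: shuffle defaults to False, so the passed random_state
# has no effect on how the folds are built (scikit-learn warns about this).
cv_no_shuffle = StratifiedKFold(n_splits=5)

# With shuffling enabled, random_state becomes meaningful and keeps the
# (shuffled) fold assignment reproducible across runs.
cv_shuffled = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
```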
</issue>
<code>
[start of 11-make_cov.py]
1 """
2 ==================================
3 08. Baseline covariance estimation
4 ==================================
5
6 Covariance matrices are computed and saved.
7 """
8
9 import os.path as op
10 import itertools
11 import logging
12
13 import mne
14 from mne.parallel import parallel_func
15 from mne_bids import make_bids_basename
16
17 from sklearn.model_selection import KFold
18
19 import config
20 from config import gen_log_message, on_error, failsafe_run
21
22 logger = logging.getLogger('mne-study-template')
23
24
25 def compute_cov_from_epochs(subject, session, tmin, tmax):
26 deriv_path = config.get_subject_deriv_path(subject=subject,
27 session=session,
28 kind=config.get_kind())
29
30 bids_basename = make_bids_basename(subject=subject,
31 session=session,
32 task=config.get_task(),
33 acquisition=config.acq,
34 run=None,
35 processing=config.proc,
36 recording=config.rec,
37 space=config.space)
38
39 if config.use_ica or config.use_ssp:
40 extension = '_cleaned-epo'
41 else:
42 extension = '-epo'
43
44 epo_fname = op.join(deriv_path, bids_basename + '%s.fif' % extension)
45 cov_fname = op.join(deriv_path, bids_basename + '-cov.fif')
46
47 msg = (f"Computing regularized covariance based on epochs' baseline "
48 f"periods. Input: {epo_fname}, Output: {cov_fname}")
49 logger.info(gen_log_message(message=msg, step=11, subject=subject,
50 session=session))
51
52 epochs = mne.read_epochs(epo_fname, preload=True)
53
54 # Do not shuffle the data before splitting into train and test samples.
55 # Perform a block cross-validation instead to maintain autocorrelated
56 # noise.
57 cv = KFold(3, shuffle=False)
58 cov = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='shrunk',
59 cv=cv, rank='info')
60 cov.save(cov_fname)
61
62
63 def compute_cov_from_empty_room(subject, session):
64 deriv_path = config.get_subject_deriv_path(subject=subject,
65 session=session,
66 kind=config.get_kind())
67
68 bids_basename = make_bids_basename(subject=subject,
69 session=session,
70 task=config.get_task(),
71 acquisition=config.acq,
72 run=None,
73 processing=config.proc,
74 recording=config.rec,
75 space=config.space)
76
77 raw_er_fname = op.join(deriv_path,
78 bids_basename + '_emptyroom_filt_raw.fif')
79 cov_fname = op.join(deriv_path, bids_basename + '-cov.fif')
80
81 extra_params = dict()
82 if not config.use_maxwell_filter and config.allow_maxshield:
83 extra_params['allow_maxshield'] = config.allow_maxshield
84
85 msg = (f'Computing regularized covariance based on empty-room recording. '
86 f'Input: {raw_er_fname}, Output: {cov_fname}')
87 logger.info(gen_log_message(message=msg, step=11, subject=subject,
88 session=session))
89
90 raw_er = mne.io.read_raw_fif(raw_er_fname, preload=True, **extra_params)
91
92 # Do not shuffle the data before splitting into train and test samples.
93 # Perform a block cross-validation instead to maintain autocorrelated
94 # noise.
95 cv = KFold(3, shuffle=False)
96 cov = mne.compute_raw_covariance(raw_er, method='shrunk', cv=cv,
97 rank='info')
98 cov.save(cov_fname)
99
100
101 @failsafe_run(on_error=on_error)
102 def run_covariance(subject, session=None):
103 if config.noise_cov == 'emptyroom' and 'eeg' not in config.ch_types:
104 compute_cov_from_empty_room(subject=subject, session=session)
105 else:
106 tmin, tmax = config.noise_cov
107 compute_cov_from_epochs(subject=subject, session=session, tmin=tmin,
108 tmax=tmax)
109
110
111 def main():
112 """Run cov."""
113 msg = 'Running Step 11: Estimate noise covariance'
114 logger.info(gen_log_message(step=11, message=msg))
115
116 parallel, run_func, _ = parallel_func(run_covariance, n_jobs=config.N_JOBS)
117 parallel(run_func(subject, session) for subject, session in
118 itertools.product(config.get_subjects(), config.get_sessions()))
119
120 msg = 'Completed Step 11: Estimate noise covariance'
121 logger.info(gen_log_message(step=11, message=msg))
122
123
124 if __name__ == '__main__':
125 main()
126
[end of 11-make_cov.py]
[start of 08-sliding_estimator.py]
1 """
2 =================
3 Sliding estimator
4 =================
5
6 A sliding estimator fits a logistic regression model for every time point.
7 In this example, we contrast the condition 'famous' against 'scrambled'
8 using this approach. The end result is an averaging effect across sensors.
9 The contrast across different sensors are combined into a single plot.
10
11 """ # noqa: E501
12
13 ###############################################################################
14 # Let us first import the libraries
15
16 import os.path as op
17 import logging
18
19 import numpy as np
20 from scipy.io import savemat
21
22 import mne
23 from mne.decoding import SlidingEstimator, cross_val_multiscore
24
25 from mne_bids import make_bids_basename
26
27 from sklearn.preprocessing import StandardScaler
28 from sklearn.model_selection import StratifiedKFold
29 from sklearn.pipeline import make_pipeline
30 from sklearn.linear_model import LogisticRegression
31
32 import config
33 from config import gen_log_message, on_error, failsafe_run
34
35 logger = logging.getLogger('mne-study-template')
36
37
38 ###############################################################################
39 # Then we write a function to do time decoding on one subject
40
41 @failsafe_run(on_error=on_error)
42 def run_time_decoding(subject, condition1, condition2, session=None):
43 msg = f'Contrasting conditions: {condition1} – {condition2}'
44 logger.info(gen_log_message(message=msg, step=8, subject=subject,
45 session=session))
46
47 deriv_path = config.get_subject_deriv_path(subject=subject,
48 session=session,
49 kind=config.get_kind())
50
51 bids_basename = make_bids_basename(subject=subject,
52 session=session,
53 task=config.get_task(),
54 acquisition=config.acq,
55 run=None,
56 processing=config.proc,
57 recording=config.rec,
58 space=config.space)
59
60 fname_in = op.join(deriv_path, bids_basename + '-epo.fif')
61 epochs = mne.read_epochs(fname_in)
62
63 # We define the epochs and the labels
64 epochs = mne.concatenate_epochs([epochs[condition1],
65 epochs[condition2]])
66 epochs.apply_baseline()
67
68 # Get the data and labels
69 X = epochs.get_data()
70 n_cond1 = len(epochs[condition1])
71 n_cond2 = len(epochs[condition2])
72 y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
73
74 # Use AUC because chance level is same regardless of the class balance
75 se = SlidingEstimator(
76 make_pipeline(StandardScaler(),
77 LogisticRegression(solver='liblinear',
78 random_state=config.random_state)),
79 scoring=config.decoding_metric, n_jobs=config.N_JOBS)
80 cv = StratifiedKFold(random_state=config.random_state,
81 n_splits=config.decoding_n_splits)
82 scores = cross_val_multiscore(se, X=X, y=y, cv=cv)
83
84 # let's save the scores now
85 a_vs_b = '%s_vs_%s' % (condition1, condition2)
86 a_vs_b = a_vs_b.replace(op.sep, '')
87 fname_td = op.join(config.bids_root, 'derivatives', config.PIPELINE_NAME,
88 '%s_%s_%s_%s.mat' %
89 (subject, config.study_name, a_vs_b,
90 config.decoding_metric))
91 savemat(fname_td, {'scores': scores, 'times': epochs.times})
92
93
94 def main():
95 """Run sliding estimator."""
96 msg = 'Running Step 8: Sliding estimator'
97 logger.info(gen_log_message(step=8, message=msg))
98
99 # Here we go parallel inside the :class:`mne.decoding.SlidingEstimator`
100 # so we don't dispatch manually to multiple jobs.
101 for subject in config.get_subjects():
102 for session in config.get_sessions():
103 for conditions in config.decoding_conditions:
104 run_time_decoding(subject, *conditions, session=session)
105
106 msg = 'Completed Step 8: Sliding estimator'
107 logger.info(gen_log_message(step=8, message=msg))
108
109
110 if __name__ == '__main__':
111 main()
112
[end of 08-sliding_estimator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/08-sliding_estimator.py b/08-sliding_estimator.py
--- a/08-sliding_estimator.py
+++ b/08-sliding_estimator.py
@@ -77,9 +77,7 @@
LogisticRegression(solver='liblinear',
random_state=config.random_state)),
scoring=config.decoding_metric, n_jobs=config.N_JOBS)
- cv = StratifiedKFold(random_state=config.random_state,
- n_splits=config.decoding_n_splits)
- scores = cross_val_multiscore(se, X=X, y=y, cv=cv)
+ scores = cross_val_multiscore(se, X=X, y=y, cv=config.decoding_n_splits)
# let's save the scores now
a_vs_b = '%s_vs_%s' % (condition1, condition2)
diff --git a/11-make_cov.py b/11-make_cov.py
--- a/11-make_cov.py
+++ b/11-make_cov.py
@@ -50,13 +50,8 @@
session=session))
epochs = mne.read_epochs(epo_fname, preload=True)
-
- # Do not shuffle the data before splitting into train and test samples.
- # Perform a block cross-validation instead to maintain autocorrelated
- # noise.
- cv = KFold(3, shuffle=False)
cov = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='shrunk',
- cv=cv, rank='info')
+ rank='info')
cov.save(cov_fname)
@@ -88,13 +83,7 @@
session=session))
raw_er = mne.io.read_raw_fif(raw_er_fname, preload=True, **extra_params)
-
- # Do not shuffle the data before splitting into train and test samples.
- # Perform a block cross-validation instead to maintain autocorrelated
- # noise.
- cv = KFold(3, shuffle=False)
- cov = mne.compute_raw_covariance(raw_er, method='shrunk', cv=cv,
- rank='info')
+ cov = mne.compute_raw_covariance(raw_er, method='shrunk', rank='info')
cov.save(cov_fname)
| {"golden_diff": "diff --git a/08-sliding_estimator.py b/08-sliding_estimator.py\n--- a/08-sliding_estimator.py\n+++ b/08-sliding_estimator.py\n@@ -77,9 +77,7 @@\n LogisticRegression(solver='liblinear',\n random_state=config.random_state)),\n scoring=config.decoding_metric, n_jobs=config.N_JOBS)\n- cv = StratifiedKFold(random_state=config.random_state,\n- n_splits=config.decoding_n_splits)\n- scores = cross_val_multiscore(se, X=X, y=y, cv=cv)\n+ scores = cross_val_multiscore(se, X=X, y=y, cv=config.decoding_n_splits)\n \n # let's save the scores now\n a_vs_b = '%s_vs_%s' % (condition1, condition2)\ndiff --git a/11-make_cov.py b/11-make_cov.py\n--- a/11-make_cov.py\n+++ b/11-make_cov.py\n@@ -50,13 +50,8 @@\n session=session))\n \n epochs = mne.read_epochs(epo_fname, preload=True)\n-\n- # Do not shuffle the data before splitting into train and test samples.\n- # Perform a block cross-validation instead to maintain autocorrelated\n- # noise.\n- cv = KFold(3, shuffle=False)\n cov = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='shrunk',\n- cv=cv, rank='info')\n+ rank='info')\n cov.save(cov_fname)\n \n \n@@ -88,13 +83,7 @@\n session=session))\n \n raw_er = mne.io.read_raw_fif(raw_er_fname, preload=True, **extra_params)\n-\n- # Do not shuffle the data before splitting into train and test samples.\n- # Perform a block cross-validation instead to maintain autocorrelated\n- # noise.\n- cv = KFold(3, shuffle=False)\n- cov = mne.compute_raw_covariance(raw_er, method='shrunk', cv=cv,\n- rank='info')\n+ cov = mne.compute_raw_covariance(raw_er, method='shrunk', rank='info')\n cov.save(cov_fname)\n", "issue": "StratifiedKFold with shuffling in the decoding step?\nCurrently we create the cross-validation object in our decoding step (08) of the pipeline via:\r\nhttps://github.com/mne-tools/mne-study-template/blob/b61a5ca66aaef1f631d7ce2def3b1cde5d611729/08-sliding_estimator.py#L80-L81\r\n\r\nBy default, `StratifiedKFold` does not shuffle, meaning that the passed `random_state` doesn't have any effect (it produces a warning, though).\r\n\r\nSo \u2013 should we enable shuffling? Intuitively I would say yes, but want to hear your opinion, @agramfort\n", "before_files": [{"content": "\"\"\"\n==================================\n08. Baseline covariance estimation\n==================================\n\nCovariance matrices are computed and saved.\n\"\"\"\n\nimport os.path as op\nimport itertools\nimport logging\n\nimport mne\nfrom mne.parallel import parallel_func\nfrom mne_bids import make_bids_basename\n\nfrom sklearn.model_selection import KFold\n\nimport config\nfrom config import gen_log_message, on_error, failsafe_run\n\nlogger = logging.getLogger('mne-study-template')\n\n\ndef compute_cov_from_epochs(subject, session, tmin, tmax):\n deriv_path = config.get_subject_deriv_path(subject=subject,\n session=session,\n kind=config.get_kind())\n\n bids_basename = make_bids_basename(subject=subject,\n session=session,\n task=config.get_task(),\n acquisition=config.acq,\n run=None,\n processing=config.proc,\n recording=config.rec,\n space=config.space)\n\n if config.use_ica or config.use_ssp:\n extension = '_cleaned-epo'\n else:\n extension = '-epo'\n\n epo_fname = op.join(deriv_path, bids_basename + '%s.fif' % extension)\n cov_fname = op.join(deriv_path, bids_basename + '-cov.fif')\n\n msg = (f\"Computing regularized covariance based on epochs' baseline \"\n f\"periods. 
Input: {epo_fname}, Output: {cov_fname}\")\n logger.info(gen_log_message(message=msg, step=11, subject=subject,\n session=session))\n\n epochs = mne.read_epochs(epo_fname, preload=True)\n\n # Do not shuffle the data before splitting into train and test samples.\n # Perform a block cross-validation instead to maintain autocorrelated\n # noise.\n cv = KFold(3, shuffle=False)\n cov = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax, method='shrunk',\n cv=cv, rank='info')\n cov.save(cov_fname)\n\n\ndef compute_cov_from_empty_room(subject, session):\n deriv_path = config.get_subject_deriv_path(subject=subject,\n session=session,\n kind=config.get_kind())\n\n bids_basename = make_bids_basename(subject=subject,\n session=session,\n task=config.get_task(),\n acquisition=config.acq,\n run=None,\n processing=config.proc,\n recording=config.rec,\n space=config.space)\n\n raw_er_fname = op.join(deriv_path,\n bids_basename + '_emptyroom_filt_raw.fif')\n cov_fname = op.join(deriv_path, bids_basename + '-cov.fif')\n\n extra_params = dict()\n if not config.use_maxwell_filter and config.allow_maxshield:\n extra_params['allow_maxshield'] = config.allow_maxshield\n\n msg = (f'Computing regularized covariance based on empty-room recording. '\n f'Input: {raw_er_fname}, Output: {cov_fname}')\n logger.info(gen_log_message(message=msg, step=11, subject=subject,\n session=session))\n\n raw_er = mne.io.read_raw_fif(raw_er_fname, preload=True, **extra_params)\n\n # Do not shuffle the data before splitting into train and test samples.\n # Perform a block cross-validation instead to maintain autocorrelated\n # noise.\n cv = KFold(3, shuffle=False)\n cov = mne.compute_raw_covariance(raw_er, method='shrunk', cv=cv,\n rank='info')\n cov.save(cov_fname)\n\n\n@failsafe_run(on_error=on_error)\ndef run_covariance(subject, session=None):\n if config.noise_cov == 'emptyroom' and 'eeg' not in config.ch_types:\n compute_cov_from_empty_room(subject=subject, session=session)\n else:\n tmin, tmax = config.noise_cov\n compute_cov_from_epochs(subject=subject, session=session, tmin=tmin,\n tmax=tmax)\n\n\ndef main():\n \"\"\"Run cov.\"\"\"\n msg = 'Running Step 11: Estimate noise covariance'\n logger.info(gen_log_message(step=11, message=msg))\n\n parallel, run_func, _ = parallel_func(run_covariance, n_jobs=config.N_JOBS)\n parallel(run_func(subject, session) for subject, session in\n itertools.product(config.get_subjects(), config.get_sessions()))\n\n msg = 'Completed Step 11: Estimate noise covariance'\n logger.info(gen_log_message(step=11, message=msg))\n\n\nif __name__ == '__main__':\n main()\n", "path": "11-make_cov.py"}, {"content": "\"\"\"\n=================\nSliding estimator\n=================\n\nA sliding estimator fits a logistic regression model for every time point.\nIn this example, we contrast the condition 'famous' against 'scrambled'\nusing this approach. 
The end result is an averaging effect across sensors.\nThe contrast across different sensors are combined into a single plot.\n\n\"\"\" # noqa: E501\n\n###############################################################################\n# Let us first import the libraries\n\nimport os.path as op\nimport logging\n\nimport numpy as np\nfrom scipy.io import savemat\n\nimport mne\nfrom mne.decoding import SlidingEstimator, cross_val_multiscore\n\nfrom mne_bids import make_bids_basename\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\n\nimport config\nfrom config import gen_log_message, on_error, failsafe_run\n\nlogger = logging.getLogger('mne-study-template')\n\n\n###############################################################################\n# Then we write a function to do time decoding on one subject\n\n@failsafe_run(on_error=on_error)\ndef run_time_decoding(subject, condition1, condition2, session=None):\n msg = f'Contrasting conditions: {condition1} \u2013 {condition2}'\n logger.info(gen_log_message(message=msg, step=8, subject=subject,\n session=session))\n\n deriv_path = config.get_subject_deriv_path(subject=subject,\n session=session,\n kind=config.get_kind())\n\n bids_basename = make_bids_basename(subject=subject,\n session=session,\n task=config.get_task(),\n acquisition=config.acq,\n run=None,\n processing=config.proc,\n recording=config.rec,\n space=config.space)\n\n fname_in = op.join(deriv_path, bids_basename + '-epo.fif')\n epochs = mne.read_epochs(fname_in)\n\n # We define the epochs and the labels\n epochs = mne.concatenate_epochs([epochs[condition1],\n epochs[condition2]])\n epochs.apply_baseline()\n\n # Get the data and labels\n X = epochs.get_data()\n n_cond1 = len(epochs[condition1])\n n_cond2 = len(epochs[condition2])\n y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]\n\n # Use AUC because chance level is same regardless of the class balance\n se = SlidingEstimator(\n make_pipeline(StandardScaler(),\n LogisticRegression(solver='liblinear',\n random_state=config.random_state)),\n scoring=config.decoding_metric, n_jobs=config.N_JOBS)\n cv = StratifiedKFold(random_state=config.random_state,\n n_splits=config.decoding_n_splits)\n scores = cross_val_multiscore(se, X=X, y=y, cv=cv)\n\n # let's save the scores now\n a_vs_b = '%s_vs_%s' % (condition1, condition2)\n a_vs_b = a_vs_b.replace(op.sep, '')\n fname_td = op.join(config.bids_root, 'derivatives', config.PIPELINE_NAME,\n '%s_%s_%s_%s.mat' %\n (subject, config.study_name, a_vs_b,\n config.decoding_metric))\n savemat(fname_td, {'scores': scores, 'times': epochs.times})\n\n\ndef main():\n \"\"\"Run sliding estimator.\"\"\"\n msg = 'Running Step 8: Sliding estimator'\n logger.info(gen_log_message(step=8, message=msg))\n\n # Here we go parallel inside the :class:`mne.decoding.SlidingEstimator`\n # so we don't dispatch manually to multiple jobs.\n for subject in config.get_subjects():\n for session in config.get_sessions():\n for conditions in config.decoding_conditions:\n run_time_decoding(subject, *conditions, session=session)\n\n msg = 'Completed Step 8: Sliding estimator'\n logger.info(gen_log_message(step=8, message=msg))\n\n\nif __name__ == '__main__':\n main()\n", "path": "08-sliding_estimator.py"}]} | 3,024 | 490 |
gh_patches_debug_17162 | rasdani/github-patches | git_diff | web2py__web2py-1602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
webclient loses the session when session.forget is used
When `session.forget` is added to an action like `index`, we can observe that on subsequent calls a logged-in user is logged out.

To reproduce, modify `TestWeb.testRegisterAndLogin` in gluon/tests/test_web.py (tried on commit e6a3081b42ecd58441419930266baf286561c4c7) by adding `session.forget(response)` as the first line of the `index` controller.

The test is run with:
```
python2 -m unittest -v gluon.tests.test_web.TestWeb.testRegisterAndLogin
```
And fails with:
```
======================================================================
FAIL: testRegisterAndLogin (gluon.tests.test_web.TestWeb)
----------------------------------------------------------------------
Traceback (most recent call last):
File "gluon/tests/test_web.py", line 92, in testRegisterAndLogin
self.assertTrue('Welcome Homer' in client.text)
AssertionError: False is not true
----------------------------------------------------------------------
```
This seems unexpected according to the Google group exchanges about this subject.

I'd like to help, but I have only a rough idea of the expected behaviour of a browser with respect to cookies, sessions and headers… a link to documentation would be welcome.
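
As a hedged sketch of the browser-like behaviour in question (an illustration only, not necessarily the fix): a real browser keeps a single cookie store for the whole browsing session, so a scripted client that wants the same behaviour has to reuse one cookie jar across requests instead of creating a fresh one per request. The class below is hypothetical and only uses the same standard-library pieces already imported by webclient.py:

```python
import cookielib
import urllib2

class PersistentCookieClient(object):
    """Hypothetical sketch: one CookieJar shared by every request, so the
    web2py session cookie set on the first response is sent back later."""

    def __init__(self):
        self.cookiejar = cookielib.CookieJar()  # created once, reused

    def open(self, url, data=None):
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
        return opener.open(url, data)
```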
</issue>
<code>
[start of gluon/contrib/webclient.py]
1 """
2 Developed by Massimo Di Pierro
3 Released under the web2py license (LGPL)
4
5 It an interface on top of urllib2 which simplifies scripting of http requests
6 mostly for testing purposes
7
8 - customizable
9 - supports basic auth
10 - supports cookies
11 - supports session cookies (tested with web2py sessions)
12 - detects broken session
13 - detects web2py form postbacks and handles formname and formkey
14 - detects web2py tickets
15
16 Some examples at the bottom.
17 """
18 from __future__ import print_function
19 from gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes
20 import re
21 import time
22
23
24 DEFAULT_HEADERS = {
25 'user-agent': 'Mozilla/4.0', # some servers are picky
26 'accept-language': 'en',
27 }
28
29 FORM_REGEX = re.compile('(\<input name\="_formkey" type\="hidden" value\="(?P<formkey>.+?)" \/\>)?\<input name\="_formname" type\="hidden" value\="(?P<formname>.+?)" \/\>')
30
31 SESSION_REGEX = 'session_id_(?P<name>.+)'
32
33
34 class WebClient(object):
35
36 def __init__(self,
37 app='',
38 postbacks=True,
39 default_headers=DEFAULT_HEADERS,
40 session_regex=SESSION_REGEX):
41 self.app = app
42 self.postbacks = postbacks
43 self.forms = {}
44 self.history = []
45 self.cookies = {}
46 self.default_headers = default_headers
47 self.sessions = {}
48 self.session_regex = session_regex and re.compile(session_regex)
49 self.headers = {}
50
51 def _parse_headers_in_cookies(self):
52 self.cookies = {}
53 if 'set-cookie' in self.headers:
54 for item in self.headers['set-cookie'].split(','):
55 cookie = item[:item.find(';')]
56 pos = cookie.find('=')
57 key = cookie[:pos]
58 value = cookie[pos+1:]
59 self.cookies[key.strip()] = value.strip()
60
61 def get(self, url, cookies=None, headers=None, auth=None):
62 return self.post(url, data=None, cookies=cookies,
63 headers=headers, method='GET')
64
65 def post(self, url, data=None, cookies=None,
66 headers=None, auth=None, method='auto'):
67 self.url = self.app + url
68
69 # if this POST form requires a postback do it
70 if data and '_formname' in data and self.postbacks and \
71 self.history and self.history[-1][1] != self.url:
72 # to bypass the web2py CSRF need to get formkey
73 # before submitting the form
74 self.get(url, cookies=cookies, headers=headers, auth=auth)
75
76 # unless cookies are specified, recycle cookies
77 if cookies is None:
78 cookies = self.cookies
79 cookies = cookies or {}
80 headers = headers or {}
81
82 cj = cookielib.CookieJar()
83 args = [
84 urllib2.HTTPCookieProcessor(cj),
85 urllib2.HTTPHandler(debuglevel=0)
86 ]
87 # if required do basic auth
88 if auth:
89 auth_handler = urllib2.HTTPBasicAuthHandler()
90 auth_handler.add_password(**auth)
91 args.append(auth_handler)
92
93 opener = urllib2.build_opener(*args)
94
95 # copy headers from dict to list of key,value
96 headers_list = []
97 for key, value in iteritems(self.default_headers):
98 if not key in headers:
99 headers[key] = value
100 for key, value in iteritems(headers):
101 if isinstance(value, (list, tuple)):
102 for v in value:
103 headers_list.append((key, v))
104 else:
105 headers_list.append((key, value))
106
107 # move cookies to headers
108 for key, value in iteritems(cookies):
109 headers_list.append(('Cookie', '%s=%s' % (key, value)))
110
111 # add headers to request
112 for key, value in headers_list:
113 opener.addheaders.append((key, str(value)))
114
115 # assume everything is ok and make http request
116 error = None
117 try:
118 if isinstance(data, str):
119 self.method = 'POST' if method=='auto' else method
120 elif isinstance(data, dict):
121 self.method = 'POST' if method=='auto' else method
122 # if there is only one form, set _formname automatically
123 if not '_formname' in data and len(self.forms) == 1:
124 data['_formname'] = self.forms.keys()[0]
125
126 # if there is no formkey but it is known, set it
127 if '_formname' in data and not '_formkey' in data and \
128 data['_formname'] in self.forms:
129 data['_formkey'] = self.forms[data['_formname']]
130
131 # time the POST request
132 data = urlencode(data, doseq=True)
133 else:
134 self.method = 'GET' if method=='auto' else method
135 data = None
136 t0 = time.time()
137 self.response = opener.open(self.url, to_bytes(data))
138 self.time = time.time() - t0
139 except urllib2.HTTPError as er:
140 error = er
141 # catch HTTP errors
142 self.time = time.time() - t0
143 self.response = er
144
145 if hasattr(self.response, 'getcode'):
146 self.status = self.response.getcode()
147 else:#python2.5
148 self.status = None
149
150 self.text = to_native(self.response.read())
151 # In PY3 self.response.headers are case sensitive
152 self.headers = dict()
153 for h in self.response.headers:
154 self.headers[h.lower()] = self.response.headers[h]
155
156 # treat web2py tickets as special types of errors
157 if error is not None:
158 if 'web2py_error' in self.headers:
159 raise RuntimeError(self.headers['web2py_error'])
160 else:
161 raise error
162
163 self._parse_headers_in_cookies()
164
165 # check is a new session id has been issued, symptom of broken session
166 if self.session_regex is not None:
167 for cookie, value in iteritems(self.cookies):
168 match = self.session_regex.match(cookie)
169 if match:
170 name = match.group('name')
171 if name in self.sessions and self.sessions[name] != value:
172 print(RuntimeError('Changed session ID %s' % name))
173 self.sessions[name] = value
174
175 # find all forms and formkeys in page
176 self.forms = {}
177 for match in FORM_REGEX.finditer(to_native(self.text)):
178 self.forms[match.group('formname')] = match.group('formkey')
179
180 # log this request
181 self.history.append((self.method, self.url, self.status, self.time))
182
183
184 def test_web2py_registration_and_login():
185 # from gluon.contrib.webclient import WebClient
186 # start a web2py instance for testing
187
188 client = WebClient('http://127.0.0.1:8000/welcome/default/')
189 client.get('index')
190
191 # register
192 data = dict(first_name='Homer',
193 last_name='Simpson',
194 email='[email protected]',
195 password='test',
196 password_two='test',
197 _formname='register')
198 client.post('user/register', data=data)
199
200 # logout
201 client.get('user/logout')
202
203 # login
204 data = dict(email='[email protected]',
205 password='test',
206 _formname='login')
207 client.post('user/login', data=data)
208
209 # check registration and login were successful
210 client.get('user/profile')
211 assert 'Welcome Homer' in client.text
212
213 # print some variables
214 print('\nsessions:\n', client.sessions)
215 print('\nheaders:\n', client.headers)
216 print('\ncookies:\n', client.cookies)
217 print('\nforms:\n', client.forms)
218 print()
219 for method, url, status, t in client.history:
220 print(method, url, status, t)
221
222 if __name__ == '__main__':
223 test_web2py_registration_and_login()
224
[end of gluon/contrib/webclient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py
--- a/gluon/contrib/webclient.py
+++ b/gluon/contrib/webclient.py
@@ -43,6 +43,7 @@
self.forms = {}
self.history = []
self.cookies = {}
+ self.cookiejar = cookielib.CookieJar()
self.default_headers = default_headers
self.sessions = {}
self.session_regex = session_regex and re.compile(session_regex)
@@ -79,9 +80,8 @@
cookies = cookies or {}
headers = headers or {}
- cj = cookielib.CookieJar()
args = [
- urllib2.HTTPCookieProcessor(cj),
+ urllib2.HTTPCookieProcessor(self.cookiejar),
urllib2.HTTPHandler(debuglevel=0)
]
# if required do basic auth
| {"golden_diff": "diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py\n--- a/gluon/contrib/webclient.py\n+++ b/gluon/contrib/webclient.py\n@@ -43,6 +43,7 @@\n self.forms = {}\n self.history = []\n self.cookies = {}\n+ self.cookiejar = cookielib.CookieJar()\n self.default_headers = default_headers\n self.sessions = {}\n self.session_regex = session_regex and re.compile(session_regex)\n@@ -79,9 +80,8 @@\n cookies = cookies or {}\n headers = headers or {}\n \n- cj = cookielib.CookieJar()\n args = [\n- urllib2.HTTPCookieProcessor(cj),\n+ urllib2.HTTPCookieProcessor(self.cookiejar),\n urllib2.HTTPHandler(debuglevel=0)\n ]\n # if required do basic auth\n", "issue": "webclient lose the session when session.forget is used\nWhen `session.forget` is added to an action like `index` we can observe that on subsequent call a logged user is logged out.\n\nTo reproduce, modify `TestWeb.testRegisterAndLogin` in gluon/tests/test_web.py (tryed on commit e6a3081b42ecd58441419930266baf286561c4c7) so as adding `session.forget(response)` at the first line of the `index` controller.\n\nThe test is run with :\n\n```\npython2 -m unittest -v gluon.tests.test_web.TestWeb.testRegisterAndLogin\n```\n\nAnd fails with :\n\n```\n======================================================================\nFAIL: testRegisterAndLogin (gluon.tests.test_web.TestWeb)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"gluon/tests/test_web.py\", line 92, in testRegisterAndLogin\n self.assertTrue('Welcome Homer' in client.text)\nAssertionError: False is not true\n\n----------------------------------------------------------------------\n```\n\nWhich seems unexpected according to the google group exchanges about this subject.\n\nI'd like to help, but I have only a rough idea of the expected behaviour of a browser with respect to cookies, sessions and headers\u2026 Link to documentation is welcomed.\n\n", "before_files": [{"content": "\"\"\"\nDeveloped by Massimo Di Pierro\nReleased under the web2py license (LGPL)\n\nIt an interface on top of urllib2 which simplifies scripting of http requests\nmostly for testing purposes\n\n- customizable\n- supports basic auth\n- supports cookies\n- supports session cookies (tested with web2py sessions)\n- detects broken session\n- detects web2py form postbacks and handles formname and formkey\n- detects web2py tickets\n\nSome examples at the bottom.\n\"\"\"\nfrom __future__ import print_function\nfrom gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes\nimport re\nimport time\n\n\nDEFAULT_HEADERS = {\n 'user-agent': 'Mozilla/4.0', # some servers are picky\n 'accept-language': 'en',\n}\n\nFORM_REGEX = re.compile('(\\<input name\\=\"_formkey\" type\\=\"hidden\" value\\=\"(?P<formkey>.+?)\" \\/\\>)?\\<input name\\=\"_formname\" type\\=\"hidden\" value\\=\"(?P<formname>.+?)\" \\/\\>')\n\nSESSION_REGEX = 'session_id_(?P<name>.+)'\n\n\nclass WebClient(object):\n\n def __init__(self,\n app='',\n postbacks=True,\n default_headers=DEFAULT_HEADERS,\n session_regex=SESSION_REGEX):\n self.app = app\n self.postbacks = postbacks\n self.forms = {}\n self.history = []\n self.cookies = {}\n self.default_headers = default_headers\n self.sessions = {}\n self.session_regex = session_regex and re.compile(session_regex)\n self.headers = {}\n\n def _parse_headers_in_cookies(self):\n self.cookies = {}\n if 'set-cookie' in self.headers:\n for item in self.headers['set-cookie'].split(','):\n cookie = 
item[:item.find(';')]\n pos = cookie.find('=')\n key = cookie[:pos]\n value = cookie[pos+1:]\n self.cookies[key.strip()] = value.strip()\n\n def get(self, url, cookies=None, headers=None, auth=None):\n return self.post(url, data=None, cookies=cookies,\n headers=headers, method='GET')\n\n def post(self, url, data=None, cookies=None,\n headers=None, auth=None, method='auto'):\n self.url = self.app + url\n\n # if this POST form requires a postback do it\n if data and '_formname' in data and self.postbacks and \\\n self.history and self.history[-1][1] != self.url:\n # to bypass the web2py CSRF need to get formkey\n # before submitting the form\n self.get(url, cookies=cookies, headers=headers, auth=auth)\n\n # unless cookies are specified, recycle cookies\n if cookies is None:\n cookies = self.cookies\n cookies = cookies or {}\n headers = headers or {}\n\n cj = cookielib.CookieJar()\n args = [\n urllib2.HTTPCookieProcessor(cj),\n urllib2.HTTPHandler(debuglevel=0)\n ]\n # if required do basic auth\n if auth:\n auth_handler = urllib2.HTTPBasicAuthHandler()\n auth_handler.add_password(**auth)\n args.append(auth_handler)\n\n opener = urllib2.build_opener(*args)\n\n # copy headers from dict to list of key,value\n headers_list = []\n for key, value in iteritems(self.default_headers):\n if not key in headers:\n headers[key] = value\n for key, value in iteritems(headers):\n if isinstance(value, (list, tuple)):\n for v in value:\n headers_list.append((key, v))\n else:\n headers_list.append((key, value))\n\n # move cookies to headers\n for key, value in iteritems(cookies):\n headers_list.append(('Cookie', '%s=%s' % (key, value)))\n\n # add headers to request\n for key, value in headers_list:\n opener.addheaders.append((key, str(value)))\n\n # assume everything is ok and make http request\n error = None\n try:\n if isinstance(data, str):\n self.method = 'POST' if method=='auto' else method\n elif isinstance(data, dict):\n self.method = 'POST' if method=='auto' else method\n # if there is only one form, set _formname automatically\n if not '_formname' in data and len(self.forms) == 1:\n data['_formname'] = self.forms.keys()[0]\n\n # if there is no formkey but it is known, set it\n if '_formname' in data and not '_formkey' in data and \\\n data['_formname'] in self.forms:\n data['_formkey'] = self.forms[data['_formname']]\n\n # time the POST request\n data = urlencode(data, doseq=True)\n else:\n self.method = 'GET' if method=='auto' else method\n data = None\n t0 = time.time()\n self.response = opener.open(self.url, to_bytes(data))\n self.time = time.time() - t0\n except urllib2.HTTPError as er:\n error = er\n # catch HTTP errors\n self.time = time.time() - t0\n self.response = er\n\n if hasattr(self.response, 'getcode'):\n self.status = self.response.getcode()\n else:#python2.5\n self.status = None\n\n self.text = to_native(self.response.read())\n # In PY3 self.response.headers are case sensitive\n self.headers = dict()\n for h in self.response.headers:\n self.headers[h.lower()] = self.response.headers[h]\n\n # treat web2py tickets as special types of errors\n if error is not None:\n if 'web2py_error' in self.headers:\n raise RuntimeError(self.headers['web2py_error'])\n else:\n raise error\n\n self._parse_headers_in_cookies()\n\n # check is a new session id has been issued, symptom of broken session\n if self.session_regex is not None:\n for cookie, value in iteritems(self.cookies):\n match = self.session_regex.match(cookie)\n if match:\n name = match.group('name')\n if name in self.sessions and 
self.sessions[name] != value:\n print(RuntimeError('Changed session ID %s' % name))\n self.sessions[name] = value\n\n # find all forms and formkeys in page\n self.forms = {}\n for match in FORM_REGEX.finditer(to_native(self.text)):\n self.forms[match.group('formname')] = match.group('formkey')\n\n # log this request\n self.history.append((self.method, self.url, self.status, self.time))\n\n\ndef test_web2py_registration_and_login():\n # from gluon.contrib.webclient import WebClient\n # start a web2py instance for testing\n\n client = WebClient('http://127.0.0.1:8000/welcome/default/')\n client.get('index')\n\n # register\n data = dict(first_name='Homer',\n last_name='Simpson',\n email='[email protected]',\n password='test',\n password_two='test',\n _formname='register')\n client.post('user/register', data=data)\n\n # logout\n client.get('user/logout')\n\n # login\n data = dict(email='[email protected]',\n password='test',\n _formname='login')\n client.post('user/login', data=data)\n\n # check registration and login were successful\n client.get('user/profile')\n assert 'Welcome Homer' in client.text\n\n # print some variables\n print('\\nsessions:\\n', client.sessions)\n print('\\nheaders:\\n', client.headers)\n print('\\ncookies:\\n', client.cookies)\n print('\\nforms:\\n', client.forms)\n print()\n for method, url, status, t in client.history:\n print(method, url, status, t)\n\nif __name__ == '__main__':\n test_web2py_registration_and_login()\n", "path": "gluon/contrib/webclient.py"}]} | 3,128 | 189 |
gh_patches_debug_20384 | rasdani/github-patches | git_diff | wagtail__wagtail-7934 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wagtail 2.16 - Slim Sidebar opens only settings
### Issue Summary
When clicking on any menu item with submenu pages, the settings submenu always opens.
### Steps to Reproduce
1. Open admin page
2. Click on any menu item with a submenu
3. Settings submenu always opens
When clicking on the pages menu item, both submenus open.
### Technical details
* Python version: 3.9.9.
* Django version: 3.2.12.
* Wagtail version: 2.16
* Browser version: firefox 96.0.3
</issue>
<code>
[start of wagtail/admin/menu.py]
1 from django.forms import Media, MediaDefiningClass
2 from django.forms.utils import flatatt
3 from django.template.loader import render_to_string
4 from django.utils.safestring import mark_safe
5 from django.utils.text import slugify
6
7 from wagtail.admin.ui.sidebar import LinkMenuItem as LinkMenuItemComponent
8 from wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent
9 from wagtail.core import hooks
10
11
12 class MenuItem(metaclass=MediaDefiningClass):
13 template = 'wagtailadmin/shared/menu_item.html'
14
15 def __init__(self, label, url, name=None, classnames='', icon_name='', attrs=None, order=1000):
16 self.label = label
17 self.url = url
18 self.classnames = classnames
19 self.icon_name = icon_name
20 self.name = (name or slugify(str(label)))
21 self.order = order
22
23 if attrs:
24 self.attr_string = flatatt(attrs)
25 else:
26 self.attr_string = ""
27
28 def is_shown(self, request):
29 """
30 Whether this menu item should be shown for the given request; permission
31 checks etc should go here. By default, menu items are shown all the time
32 """
33 return True
34
35 def is_active(self, request):
36 return request.path.startswith(str(self.url))
37
38 def get_context(self, request):
39 """Defines context for the template, overridable to use more data"""
40 return {
41 'name': self.name,
42 'url': self.url,
43 'classnames': self.classnames,
44 'icon_name': self.icon_name,
45 'attr_string': self.attr_string,
46 'label': self.label,
47 'active': self.is_active(request)
48 }
49
50 def render_html(self, request):
51 context = self.get_context(request)
52 return render_to_string(self.template, context, request=request)
53
54 def render_component(self, request):
55 return LinkMenuItemComponent(self.name, self.label, self.url, icon_name=self.icon_name, classnames=self.classnames)
56
57
58 class Menu:
59 def __init__(self, register_hook_name, construct_hook_name=None):
60 self.register_hook_name = register_hook_name
61 self.construct_hook_name = construct_hook_name
62 # _registered_menu_items will be populated on first access to the
63 # registered_menu_items property. We can't populate it in __init__ because
64 # we can't rely on all hooks modules to have been imported at the point that
65 # we create the admin_menu and settings_menu instances
66 self._registered_menu_items = None
67
68 @property
69 def registered_menu_items(self):
70 if self._registered_menu_items is None:
71 self._registered_menu_items = [fn() for fn in hooks.get_hooks(self.register_hook_name)]
72 return self._registered_menu_items
73
74 def menu_items_for_request(self, request):
75 items = [item for item in self.registered_menu_items if item.is_shown(request)]
76
77 # provide a hook for modifying the menu, if construct_hook_name has been set
78 if self.construct_hook_name:
79 for fn in hooks.get_hooks(self.construct_hook_name):
80 fn(request, items)
81
82 return items
83
84 def active_menu_items(self, request):
85 return [item for item in self.menu_items_for_request(request) if item.is_active(request)]
86
87 @property
88 def media(self):
89 media = Media()
90 for item in self.registered_menu_items:
91 media += item.media
92 return media
93
94 def render_html(self, request):
95 menu_items = self.menu_items_for_request(request)
96 rendered_menu_items = []
97 for item in sorted(menu_items, key=lambda i: i.order):
98 rendered_menu_items.append(item.render_html(request))
99 return mark_safe(''.join(rendered_menu_items))
100
101 def render_component(self, request):
102 menu_items = self.menu_items_for_request(request)
103 rendered_menu_items = []
104 for item in sorted(menu_items, key=lambda i: i.order):
105 rendered_menu_items.append(item.render_component(request))
106 return rendered_menu_items
107
108
109 class SubmenuMenuItem(MenuItem):
110 template = 'wagtailadmin/shared/menu_submenu_item.html'
111
112 """A MenuItem which wraps an inner Menu object"""
113 def __init__(self, label, menu, **kwargs):
114 self.menu = menu
115 super().__init__(label, '#', **kwargs)
116
117 def is_shown(self, request):
118 # show the submenu if one or more of its children is shown
119 return bool(self.menu.menu_items_for_request(request))
120
121 def is_active(self, request):
122 return bool(self.menu.active_menu_items(request))
123
124 def get_context(self, request):
125 context = super().get_context(request)
126 context['menu_html'] = self.menu.render_html(request)
127 context['request'] = request
128 return context
129
130 def render_component(self, request):
131 return SubMenuItemComponent(self.name, self.label, self.menu.render_component(request), icon_name=self.icon_name, classnames=self.classnames)
132
133
134 class AdminOnlyMenuItem(MenuItem):
135 """A MenuItem which is only shown to superusers"""
136
137 def is_shown(self, request):
138 return request.user.is_superuser
139
140
141 admin_menu = Menu(register_hook_name='register_admin_menu_item', construct_hook_name='construct_main_menu')
142 settings_menu = Menu(register_hook_name='register_settings_menu_item', construct_hook_name='construct_settings_menu')
143 reports_menu = Menu(register_hook_name='register_reports_menu_item', construct_hook_name='construct_reports_menu')
144
[end of wagtail/admin/menu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/menu.py b/wagtail/admin/menu.py
--- a/wagtail/admin/menu.py
+++ b/wagtail/admin/menu.py
@@ -2,11 +2,11 @@
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
-from django.utils.text import slugify
from wagtail.admin.ui.sidebar import LinkMenuItem as LinkMenuItemComponent
from wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent
from wagtail.core import hooks
+from wagtail.core.utils import cautious_slugify
class MenuItem(metaclass=MediaDefiningClass):
@@ -17,7 +17,7 @@
self.url = url
self.classnames = classnames
self.icon_name = icon_name
- self.name = (name or slugify(str(label)))
+ self.name = (name or cautious_slugify(str(label)))
self.order = order
if attrs:
| {"golden_diff": "diff --git a/wagtail/admin/menu.py b/wagtail/admin/menu.py\n--- a/wagtail/admin/menu.py\n+++ b/wagtail/admin/menu.py\n@@ -2,11 +2,11 @@\n from django.forms.utils import flatatt\n from django.template.loader import render_to_string\n from django.utils.safestring import mark_safe\n-from django.utils.text import slugify\n \n from wagtail.admin.ui.sidebar import LinkMenuItem as LinkMenuItemComponent\n from wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\n from wagtail.core import hooks\n+from wagtail.core.utils import cautious_slugify\n \n \n class MenuItem(metaclass=MediaDefiningClass):\n@@ -17,7 +17,7 @@\n self.url = url\n self.classnames = classnames\n self.icon_name = icon_name\n- self.name = (name or slugify(str(label)))\n+ self.name = (name or cautious_slugify(str(label)))\n self.order = order\n \n if attrs:\n", "issue": "Wagtail 2.16 - Slim Sidebar open only settings\n### Issue Summary\r\nWhen click on any menu item with submenu pages, the settings submenu always opens. \r\n\r\n### Steps to Reproduce\r\n\r\n1. Open admin page\r\n2. Click on any menu item with a submenu \r\n3. Settings submenu always opens\r\n\r\nWhen click on the pages menu item, both submenus open. \r\n\r\n### Technical details\r\n\r\n* Python version: 3.9.9.\r\n* Django version: 3.2.12.\r\n* Wagtail version: 2.16\r\n* Browser version: firefox 96.0.3\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.forms import Media, MediaDefiningClass\nfrom django.forms.utils import flatatt\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import slugify\n\nfrom wagtail.admin.ui.sidebar import LinkMenuItem as LinkMenuItemComponent\nfrom wagtail.admin.ui.sidebar import SubMenuItem as SubMenuItemComponent\nfrom wagtail.core import hooks\n\n\nclass MenuItem(metaclass=MediaDefiningClass):\n template = 'wagtailadmin/shared/menu_item.html'\n\n def __init__(self, label, url, name=None, classnames='', icon_name='', attrs=None, order=1000):\n self.label = label\n self.url = url\n self.classnames = classnames\n self.icon_name = icon_name\n self.name = (name or slugify(str(label)))\n self.order = order\n\n if attrs:\n self.attr_string = flatatt(attrs)\n else:\n self.attr_string = \"\"\n\n def is_shown(self, request):\n \"\"\"\n Whether this menu item should be shown for the given request; permission\n checks etc should go here. By default, menu items are shown all the time\n \"\"\"\n return True\n\n def is_active(self, request):\n return request.path.startswith(str(self.url))\n\n def get_context(self, request):\n \"\"\"Defines context for the template, overridable to use more data\"\"\"\n return {\n 'name': self.name,\n 'url': self.url,\n 'classnames': self.classnames,\n 'icon_name': self.icon_name,\n 'attr_string': self.attr_string,\n 'label': self.label,\n 'active': self.is_active(request)\n }\n\n def render_html(self, request):\n context = self.get_context(request)\n return render_to_string(self.template, context, request=request)\n\n def render_component(self, request):\n return LinkMenuItemComponent(self.name, self.label, self.url, icon_name=self.icon_name, classnames=self.classnames)\n\n\nclass Menu:\n def __init__(self, register_hook_name, construct_hook_name=None):\n self.register_hook_name = register_hook_name\n self.construct_hook_name = construct_hook_name\n # _registered_menu_items will be populated on first access to the\n # registered_menu_items property. 
We can't populate it in __init__ because\n # we can't rely on all hooks modules to have been imported at the point that\n # we create the admin_menu and settings_menu instances\n self._registered_menu_items = None\n\n @property\n def registered_menu_items(self):\n if self._registered_menu_items is None:\n self._registered_menu_items = [fn() for fn in hooks.get_hooks(self.register_hook_name)]\n return self._registered_menu_items\n\n def menu_items_for_request(self, request):\n items = [item for item in self.registered_menu_items if item.is_shown(request)]\n\n # provide a hook for modifying the menu, if construct_hook_name has been set\n if self.construct_hook_name:\n for fn in hooks.get_hooks(self.construct_hook_name):\n fn(request, items)\n\n return items\n\n def active_menu_items(self, request):\n return [item for item in self.menu_items_for_request(request) if item.is_active(request)]\n\n @property\n def media(self):\n media = Media()\n for item in self.registered_menu_items:\n media += item.media\n return media\n\n def render_html(self, request):\n menu_items = self.menu_items_for_request(request)\n rendered_menu_items = []\n for item in sorted(menu_items, key=lambda i: i.order):\n rendered_menu_items.append(item.render_html(request))\n return mark_safe(''.join(rendered_menu_items))\n\n def render_component(self, request):\n menu_items = self.menu_items_for_request(request)\n rendered_menu_items = []\n for item in sorted(menu_items, key=lambda i: i.order):\n rendered_menu_items.append(item.render_component(request))\n return rendered_menu_items\n\n\nclass SubmenuMenuItem(MenuItem):\n template = 'wagtailadmin/shared/menu_submenu_item.html'\n\n \"\"\"A MenuItem which wraps an inner Menu object\"\"\"\n def __init__(self, label, menu, **kwargs):\n self.menu = menu\n super().__init__(label, '#', **kwargs)\n\n def is_shown(self, request):\n # show the submenu if one or more of its children is shown\n return bool(self.menu.menu_items_for_request(request))\n\n def is_active(self, request):\n return bool(self.menu.active_menu_items(request))\n\n def get_context(self, request):\n context = super().get_context(request)\n context['menu_html'] = self.menu.render_html(request)\n context['request'] = request\n return context\n\n def render_component(self, request):\n return SubMenuItemComponent(self.name, self.label, self.menu.render_component(request), icon_name=self.icon_name, classnames=self.classnames)\n\n\nclass AdminOnlyMenuItem(MenuItem):\n \"\"\"A MenuItem which is only shown to superusers\"\"\"\n\n def is_shown(self, request):\n return request.user.is_superuser\n\n\nadmin_menu = Menu(register_hook_name='register_admin_menu_item', construct_hook_name='construct_main_menu')\nsettings_menu = Menu(register_hook_name='register_settings_menu_item', construct_hook_name='construct_settings_menu')\nreports_menu = Menu(register_hook_name='register_reports_menu_item', construct_hook_name='construct_reports_menu')\n", "path": "wagtail/admin/menu.py"}]} | 2,154 | 212 |
gh_patches_debug_35858 | rasdani/github-patches | git_diff | pantsbuild__pants-15550 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
minor config-related nit with anonymous telemetry
```
10:17:08.49 [ERROR] Please set `repo_id = "<uuid>"` in the [anonymous-telemetry] section of pants.toml, where `<uuid>` is some fixed random identifier, such as one generated by uuidgen. No telemetry will be sent for this run. See https://www.pantsbuild.org/v2.7/docs/anonymous-telemetry for details.
```
Pants should just generate a UUID and display it in the message as the suggestion.
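A minimal sketch of that suggestion (the helper name and message wording here are illustrative assumptions, not actual Pants code):

```python
# Sketch: generate the UUID up front and embed it in the hint, so users can
# copy-paste a ready-made value instead of running uuidgen themselves.
import uuid

def repo_id_hint() -> str:
    suggested = uuid.uuid4()
    return (
        f'Please set `repo_id = "{suggested}"` in the [anonymous-telemetry] '
        "section of pants.toml. No telemetry will be sent for this run."
    )

print(repo_id_hint())
```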
</issue>
<code>
[start of src/python/pants/goal/anonymous_telemetry.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import json
7 import logging
8 import re
9 import uuid
10 from typing import cast
11
12 from humbug.consent import HumbugConsent
13 from humbug.report import HumbugReporter, Modes, Report
14
15 from pants.engine.internals.scheduler import Workunit
16 from pants.engine.rules import collect_rules, rule
17 from pants.engine.streaming_workunit_handler import (
18 StreamingWorkunitContext,
19 WorkunitsCallback,
20 WorkunitsCallbackFactory,
21 WorkunitsCallbackFactoryRequest,
22 )
23 from pants.engine.unions import UnionRule
24 from pants.option.option_types import BoolOption, StrOption
25 from pants.option.subsystem import Subsystem
26 from pants.util.docutil import doc_url
27 from pants.util.strutil import softwrap
28
29 logger = logging.getLogger(__name__)
30
31
32 _bugout_access_token = "974b1acc-e052-4e5f-a45e-bae928e47bb0"
33 _telemetry_docs_referral = f"See {doc_url('anonymous-telemetry')} for details"
34
35
36 class AnonymousTelemetry(Subsystem):
37 options_scope = "anonymous-telemetry"
38 help = "Options related to sending anonymous stats to the Pants project, to aid development."
39
40 enabled = BoolOption(
41 "--enabled",
42 default=False,
43 help=softwrap(
44 f"""
45 Whether to send anonymous telemetry to the Pants project.
46
47 Telemetry is sent asynchronously, with silent failure, and does not impact build times
48 or outcomes.
49
50 {_telemetry_docs_referral}.
51 """
52 ),
53 advanced=True,
54 )
55 repo_id = StrOption(
56 "--repo-id",
57 default=None,
58 help=softwrap(
59 f"""
60 An anonymized ID representing this repo.
61
62 For private repos, you likely want the ID to not be derived from, or algorithmically
63 convertible to, anything identifying the repo.
64
65 For public repos the ID may be visible in that repo's config file, so anonymity of the
66 repo is not guaranteed (although user anonymity is always guaranteed).
67
68 {_telemetry_docs_referral}.
69 """
70 ),
71 advanced=True,
72 )
73
74
75 class AnonymousTelemetryCallback(WorkunitsCallback):
76 def __init__(self, unhashed_repo_id: str) -> None:
77 super().__init__()
78 self._unhashed_repo_id = unhashed_repo_id
79
80 # Broken out into a staticmethod for testing.
81 @staticmethod
82 def validate_repo_id(unhashed_repo_id: str) -> bool:
83 return re.match(r"^[a-zA-Z0-9-_]{30,60}$", unhashed_repo_id) is not None
84
85 @property
86 def can_finish_async(self) -> bool:
87 # Because we don't log anything, it's safe to finish in the background.
88 return True
89
90 def __call__(
91 self,
92 *,
93 started_workunits: tuple[Workunit, ...],
94 completed_workunits: tuple[Workunit, ...],
95 finished: bool,
96 context: StreamingWorkunitContext,
97 ) -> None:
98 if not finished:
99 return
100
101 # Assemble and send the telemetry.
102 # Note that this method is called with finished=True only after the
103 # StreamingWorkunitHandler context ends, i.e., after end_run() has been called,
104 # so the RunTracker will have had a chance to finalize its state.
105 telemetry_data = context.run_tracker.get_anonymous_telemetry_data(self._unhashed_repo_id)
106 # TODO: Add information about any errors that occurred.
107
108 reporter = HumbugReporter(
109 name="pantsbuild/pants",
110 # We've already established consent at this point.
111 consent=HumbugConsent(True),
112 session_id=str(telemetry_data.get("run_id", uuid.uuid4())),
113 bugout_token=_bugout_access_token,
114 timeout_seconds=5,
115 # We don't want to spawn a thread in the engine, and we're
116 # already running in a background thread in pantsd.
117 mode=Modes.SYNCHRONOUS,
118 )
119
120 # This is copied from humbug code, to ensure that future changes to humbug
121 # don't add tags that inadvertently violate our anonymity promise.
122 system_tags = [
123 f"source:{reporter.name}",
124 f"os:{reporter.system_information.os}",
125 f"arch:{reporter.system_information.machine}",
126 f"python:{reporter.system_information.python_version_major}",
127 "python:{}.{}".format(
128 reporter.system_information.python_version_major,
129 reporter.system_information.python_version_minor,
130 ),
131 f"python:{reporter.system_information.python_version}",
132 f"session:{reporter.session_id}",
133 ]
134 tags = (
135 system_tags
136 + [
137 f"pants_version:{telemetry_data.get('pants_version')}",
138 # This is hashed, unlike the contents of the unhashed_repo_id var.
139 f"repo:{telemetry_data.get('repo_id', 'UNKNOWN')}",
140 f"user:{telemetry_data.get('user_id', 'UNKNOWN')}",
141 f"machine:{telemetry_data.get('machine_id', 'UNKNOWN')}",
142 f"duration:{telemetry_data.get('duration', '0')}",
143 f"outcome:{telemetry_data.get('outcome', 'UNKNOWN')}",
144 ]
145 + [f"goal:{goal}" for goal in telemetry_data.get("standard_goals", [])]
146 )
147
148 report = Report(
149 title=f"pants run {reporter.session_id}",
150 tags=tags,
151 content=json.dumps(telemetry_data, sort_keys=True),
152 )
153 reporter.publish(report)
154
155
156 class AnonymousTelemetryCallbackFactoryRequest:
157 """A unique request type that is installed to trigger construction of the WorkunitsCallback."""
158
159
160 @rule
161 def construct_callback(
162 _: AnonymousTelemetryCallbackFactoryRequest, anonymous_telemetry: AnonymousTelemetry
163 ) -> WorkunitsCallbackFactory:
164 enabled = anonymous_telemetry.enabled
165 unhashed_repo_id = anonymous_telemetry.repo_id
166
167 if anonymous_telemetry.options.is_default("enabled"):
168 logger.warning(
169 "Please either set `enabled = true` in the [anonymous-telemetry] section of "
170 "pants.toml to enable sending anonymous stats to the Pants project to aid "
171 "development, or set `enabled = false` to disable it. No telemetry sent "
172 "for this run. An explicit setting will get rid of this message. "
173 f"{_telemetry_docs_referral}."
174 )
175 if enabled:
176 if unhashed_repo_id is None:
177 logger.error(
178 'Please set `repo_id = "<uuid>"` in the [anonymous-telemetry] section '
179 "of pants.toml, where `<uuid>` is some fixed random identifier, such as "
180 "one generated by uuidgen. No telemetry will be sent for this run. "
181 f"{_telemetry_docs_referral}."
182 )
183 enabled = False
184 elif not AnonymousTelemetryCallback.validate_repo_id(unhashed_repo_id):
185 logger.error(
186 "The repo_id option in the [anonymous-telemetry] scope must be between 30 and "
187 "60 characters long, and consist of only alphanumeric characters, dashes "
188 "and underscores."
189 )
190 enabled = False
191
192 return WorkunitsCallbackFactory(
193 lambda: AnonymousTelemetryCallback(cast(str, unhashed_repo_id)) if enabled else None
194 )
195
196
197 def rules():
198 return [
199 UnionRule(WorkunitsCallbackFactoryRequest, AnonymousTelemetryCallbackFactoryRequest),
200 *collect_rules(),
201 ]
202
[end of src/python/pants/goal/anonymous_telemetry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/goal/anonymous_telemetry.py b/src/python/pants/goal/anonymous_telemetry.py
--- a/src/python/pants/goal/anonymous_telemetry.py
+++ b/src/python/pants/goal/anonymous_telemetry.py
@@ -166,26 +166,45 @@
if anonymous_telemetry.options.is_default("enabled"):
logger.warning(
- "Please either set `enabled = true` in the [anonymous-telemetry] section of "
- "pants.toml to enable sending anonymous stats to the Pants project to aid "
- "development, or set `enabled = false` to disable it. No telemetry sent "
- "for this run. An explicit setting will get rid of this message. "
- f"{_telemetry_docs_referral}."
+ softwrap(
+ f"""
+ Please either set `enabled = true` in the [anonymous-telemetry] section of
+ pants.toml to enable sending anonymous stats to the Pants project to aid
+ development, or set `enabled = false` to disable it. No telemetry sent
+ for this run. An explicit setting will get rid of this message.
+ {_telemetry_docs_referral}.
+ """
+ )
)
if enabled:
if unhashed_repo_id is None:
logger.error(
- 'Please set `repo_id = "<uuid>"` in the [anonymous-telemetry] section '
- "of pants.toml, where `<uuid>` is some fixed random identifier, such as "
- "one generated by uuidgen. No telemetry will be sent for this run. "
- f"{_telemetry_docs_referral}."
+ softwrap(
+ f"""
+ Please set `repo_id = "<uuid>"` in the [anonymous-telemetry] section
+ of pants.toml, where `<uuid>` is some fixed random identifier, such as
+ one generated by uuidgen.
+
+ Example (using a randomly generated UUID):
+
+ [anonymous-telemetry]
+ repo_id = "{uuid.uuid4()}"
+
+ No telemetry will be sent for this run.
+ {_telemetry_docs_referral}.
+ """
+ )
)
enabled = False
elif not AnonymousTelemetryCallback.validate_repo_id(unhashed_repo_id):
logger.error(
- "The repo_id option in the [anonymous-telemetry] scope must be between 30 and "
- "60 characters long, and consist of only alphanumeric characters, dashes "
- "and underscores."
+ softwrap(
+ """
+ The repo_id option in the [anonymous-telemetry] scope must be between 30 and
+ 60 characters long, and consist of only alphanumeric characters, dashes
+ and underscores.
+ """
+ )
)
enabled = False
| {"golden_diff": "diff --git a/src/python/pants/goal/anonymous_telemetry.py b/src/python/pants/goal/anonymous_telemetry.py\n--- a/src/python/pants/goal/anonymous_telemetry.py\n+++ b/src/python/pants/goal/anonymous_telemetry.py\n@@ -166,26 +166,45 @@\n \n if anonymous_telemetry.options.is_default(\"enabled\"):\n logger.warning(\n- \"Please either set `enabled = true` in the [anonymous-telemetry] section of \"\n- \"pants.toml to enable sending anonymous stats to the Pants project to aid \"\n- \"development, or set `enabled = false` to disable it. No telemetry sent \"\n- \"for this run. An explicit setting will get rid of this message. \"\n- f\"{_telemetry_docs_referral}.\"\n+ softwrap(\n+ f\"\"\"\n+ Please either set `enabled = true` in the [anonymous-telemetry] section of\n+ pants.toml to enable sending anonymous stats to the Pants project to aid\n+ development, or set `enabled = false` to disable it. No telemetry sent\n+ for this run. An explicit setting will get rid of this message.\n+ {_telemetry_docs_referral}.\n+ \"\"\"\n+ )\n )\n if enabled:\n if unhashed_repo_id is None:\n logger.error(\n- 'Please set `repo_id = \"<uuid>\"` in the [anonymous-telemetry] section '\n- \"of pants.toml, where `<uuid>` is some fixed random identifier, such as \"\n- \"one generated by uuidgen. No telemetry will be sent for this run. \"\n- f\"{_telemetry_docs_referral}.\"\n+ softwrap(\n+ f\"\"\"\n+ Please set `repo_id = \"<uuid>\"` in the [anonymous-telemetry] section\n+ of pants.toml, where `<uuid>` is some fixed random identifier, such as\n+ one generated by uuidgen.\n+\n+ Example (using a randomly generated UUID):\n+\n+ [anonymous-telemetry]\n+ repo_id = \"{uuid.uuid4()}\"\n+\n+ No telemetry will be sent for this run.\n+ {_telemetry_docs_referral}.\n+ \"\"\"\n+ )\n )\n enabled = False\n elif not AnonymousTelemetryCallback.validate_repo_id(unhashed_repo_id):\n logger.error(\n- \"The repo_id option in the [anonymous-telemetry] scope must be between 30 and \"\n- \"60 characters long, and consist of only alphanumeric characters, dashes \"\n- \"and underscores.\"\n+ softwrap(\n+ \"\"\"\n+ The repo_id option in the [anonymous-telemetry] scope must be between 30 and\n+ 60 characters long, and consist of only alphanumeric characters, dashes\n+ and underscores.\n+ \"\"\"\n+ )\n )\n enabled = False\n", "issue": "minor config-related nit with anonymous telemetry\n```\r\n10:17:08.49 [ERROR] Please set `repo_id = \"<uuid>\"` in the [anonymous-telemetry] section of pants.toml, where `<uuid>` is some fixed random identifier, such as one generated by uuidgen. No telemetry will be sent for this run. 
See https://www.pantsbuild.org/v2.7/docs/anonymous-telemetry for details.\r\n```\r\n\r\nPants should just generate a UUID and display it in the message as the suggestion.\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport json\nimport logging\nimport re\nimport uuid\nfrom typing import cast\n\nfrom humbug.consent import HumbugConsent\nfrom humbug.report import HumbugReporter, Modes, Report\n\nfrom pants.engine.internals.scheduler import Workunit\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.streaming_workunit_handler import (\n StreamingWorkunitContext,\n WorkunitsCallback,\n WorkunitsCallbackFactory,\n WorkunitsCallbackFactoryRequest,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.option.option_types import BoolOption, StrOption\nfrom pants.option.subsystem import Subsystem\nfrom pants.util.docutil import doc_url\nfrom pants.util.strutil import softwrap\n\nlogger = logging.getLogger(__name__)\n\n\n_bugout_access_token = \"974b1acc-e052-4e5f-a45e-bae928e47bb0\"\n_telemetry_docs_referral = f\"See {doc_url('anonymous-telemetry')} for details\"\n\n\nclass AnonymousTelemetry(Subsystem):\n options_scope = \"anonymous-telemetry\"\n help = \"Options related to sending anonymous stats to the Pants project, to aid development.\"\n\n enabled = BoolOption(\n \"--enabled\",\n default=False,\n help=softwrap(\n f\"\"\"\n Whether to send anonymous telemetry to the Pants project.\n\n Telemetry is sent asynchronously, with silent failure, and does not impact build times\n or outcomes.\n\n {_telemetry_docs_referral}.\n \"\"\"\n ),\n advanced=True,\n )\n repo_id = StrOption(\n \"--repo-id\",\n default=None,\n help=softwrap(\n f\"\"\"\n An anonymized ID representing this repo.\n\n For private repos, you likely want the ID to not be derived from, or algorithmically\n convertible to, anything identifying the repo.\n\n For public repos the ID may be visible in that repo's config file, so anonymity of the\n repo is not guaranteed (although user anonymity is always guaranteed).\n\n {_telemetry_docs_referral}.\n \"\"\"\n ),\n advanced=True,\n )\n\n\nclass AnonymousTelemetryCallback(WorkunitsCallback):\n def __init__(self, unhashed_repo_id: str) -> None:\n super().__init__()\n self._unhashed_repo_id = unhashed_repo_id\n\n # Broken out into a staticmethod for testing.\n @staticmethod\n def validate_repo_id(unhashed_repo_id: str) -> bool:\n return re.match(r\"^[a-zA-Z0-9-_]{30,60}$\", unhashed_repo_id) is not None\n\n @property\n def can_finish_async(self) -> bool:\n # Because we don't log anything, it's safe to finish in the background.\n return True\n\n def __call__(\n self,\n *,\n started_workunits: tuple[Workunit, ...],\n completed_workunits: tuple[Workunit, ...],\n finished: bool,\n context: StreamingWorkunitContext,\n ) -> None:\n if not finished:\n return\n\n # Assemble and send the telemetry.\n # Note that this method is called with finished=True only after the\n # StreamingWorkunitHandler context ends, i.e., after end_run() has been called,\n # so the RunTracker will have had a chance to finalize its state.\n telemetry_data = context.run_tracker.get_anonymous_telemetry_data(self._unhashed_repo_id)\n # TODO: Add information about any errors that occurred.\n\n reporter = HumbugReporter(\n name=\"pantsbuild/pants\",\n # We've already established consent at this point.\n consent=HumbugConsent(True),\n 
session_id=str(telemetry_data.get(\"run_id\", uuid.uuid4())),\n bugout_token=_bugout_access_token,\n timeout_seconds=5,\n # We don't want to spawn a thread in the engine, and we're\n # already running in a background thread in pantsd.\n mode=Modes.SYNCHRONOUS,\n )\n\n # This is copied from humbug code, to ensure that future changes to humbug\n # don't add tags that inadvertently violate our anonymity promise.\n system_tags = [\n f\"source:{reporter.name}\",\n f\"os:{reporter.system_information.os}\",\n f\"arch:{reporter.system_information.machine}\",\n f\"python:{reporter.system_information.python_version_major}\",\n \"python:{}.{}\".format(\n reporter.system_information.python_version_major,\n reporter.system_information.python_version_minor,\n ),\n f\"python:{reporter.system_information.python_version}\",\n f\"session:{reporter.session_id}\",\n ]\n tags = (\n system_tags\n + [\n f\"pants_version:{telemetry_data.get('pants_version')}\",\n # This is hashed, unlike the contents of the unhashed_repo_id var.\n f\"repo:{telemetry_data.get('repo_id', 'UNKNOWN')}\",\n f\"user:{telemetry_data.get('user_id', 'UNKNOWN')}\",\n f\"machine:{telemetry_data.get('machine_id', 'UNKNOWN')}\",\n f\"duration:{telemetry_data.get('duration', '0')}\",\n f\"outcome:{telemetry_data.get('outcome', 'UNKNOWN')}\",\n ]\n + [f\"goal:{goal}\" for goal in telemetry_data.get(\"standard_goals\", [])]\n )\n\n report = Report(\n title=f\"pants run {reporter.session_id}\",\n tags=tags,\n content=json.dumps(telemetry_data, sort_keys=True),\n )\n reporter.publish(report)\n\n\nclass AnonymousTelemetryCallbackFactoryRequest:\n \"\"\"A unique request type that is installed to trigger construction of the WorkunitsCallback.\"\"\"\n\n\n@rule\ndef construct_callback(\n _: AnonymousTelemetryCallbackFactoryRequest, anonymous_telemetry: AnonymousTelemetry\n) -> WorkunitsCallbackFactory:\n enabled = anonymous_telemetry.enabled\n unhashed_repo_id = anonymous_telemetry.repo_id\n\n if anonymous_telemetry.options.is_default(\"enabled\"):\n logger.warning(\n \"Please either set `enabled = true` in the [anonymous-telemetry] section of \"\n \"pants.toml to enable sending anonymous stats to the Pants project to aid \"\n \"development, or set `enabled = false` to disable it. No telemetry sent \"\n \"for this run. An explicit setting will get rid of this message. \"\n f\"{_telemetry_docs_referral}.\"\n )\n if enabled:\n if unhashed_repo_id is None:\n logger.error(\n 'Please set `repo_id = \"<uuid>\"` in the [anonymous-telemetry] section '\n \"of pants.toml, where `<uuid>` is some fixed random identifier, such as \"\n \"one generated by uuidgen. No telemetry will be sent for this run. \"\n f\"{_telemetry_docs_referral}.\"\n )\n enabled = False\n elif not AnonymousTelemetryCallback.validate_repo_id(unhashed_repo_id):\n logger.error(\n \"The repo_id option in the [anonymous-telemetry] scope must be between 30 and \"\n \"60 characters long, and consist of only alphanumeric characters, dashes \"\n \"and underscores.\"\n )\n enabled = False\n\n return WorkunitsCallbackFactory(\n lambda: AnonymousTelemetryCallback(cast(str, unhashed_repo_id)) if enabled else None\n )\n\n\ndef rules():\n return [\n UnionRule(WorkunitsCallbackFactoryRequest, AnonymousTelemetryCallbackFactoryRequest),\n *collect_rules(),\n ]\n", "path": "src/python/pants/goal/anonymous_telemetry.py"}]} | 2,792 | 623 |
gh_patches_debug_23282 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-2170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'
Recently, I updated PyTorch to 1.4. When running `tools/dist_train.sh submitted/faster_giou_train_config.py 1 --validate --autoscale-lr --seed 512`, an error was raised: "AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'". I found that the random seed caused this error; when running without `--seed`, it works fine.
I haven't run the script with the `--seed` option in PyTorch 1.3, so I don't know if it's the PyTorch version.
I wonder if this is a bug or a feature. Thank you in advance!
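For context, the usual workaround for this pickling error is to define the worker-init function at module level and bind its arguments with `functools.partial` (a sketch with made-up example values, not the exact mmdetection patch):

```python
# Sketch: a module-level function can be pickled (unlike a closure created
# inside build_dataloader); functools.partial binds the per-loader arguments.
import random
from functools import partial

import numpy as np

def worker_init_fn(worker_id, num_workers, rank, seed):
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)

# example values only
init_fn = partial(worker_init_fn, num_workers=2, rank=0, seed=512)
```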
</issue>
<code>
[start of mmdet/datasets/loader/build_loader.py]
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from torch.utils.data import DataLoader
9
10 from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler
11
12 if platform.system() != 'Windows':
13 # https://github.com/pytorch/pytorch/issues/973
14 import resource
15 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
16 resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
17
18
19 def build_dataloader(dataset,
20 imgs_per_gpu,
21 workers_per_gpu,
22 num_gpus=1,
23 dist=True,
24 shuffle=True,
25 seed=None,
26 **kwargs):
27 """Build PyTorch DataLoader.
28
29 In distributed training, each GPU/process has a dataloader.
30 In non-distributed training, there is only one dataloader for all GPUs.
31
32 Args:
33 dataset (Dataset): A PyTorch dataset.
34 imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
35 each GPU.
36 workers_per_gpu (int): How many subprocesses to use for data loading
37 for each GPU.
38 num_gpus (int): Number of GPUs. Only used in non-distributed training.
39 dist (bool): Distributed training/test or not. Default: True.
40 shuffle (bool): Whether to shuffle the data at every epoch.
41 Default: True.
42 kwargs: any keyword argument to be used to initialize DataLoader
43
44 Returns:
45 DataLoader: A PyTorch dataloader.
46 """
47 rank, world_size = get_dist_info()
48 if dist:
49 # DistributedGroupSampler will definitely shuffle the data to satisfy
50 # that images on each GPU are in the same group
51 if shuffle:
52 sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
53 world_size, rank)
54 else:
55 sampler = DistributedSampler(
56 dataset, world_size, rank, shuffle=False)
57 batch_size = imgs_per_gpu
58 num_workers = workers_per_gpu
59 else:
60 sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
61 batch_size = num_gpus * imgs_per_gpu
62 num_workers = num_gpus * workers_per_gpu
63
64 def worker_init_fn(worker_id):
65 # The seed of each worker equals to
66 # num_worker * rank + worker_id + user_seed
67 worker_seed = num_workers * rank + worker_id + seed
68 np.random.seed(worker_seed)
69 random.seed(worker_seed)
70
71 data_loader = DataLoader(
72 dataset,
73 batch_size=batch_size,
74 sampler=sampler,
75 num_workers=num_workers,
76 collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
77 pin_memory=False,
78 worker_init_fn=worker_init_fn if seed is not None else None,
79 **kwargs)
80
81 return data_loader
82
[end of mmdet/datasets/loader/build_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py
--- a/mmdet/datasets/loader/build_loader.py
+++ b/mmdet/datasets/loader/build_loader.py
@@ -61,12 +61,9 @@
batch_size = num_gpus * imgs_per_gpu
num_workers = num_gpus * workers_per_gpu
- def worker_init_fn(worker_id):
- # The seed of each worker equals to
- # num_worker * rank + worker_id + user_seed
- worker_seed = num_workers * rank + worker_id + seed
- np.random.seed(worker_seed)
- random.seed(worker_seed)
+ init_fn = partial(
+ worker_init_fn, num_workers=num_workers, rank=rank,
+ seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
@@ -75,7 +72,15 @@
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
pin_memory=False,
- worker_init_fn=worker_init_fn if seed is not None else None,
+ worker_init_fn=init_fn,
**kwargs)
return data_loader
+
+
+def worker_init_fn(worker_id, num_workers, rank, seed):
+ # The seed of each worker equals to
+ # num_worker * rank + worker_id + user_seed
+ worker_seed = num_workers * rank + worker_id + seed
+ np.random.seed(worker_seed)
+ random.seed(worker_seed)
| {"golden_diff": "diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py\n--- a/mmdet/datasets/loader/build_loader.py\n+++ b/mmdet/datasets/loader/build_loader.py\n@@ -61,12 +61,9 @@\n batch_size = num_gpus * imgs_per_gpu\n num_workers = num_gpus * workers_per_gpu\n \n- def worker_init_fn(worker_id):\n- # The seed of each worker equals to\n- # num_worker * rank + worker_id + user_seed\n- worker_seed = num_workers * rank + worker_id + seed\n- np.random.seed(worker_seed)\n- random.seed(worker_seed)\n+ init_fn = partial(\n+ worker_init_fn, num_workers=num_workers, rank=rank,\n+ seed=seed) if seed is not None else None\n \n data_loader = DataLoader(\n dataset,\n@@ -75,7 +72,15 @@\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),\n pin_memory=False,\n- worker_init_fn=worker_init_fn if seed is not None else None,\n+ worker_init_fn=init_fn,\n **kwargs)\n \n return data_loader\n+\n+\n+def worker_init_fn(worker_id, num_workers, rank, seed):\n+ # The seed of each worker equals to\n+ # num_worker * rank + worker_id + user_seed\n+ worker_seed = num_workers * rank + worker_id + seed\n+ np.random.seed(worker_seed)\n+ random.seed(worker_seed)\n", "issue": "AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'\nRecently, I update pytorch to 1.4. When running `tools/dist_train.sh submitted/faster_giou_train_config.py 1 --validate --autoscale-lr --seed 512`, one error raised: \"AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'\". I found random seed caused this error, when running without `--seed`, it's ok.\r\n\r\nI haven't ran script with `--seed` option in pytorch 1.3, so i don't know if it's the pytorch verion.\r\n\r\nI wonder if this is a bug or a feature, Thank you in advance!\n", "before_files": [{"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom torch.utils.data import DataLoader\n\nfrom .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n\n\ndef build_dataloader(dataset,\n imgs_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of\n each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n # DistributedGroupSampler will definitely shuffle the data to satisfy\n # that images on each GPU are in the same group\n if shuffle:\n sampler = DistributedGroupSampler(dataset, imgs_per_gpu,\n world_size, rank)\n else:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=False)\n batch_size = imgs_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None\n batch_size = num_gpus * imgs_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n def worker_init_fn(worker_id):\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),\n pin_memory=False,\n worker_init_fn=worker_init_fn if seed is not None else None,\n **kwargs)\n\n return data_loader\n", "path": "mmdet/datasets/loader/build_loader.py"}]} | 1,477 | 343 |
gh_patches_debug_25026 | rasdani/github-patches | git_diff | dask__distributed-5822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
importing distributed runs 4 `git` subprocesses in CI (when installed with -e)
I noticed that tests that run a dask subprocess are often flaky on CI (especially so on low-performance macOS runners).
https://github.com/dask/distributed/runs/4922796526?check_suite_focus=true#step:12:1849
This is an example of a process taking more than 5 seconds to boot on a mac in `test_dask_worker::test_memory_limit`:
```pytb
Traceback (most recent call last):
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 33, in <module>
sys.exit(load_entry_point('distributed', 'console_scripts', 'dask-worker')())
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 25, in importlib_load_entry_point
return next(matches).load()
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/Users/runner/work/distributed/distributed/distributed/__init__.py", line 49, in <module>
versions = get_versions()
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 534, in get_versions
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 265, in git_pieces_from_vcs
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 78, in run_command
p = subprocess.Popen(
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 951, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 1777, in _execute_child
part = os.read(errpipe_read, 50000)
KeyboardInterrupt
```
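For reference, one way to avoid running `git` at import time is to defer the version lookup (a sketch using a module-level `__getattr__`, PEP 562 — not necessarily the fix that eventually landed):

```python
# distributed/__init__.py (sketch): resolve __version__ lazily from package
# metadata instead of calling versioneer's get_versions() at import time,
# which spawns git subprocesses when installed with -e.
def __getattr__(name):
    if name == "__version__":
        from importlib.metadata import version
        return version("distributed")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```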
</issue>
<code>
[start of distributed/__init__.py]
1 from . import config # isort:skip; load distributed configuration first
2 from . import widgets # isort:skip; load distributed widgets second
3 import dask
4 from dask.config import config # type: ignore
5
6 from ._version import get_versions
7 from .actor import Actor, BaseActorFuture
8 from .client import (
9 Client,
10 CompatibleExecutor,
11 Executor,
12 Future,
13 as_completed,
14 default_client,
15 fire_and_forget,
16 futures_of,
17 get_task_metadata,
18 get_task_stream,
19 performance_report,
20 wait,
21 )
22 from .core import Status, connect, rpc
23 from .deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster
24 from .diagnostics.plugin import (
25 Environ,
26 NannyPlugin,
27 PipInstall,
28 SchedulerPlugin,
29 UploadDirectory,
30 UploadFile,
31 WorkerPlugin,
32 )
33 from .diagnostics.progressbar import progress
34 from .event import Event
35 from .lock import Lock
36 from .multi_lock import MultiLock
37 from .nanny import Nanny
38 from .pubsub import Pub, Sub
39 from .queues import Queue
40 from .scheduler import Scheduler
41 from .security import Security
42 from .semaphore import Semaphore
43 from .threadpoolexecutor import rejoin
44 from .utils import CancelledError, TimeoutError, sync
45 from .variable import Variable
46 from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn
47 from .worker_client import local_client, worker_client
48
49 versions = get_versions()
50 __version__ = versions["version"]
51 __git_revision__ = versions["full-revisionid"]
52 del get_versions, versions
53
[end of distributed/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -1,10 +1,12 @@
from . import config # isort:skip; load distributed configuration first
from . import widgets # isort:skip; load distributed widgets second
+
+
import dask
from dask.config import config # type: ignore
from ._version import get_versions
-from .actor import Actor, BaseActorFuture
+from .actor import Actor, ActorFuture, BaseActorFuture
from .client import (
Client,
CompatibleExecutor,
@@ -46,7 +48,20 @@
from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn
from .worker_client import local_client, worker_client
-versions = get_versions()
-__version__ = versions["version"]
-__git_revision__ = versions["full-revisionid"]
-del get_versions, versions
+
+def __getattr__(name):
+ global __version__, __git_revision__
+
+ if name == "__version__":
+ from importlib.metadata import version
+
+ __version__ = version("distributed")
+ return __version__
+
+ if name == "__git_revision__":
+ from ._version import get_versions
+
+ __git_revision__ = get_versions()["full-revisionid"]
+ return __git_revision__
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| {"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -1,10 +1,12 @@\n from . import config # isort:skip; load distributed configuration first\n from . import widgets # isort:skip; load distributed widgets second\n+\n+\n import dask\n from dask.config import config # type: ignore\n \n from ._version import get_versions\n-from .actor import Actor, BaseActorFuture\n+from .actor import Actor, ActorFuture, BaseActorFuture\n from .client import (\n Client,\n CompatibleExecutor,\n@@ -46,7 +48,20 @@\n from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn\n from .worker_client import local_client, worker_client\n \n-versions = get_versions()\n-__version__ = versions[\"version\"]\n-__git_revision__ = versions[\"full-revisionid\"]\n-del get_versions, versions\n+\n+def __getattr__(name):\n+ global __version__, __git_revision__\n+\n+ if name == \"__version__\":\n+ from importlib.metadata import version\n+\n+ __version__ = version(\"distributed\")\n+ return __version__\n+\n+ if name == \"__git_revision__\":\n+ from ._version import get_versions\n+\n+ __git_revision__ = get_versions()[\"full-revisionid\"]\n+ return __git_revision__\n+\n+ raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "issue": "importing distributed runs 4 `git` subprocesses in CI (when installed with -e)\nI noticed that tests that run a dask subprocess are often flakey on CI (especially so on low performance macos runners)\r\n\r\nhttps://github.com/dask/distributed/runs/4922796526?check_suite_focus=true#step:12:1849\r\nThis is an example of a process taking more than 5 seconds to boot on a mac in `test_dask_worker::test_memory_limit`:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker\", line 33, in <module>\r\n sys.exit(load_entry_point('distributed', 'console_scripts', 'dask-worker')())\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker\", line 25, in importlib_load_entry_point\r\n return next(matches).load()\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/metadata.py\", line 77, in load\r\n module = import_module(match.group('module'))\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 972, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 972, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 228, in 
_call_with_frames_removed\r\n File \"/Users/runner/work/distributed/distributed/distributed/__init__.py\", line 49, in <module>\r\n versions = get_versions()\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 534, in get_versions\r\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 265, in git_pieces_from_vcs\r\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 78, in run_command\r\n p = subprocess.Popen(\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py\", line 951, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py\", line 1777, in _execute_child\r\n part = os.read(errpipe_read, 50000)\r\nKeyboardInterrupt\r\n```\n", "before_files": [{"content": "from . import config # isort:skip; load distributed configuration first\nfrom . import widgets # isort:skip; load distributed widgets second\nimport dask\nfrom dask.config import config # type: ignore\n\nfrom ._version import get_versions\nfrom .actor import Actor, BaseActorFuture\nfrom .client import (\n Client,\n CompatibleExecutor,\n Executor,\n Future,\n as_completed,\n default_client,\n fire_and_forget,\n futures_of,\n get_task_metadata,\n get_task_stream,\n performance_report,\n wait,\n)\nfrom .core import Status, connect, rpc\nfrom .deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster\nfrom .diagnostics.plugin import (\n Environ,\n NannyPlugin,\n PipInstall,\n SchedulerPlugin,\n UploadDirectory,\n UploadFile,\n WorkerPlugin,\n)\nfrom .diagnostics.progressbar import progress\nfrom .event import Event\nfrom .lock import Lock\nfrom .multi_lock import MultiLock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .security import Security\nfrom .semaphore import Semaphore\nfrom .threadpoolexecutor import rejoin\nfrom .utils import CancelledError, TimeoutError, sync\nfrom .variable import Variable\nfrom .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn\nfrom .worker_client import local_client, worker_client\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py"}]} | 1,879 | 339 |
gh_patches_debug_12499 | rasdani/github-patches | git_diff | zulip__zulip-16602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translation for zh_TW gets ignored in some places
In the webapp, if I try switching to the translation for "Chinese (Taiwan)", a lot of the text on the screen is still untranslated:

That's even though many (at least) of those strings [do have translations in Transifex](https://www.transifex.com/zulip/zulip/translate/#zh_TW/$/67194598?q=text%3Asettings). Those translations have been there for months and do indeed seem to be in the repo, so it's not an issue of not having synced them.
I have a suspicion that the issue is with this code in `zerver/lib/i18n.py`:
```py3
def get_language_translation_data(language: str) -> Dict[str, str]:
if language == 'en':
return {}
elif language == 'zh-hans':
language = 'zh_Hans'
elif language == 'zh-hant':
language = 'zh_Hant'
elif language == 'id-id':
language = 'id_ID'
path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')
# …
```
That has a handful of special cases to try to translate between two different conventions for locale names. It sure looks like it'd need another one to support `zh_TW` aka `zh-tw`, and that without that this function will fail to do its job on zh_TW.
Better still, of course, would be to make this function stop being made of special cases. Here's a Django utility function that should do the job of all those cases: https://docs.djangoproject.com/en/2.2/ref/utils/#django.utils.translation.to_locale
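As a quick sanity check (editor's sketch, not code from the Zulip tree), Django's converter already covers every special case above plus the missing one:

```python
# Editor's sketch: Django's to_locale() already handles each hand-written
# special case, plus the zh-tw case that falls through today.
from django.utils.translation import to_locale

assert to_locale("zh-hans") == "zh_Hans"
assert to_locale("zh-hant") == "zh_Hant"
assert to_locale("zh-tw") == "zh_TW"   # currently unhandled
assert to_locale("id-id") == "id_ID"
```

With that, the special-case ladder in `get_language_translation_data` could collapse to a single `locale = to_locale(language)` before building the path.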
---
~~Likely related, but possibly a separate issue: in the webapp language picker itself, the translation shouldn't be called "Chinese (Taiwan)" but rather something like "中文(台湾)" -- its name is written in English, whereas all the other languages have their names written in themselves.~~ (This other symptom is caused at least in part by #14565.)
(Both issues originally reported [in chat](https://chat.zulip.org/#narrow/stream/58-translation/topic/zh-Hant.20.2F.20zh_TW/near/1045033).)
</issue>
<code>
[start of zerver/lib/i18n.py]
1 # See https://zulip.readthedocs.io/en/latest/translating/internationalization.html
2
3 import logging
4 import operator
5 import os
6 from functools import lru_cache
7 from itertools import zip_longest
8 from typing import Any, Dict, List, Optional
9
10 import orjson
11 from django.conf import settings
12 from django.http import HttpRequest
13 from django.utils import translation
14
15
16 @lru_cache()
17 def get_language_list() -> List[Dict[str, Any]]:
18 path = os.path.join(settings.DEPLOY_ROOT, 'locale', 'language_name_map.json')
19 with open(path, "rb") as reader:
20 languages = orjson.loads(reader.read())
21 return languages['name_map']
22
23 def get_language_list_for_templates(default_language: str) -> List[Dict[str, Dict[str, str]]]:
24 language_list = [lang for lang in get_language_list()
25 if 'percent_translated' not in lang or
26 lang['percent_translated'] >= 5.]
27
28 formatted_list = []
29 lang_len = len(language_list)
30 firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)
31 firsts = list(range(0, firsts_end))
32 seconds = list(range(firsts_end, lang_len))
33 assert len(firsts) + len(seconds) == lang_len
34 for row in zip_longest(firsts, seconds):
35 item = {}
36 for position, ind in zip(['first', 'second'], row):
37 if ind is None:
38 continue
39
40 lang = language_list[ind]
41 percent = name = lang['name']
42 if 'percent_translated' in lang:
43 percent = "{} ({}%)".format(name, lang['percent_translated'])
44
45 selected = False
46 if default_language in (lang['code'], lang['locale']):
47 selected = True
48
49 item[position] = {
50 'name': name,
51 'code': lang['code'],
52 'percent': percent,
53 'selected': selected,
54 }
55
56 formatted_list.append(item)
57
58 return formatted_list
59
60 def get_language_name(code: str) -> str:
61 for lang in get_language_list():
62 if code in (lang['code'], lang['locale']):
63 return lang['name']
64 # Log problem, but still return a name
65 logging.error("Unknown language code '%s'", code)
66 return "Unknown"
67
68 def get_available_language_codes() -> List[str]:
69 language_list = get_language_list()
70 codes = [language['code'] for language in language_list]
71 return codes
72
73 def get_language_translation_data(language: str) -> Dict[str, str]:
74 if language == 'en':
75 return {}
76 elif language == 'zh-hans':
77 language = 'zh_Hans'
78 elif language == 'zh-hant':
79 language = 'zh_Hant'
80 elif language == 'id-id':
81 language = 'id_ID'
82 path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')
83 try:
84 with open(path, "rb") as reader:
85 return orjson.loads(reader.read())
86 except FileNotFoundError:
87 print(f'Translation for {language} not found at {path}')
88 return {}
89
90 def get_and_set_request_language(
91 request: HttpRequest,
92 user_configured_language: str,
93 testing_url_language: Optional[str]=None
94 ) -> str:
95 # We pick a language for the user as follows:
96 # * First priority is the language in the URL, for debugging.
97 # * If not in the URL, we use the language from the user's settings.
98 request_language = testing_url_language
99 if request_language is None:
100 request_language = user_configured_language
101 translation.activate(request_language)
102
103 # We also save the language to the user's session, so that
104 # something reasonable will happen in logged-in portico pages.
105 request.session[translation.LANGUAGE_SESSION_KEY] = translation.get_language()
106
107 return request_language
108
[end of zerver/lib/i18n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/i18n.py b/zerver/lib/i18n.py
--- a/zerver/lib/i18n.py
+++ b/zerver/lib/i18n.py
@@ -73,13 +73,8 @@
def get_language_translation_data(language: str) -> Dict[str, str]:
if language == 'en':
return {}
- elif language == 'zh-hans':
- language = 'zh_Hans'
- elif language == 'zh-hant':
- language = 'zh_Hant'
- elif language == 'id-id':
- language = 'id_ID'
- path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')
+ locale = translation.to_locale(language)
+ path = os.path.join(settings.DEPLOY_ROOT, 'locale', locale, 'translations.json')
try:
with open(path, "rb") as reader:
return orjson.loads(reader.read())
| {"golden_diff": "diff --git a/zerver/lib/i18n.py b/zerver/lib/i18n.py\n--- a/zerver/lib/i18n.py\n+++ b/zerver/lib/i18n.py\n@@ -73,13 +73,8 @@\n def get_language_translation_data(language: str) -> Dict[str, str]:\n if language == 'en':\n return {}\n- elif language == 'zh-hans':\n- language = 'zh_Hans'\n- elif language == 'zh-hant':\n- language = 'zh_Hant'\n- elif language == 'id-id':\n- language = 'id_ID'\n- path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')\n+ locale = translation.to_locale(language)\n+ path = os.path.join(settings.DEPLOY_ROOT, 'locale', locale, 'translations.json')\n try:\n with open(path, \"rb\") as reader:\n return orjson.loads(reader.read())\n", "issue": "Translation for zh_TW gets ignored in some places\nIn the webapp, if I try switching to the translation for \"Chinese (Taiwan)\", a lot of the text on the screen is still untranslated:\r\n\r\nThat's even though many (at least) of those strings [do have translations in Transifex](https://www.transifex.com/zulip/zulip/translate/#zh_TW/$/67194598?q=text%3Asettings). Those translations have been there for months and do indeed seem to be in the repo, so it's not an issue of not having synced them.\r\n\r\nI have a suspicion that the issue is with this code in `zerver/lib/i18n.py`:\r\n```py3\r\ndef get_language_translation_data(language: str) -> Dict[str, str]:\r\n if language == 'en':\r\n return {}\r\n elif language == 'zh-hans':\r\n language = 'zh_Hans'\r\n elif language == 'zh-hant':\r\n language = 'zh_Hant'\r\n elif language == 'id-id':\r\n language = 'id_ID'\r\n path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')\r\n # \u2026\r\n```\r\n\r\nThat has a handful of special cases to try to translate between two different conventions for locale names. It sure looks like it'd need another one to support `zh_TW` aka `zh-tw`, and that without that this function will fail to do its job on zh_TW.\r\n\r\nBetter still, of course, would be to make this function stop being made of special cases. 
Here's a Django utility function that should do the job of all those cases: https://docs.djangoproject.com/en/2.2/ref/utils/#django.utils.translation.to_locale\r\n\r\n---\r\n\r\n~~Likely related, but possibly a separate issue: in the webapp language picker itself, the translation shouldn't be called \"Chinese (Taiwan)\" but rather something like \"\u4e2d\u6587\uff08\u53f0\u6e7e\uff09\" -- its name is written in English, whereas all the other languages have their names written in themselves.~~ (This other symptom is caused at least in part by #14565.)\r\n\r\n(Both issues originally reported [in chat](https://chat.zulip.org/#narrow/stream/58-translation/topic/zh-Hant.20.2F.20zh_TW/near/1045033).)\r\n\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/translating/internationalization.html\n\nimport logging\nimport operator\nimport os\nfrom functools import lru_cache\nfrom itertools import zip_longest\nfrom typing import Any, Dict, List, Optional\n\nimport orjson\nfrom django.conf import settings\nfrom django.http import HttpRequest\nfrom django.utils import translation\n\n\n@lru_cache()\ndef get_language_list() -> List[Dict[str, Any]]:\n path = os.path.join(settings.DEPLOY_ROOT, 'locale', 'language_name_map.json')\n with open(path, \"rb\") as reader:\n languages = orjson.loads(reader.read())\n return languages['name_map']\n\ndef get_language_list_for_templates(default_language: str) -> List[Dict[str, Dict[str, str]]]:\n language_list = [lang for lang in get_language_list()\n if 'percent_translated' not in lang or\n lang['percent_translated'] >= 5.]\n\n formatted_list = []\n lang_len = len(language_list)\n firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)\n firsts = list(range(0, firsts_end))\n seconds = list(range(firsts_end, lang_len))\n assert len(firsts) + len(seconds) == lang_len\n for row in zip_longest(firsts, seconds):\n item = {}\n for position, ind in zip(['first', 'second'], row):\n if ind is None:\n continue\n\n lang = language_list[ind]\n percent = name = lang['name']\n if 'percent_translated' in lang:\n percent = \"{} ({}%)\".format(name, lang['percent_translated'])\n\n selected = False\n if default_language in (lang['code'], lang['locale']):\n selected = True\n\n item[position] = {\n 'name': name,\n 'code': lang['code'],\n 'percent': percent,\n 'selected': selected,\n }\n\n formatted_list.append(item)\n\n return formatted_list\n\ndef get_language_name(code: str) -> str:\n for lang in get_language_list():\n if code in (lang['code'], lang['locale']):\n return lang['name']\n # Log problem, but still return a name\n logging.error(\"Unknown language code '%s'\", code)\n return \"Unknown\"\n\ndef get_available_language_codes() -> List[str]:\n language_list = get_language_list()\n codes = [language['code'] for language in language_list]\n return codes\n\ndef get_language_translation_data(language: str) -> Dict[str, str]:\n if language == 'en':\n return {}\n elif language == 'zh-hans':\n language = 'zh_Hans'\n elif language == 'zh-hant':\n language = 'zh_Hant'\n elif language == 'id-id':\n language = 'id_ID'\n path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')\n try:\n with open(path, \"rb\") as reader:\n return orjson.loads(reader.read())\n except FileNotFoundError:\n print(f'Translation for {language} not found at {path}')\n return {}\n\ndef get_and_set_request_language(\n request: HttpRequest,\n user_configured_language: str,\n testing_url_language: Optional[str]=None\n) -> str:\n # We pick a language for the user as 
follows:\n # * First priority is the language in the URL, for debugging.\n # * If not in the URL, we use the language from the user's settings.\n request_language = testing_url_language\n if request_language is None:\n request_language = user_configured_language\n translation.activate(request_language)\n\n # We also save the language to the user's session, so that\n # something reasonable will happen in logged-in portico pages.\n request.session[translation.LANGUAGE_SESSION_KEY] = translation.get_language()\n\n return request_language\n", "path": "zerver/lib/i18n.py"}]} | 2,163 | 212 |
gh_patches_debug_50428 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4831 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide an API to query the build status by commit
In order to check before a release that everything is OK, I would like to have a way to obtain the current build status for a given commit. So, in addition to:
```
GET /api/v1/build/{id}/
```
also have this:
```
GET /api/v1/commit/{sha1}/
```
or
```
GET /api/v1/{user}/{project}/commit/{sha1}/
```
Is this possible right now?
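
For context, the kind of client-side release check this would enable might look like the sketch below — assuming the build list endpoint (presumably `/api/v2/build/`, served by the view code further down) learned to filter on `commit`; the parameter names and response fields here are assumptions, not the current API:

```python
# Hedged sketch: poll Read the Docs for builds of a specific commit.
# Assumes /api/v2/build/ accepts ?commit=<sha> (the change requested here)
# alongside the existing ?project__slug= filter.
import requests

def builds_for_commit(project_slug, sha):
    resp = requests.get(
        "https://readthedocs.org/api/v2/build/",
        params={"project__slug": project_slug, "commit": sha},
    )
    resp.raise_for_status()
    # Each entry would be a build record; fields such as "state" or
    # "success" are assumed, not guaranteed by the code shown below.
    return resp.json().get("results", [])
```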
</issue>
<code>
[start of readthedocs/restapi/views/model_views.py]
1 # -*- coding: utf-8 -*-
2 """Endpoints for listing Projects, Versions, Builds, etc."""
3
4 from __future__ import (
5 absolute_import, division, print_function, unicode_literals)
6
7 import logging
8
9 from allauth.socialaccount.models import SocialAccount
10 from builtins import str
11 from django.shortcuts import get_object_or_404
12 from django.template.loader import render_to_string
13 from rest_framework import decorators, permissions, status, viewsets
14 from rest_framework.decorators import detail_route
15 from rest_framework.renderers import BaseRenderer, JSONRenderer
16 from rest_framework.response import Response
17
18 from readthedocs.builds.constants import BRANCH, TAG
19 from readthedocs.builds.models import Build, BuildCommandResult, Version
20 from readthedocs.core.utils import trigger_build
21 from readthedocs.core.utils.extend import SettingsOverrideObject
22 from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
23 from readthedocs.oauth.services import GitHubService, registry
24 from readthedocs.projects.models import Domain, EmailHook, Project
25 from readthedocs.projects.version_handling import determine_stable_version
26
27 from .. import utils as api_utils
28 from ..permissions import (
29 APIPermission, APIRestrictedPermission, IsOwner, RelatedProjectIsOwner)
30 from ..serializers import (
31 BuildAdminSerializer, BuildCommandSerializer, BuildSerializer,
32 DomainSerializer, ProjectAdminSerializer, ProjectSerializer,
33 RemoteOrganizationSerializer, RemoteRepositorySerializer,
34 SocialAccountSerializer, VersionAdminSerializer, VersionSerializer)
35
36 log = logging.getLogger(__name__)
37
38
39 class PlainTextBuildRenderer(BaseRenderer):
40
41 """
42 Custom renderer for text/plain format.
43
44 charset is 'utf-8' by default.
45 """
46
47 media_type = 'text/plain'
48 format = 'txt'
49
50 def render(self, data, accepted_media_type=None, renderer_context=None):
51 renderer_context = renderer_context or {}
52 response = renderer_context.get('response')
53 if not response or response.exception:
54 return data.get('detail', '').encode(self.charset)
55 data = render_to_string(
56 'restapi/log.txt', {'build': data}
57 )
58 return data.encode(self.charset)
59
60
61 class UserSelectViewSet(viewsets.ModelViewSet):
62
63 """
64 View set that varies serializer class based on request user credentials.
65
66 Viewsets using this class should have an attribute `admin_serializer_class`,
67 which is a serializer that might have more fields that only admin/staff
68 users require. If the user is staff, this class will be returned instead.
69 """
70
71 def get_serializer_class(self):
72 try:
73 if (self.request.user.is_staff and
74 self.admin_serializer_class is not None):
75 return self.admin_serializer_class
76 except AttributeError:
77 pass
78 return self.serializer_class
79
80 def get_queryset(self):
81 """Use our API manager method to determine authorization on queryset."""
82 return self.model.objects.api(self.request.user)
83
84
85 class ProjectViewSet(UserSelectViewSet):
86
87 """List, filter, etc, Projects."""
88
89 permission_classes = [APIPermission]
90 renderer_classes = (JSONRenderer,)
91 serializer_class = ProjectSerializer
92 admin_serializer_class = ProjectAdminSerializer
93 model = Project
94 pagination_class = api_utils.ProjectPagination
95 filter_fields = ('slug',)
96
97 @decorators.detail_route()
98 def valid_versions(self, request, **kwargs):
99 """Maintain state of versions that are wanted."""
100 project = get_object_or_404(
101 Project.objects.api(request.user), pk=kwargs['pk'])
102 if (not project.num_major or not project.num_minor or
103 not project.num_point):
104 return Response(
105 {
106 'error': 'Project does not support point version control',
107 },
108 status=status.HTTP_400_BAD_REQUEST,
109 )
110 version_strings = project.supported_versions()
111 # Disable making old versions inactive for now.
112 # project.versions.exclude(verbose_name__in=version_strings).update(active=False)
113 project.versions.filter(verbose_name__in=version_strings).update(
114 active=True,
115 )
116 return Response({
117 'flat': version_strings,
118 })
119
120 @detail_route()
121 def translations(self, *_, **__):
122 translations = self.get_object().translations.all()
123 return Response({
124 'translations': ProjectSerializer(translations, many=True).data,
125 })
126
127 @detail_route()
128 def subprojects(self, request, **kwargs):
129 project = get_object_or_404(
130 Project.objects.api(request.user), pk=kwargs['pk'])
131 rels = project.subprojects.all()
132 children = [rel.child for rel in rels]
133 return Response({
134 'subprojects': ProjectSerializer(children, many=True).data,
135 })
136
137 @detail_route()
138 def active_versions(self, request, **kwargs):
139 project = get_object_or_404(
140 Project.objects.api(request.user), pk=kwargs['pk'])
141 versions = project.versions.filter(active=True)
142 return Response({
143 'versions': VersionSerializer(versions, many=True).data,
144 })
145
146 @decorators.detail_route(permission_classes=[permissions.IsAdminUser])
147 def token(self, request, **kwargs):
148 project = get_object_or_404(
149 Project.objects.api(request.user), pk=kwargs['pk'])
150 token = GitHubService.get_token_for_project(project, force_local=True)
151 return Response({
152 'token': token,
153 })
154
155 @decorators.detail_route()
156 def canonical_url(self, request, **kwargs):
157 project = get_object_or_404(
158 Project.objects.api(request.user), pk=kwargs['pk'])
159 return Response({
160 'url': project.get_docs_url(),
161 })
162
163 @decorators.detail_route(
164 permission_classes=[permissions.IsAdminUser], methods=['post'])
165 def sync_versions(self, request, **kwargs): # noqa: D205
166 """
167 Sync the version data in the repo (on the build server).
168
169 Version data in the repo is synced with what we have in the database.
170
171 :returns: the identifiers for the versions that have been deleted.
172 """
173 project = get_object_or_404(
174 Project.objects.api(request.user), pk=kwargs['pk'])
175
176 # If the currently highest non-prerelease version is active, then make
177 # the new latest version active as well.
178 old_highest_version = determine_stable_version(project.versions.all())
179 if old_highest_version is not None:
180 activate_new_stable = old_highest_version.active
181 else:
182 activate_new_stable = False
183
184 try:
185 # Update All Versions
186 data = request.data
187 added_versions = set()
188 if 'tags' in data:
189 ret_set = api_utils.sync_versions(
190 project=project, versions=data['tags'], type=TAG)
191 added_versions.update(ret_set)
192 if 'branches' in data:
193 ret_set = api_utils.sync_versions(
194 project=project, versions=data['branches'], type=BRANCH)
195 added_versions.update(ret_set)
196 deleted_versions = api_utils.delete_versions(project, data)
197 except Exception as e:
198 log.exception('Sync Versions Error')
199 return Response(
200 {
201 'error': str(e),
202 },
203 status=status.HTTP_400_BAD_REQUEST,
204 )
205
206 promoted_version = project.update_stable_version()
207 if promoted_version:
208 new_stable = project.get_stable_version()
209 log.info(
210 'Triggering new stable build: {project}:{version}'.format(
211 project=project.slug,
212 version=new_stable.identifier,
213 ))
214 trigger_build(project=project, version=new_stable)
215
216 # Marking the tag that is considered the new stable version as
217 # active and building it if it was just added.
218 if (activate_new_stable and
219 promoted_version.slug in added_versions):
220 promoted_version.active = True
221 promoted_version.save()
222 trigger_build(project=project, version=promoted_version)
223
224 return Response({
225 'added_versions': added_versions,
226 'deleted_versions': deleted_versions,
227 })
228
229
230 class VersionViewSet(UserSelectViewSet):
231
232 permission_classes = [APIRestrictedPermission]
233 renderer_classes = (JSONRenderer,)
234 serializer_class = VersionSerializer
235 admin_serializer_class = VersionAdminSerializer
236 model = Version
237 filter_fields = ('active', 'project__slug',)
238
239
240 class BuildViewSetBase(UserSelectViewSet):
241 permission_classes = [APIRestrictedPermission]
242 renderer_classes = (JSONRenderer, PlainTextBuildRenderer)
243 serializer_class = BuildSerializer
244 admin_serializer_class = BuildAdminSerializer
245 model = Build
246 filter_fields = ('project__slug',)
247
248
249 class BuildViewSet(SettingsOverrideObject):
250
251 """A pluggable class to allow for build cold storage."""
252
253 _default_class = BuildViewSetBase
254
255
256 class BuildCommandViewSet(UserSelectViewSet):
257 permission_classes = [APIRestrictedPermission]
258 renderer_classes = (JSONRenderer,)
259 serializer_class = BuildCommandSerializer
260 model = BuildCommandResult
261
262
263 class NotificationViewSet(viewsets.ReadOnlyModelViewSet):
264 permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)
265 renderer_classes = (JSONRenderer,)
266 model = EmailHook
267
268 def get_queryset(self):
269 return self.model.objects.api(self.request.user)
270
271
272 class DomainViewSet(UserSelectViewSet):
273 permission_classes = [APIRestrictedPermission]
274 renderer_classes = (JSONRenderer,)
275 serializer_class = DomainSerializer
276 model = Domain
277
278
279 class RemoteOrganizationViewSet(viewsets.ReadOnlyModelViewSet):
280 permission_classes = [IsOwner]
281 renderer_classes = (JSONRenderer,)
282 serializer_class = RemoteOrganizationSerializer
283 model = RemoteOrganization
284 pagination_class = api_utils.RemoteOrganizationPagination
285
286 def get_queryset(self):
287 return (
288 self.model.objects.api(self.request.user).filter(
289 account__provider__in=[
290 service.adapter.provider_id for service in registry
291 ]))
292
293
294 class RemoteRepositoryViewSet(viewsets.ReadOnlyModelViewSet):
295 permission_classes = [IsOwner]
296 renderer_classes = (JSONRenderer,)
297 serializer_class = RemoteRepositorySerializer
298 model = RemoteRepository
299 pagination_class = api_utils.RemoteProjectPagination
300
301 def get_queryset(self):
302 query = self.model.objects.api(self.request.user)
303 org = self.request.query_params.get('org', None)
304 if org is not None:
305 query = query.filter(organization__pk=org)
306
307 own = self.request.query_params.get('own', None)
308 if own is not None:
309 query = query.filter(
310 account__provider=own,
311 organization=None,
312 )
313
314 query = query.filter(
315 account__provider__in=[
316 service.adapter.provider_id for service in registry
317 ])
318 return query
319
320
321 class SocialAccountViewSet(viewsets.ReadOnlyModelViewSet):
322 permission_classes = [IsOwner]
323 renderer_classes = (JSONRenderer,)
324 serializer_class = SocialAccountSerializer
325 model = SocialAccount
326
327 def get_queryset(self):
328 return self.model.objects.filter(user=self.request.user.pk)
329
[end of readthedocs/restapi/views/model_views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/restapi/views/model_views.py b/readthedocs/restapi/views/model_views.py
--- a/readthedocs/restapi/views/model_views.py
+++ b/readthedocs/restapi/views/model_views.py
@@ -243,7 +243,7 @@
serializer_class = BuildSerializer
admin_serializer_class = BuildAdminSerializer
model = Build
- filter_fields = ('project__slug',)
+ filter_fields = ('project__slug', 'commit')
class BuildViewSet(SettingsOverrideObject):
| {"golden_diff": "diff --git a/readthedocs/restapi/views/model_views.py b/readthedocs/restapi/views/model_views.py\n--- a/readthedocs/restapi/views/model_views.py\n+++ b/readthedocs/restapi/views/model_views.py\n@@ -243,7 +243,7 @@\n serializer_class = BuildSerializer\n admin_serializer_class = BuildAdminSerializer\n model = Build\n- filter_fields = ('project__slug',)\n+ filter_fields = ('project__slug', 'commit')\n \n \n class BuildViewSet(SettingsOverrideObject):\n", "issue": "Provide an API to query the build status by commit\nIn order to do a check before release that everything is ok, I would like to have a way to obtain the current build status for a given commit. So, in addition to:\n\n```\nGET /api/v1/build/{id}/\n```\n\nalso have this:\n\n```\nGET /api/v1/commit/{sha1}/\n```\n\nor \n\n```\nGET /api/v1/{user}/{project}/commit/{sha1}/\n```\n\nIs this possible right now?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Endpoints for listing Projects, Versions, Builds, etc.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\n\nfrom allauth.socialaccount.models import SocialAccount\nfrom builtins import str\nfrom django.shortcuts import get_object_or_404\nfrom django.template.loader import render_to_string\nfrom rest_framework import decorators, permissions, status, viewsets\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.renderers import BaseRenderer, JSONRenderer\nfrom rest_framework.response import Response\n\nfrom readthedocs.builds.constants import BRANCH, TAG\nfrom readthedocs.builds.models import Build, BuildCommandResult, Version\nfrom readthedocs.core.utils import trigger_build\nfrom readthedocs.core.utils.extend import SettingsOverrideObject\nfrom readthedocs.oauth.models import RemoteOrganization, RemoteRepository\nfrom readthedocs.oauth.services import GitHubService, registry\nfrom readthedocs.projects.models import Domain, EmailHook, Project\nfrom readthedocs.projects.version_handling import determine_stable_version\n\nfrom .. import utils as api_utils\nfrom ..permissions import (\n APIPermission, APIRestrictedPermission, IsOwner, RelatedProjectIsOwner)\nfrom ..serializers import (\n BuildAdminSerializer, BuildCommandSerializer, BuildSerializer,\n DomainSerializer, ProjectAdminSerializer, ProjectSerializer,\n RemoteOrganizationSerializer, RemoteRepositorySerializer,\n SocialAccountSerializer, VersionAdminSerializer, VersionSerializer)\n\nlog = logging.getLogger(__name__)\n\n\nclass PlainTextBuildRenderer(BaseRenderer):\n\n \"\"\"\n Custom renderer for text/plain format.\n\n charset is 'utf-8' by default.\n \"\"\"\n\n media_type = 'text/plain'\n format = 'txt'\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n renderer_context = renderer_context or {}\n response = renderer_context.get('response')\n if not response or response.exception:\n return data.get('detail', '').encode(self.charset)\n data = render_to_string(\n 'restapi/log.txt', {'build': data}\n )\n return data.encode(self.charset)\n\n\nclass UserSelectViewSet(viewsets.ModelViewSet):\n\n \"\"\"\n View set that varies serializer class based on request user credentials.\n\n Viewsets using this class should have an attribute `admin_serializer_class`,\n which is a serializer that might have more fields that only admin/staff\n users require. 
If the user is staff, this class will be returned instead.\n \"\"\"\n\n def get_serializer_class(self):\n try:\n if (self.request.user.is_staff and\n self.admin_serializer_class is not None):\n return self.admin_serializer_class\n except AttributeError:\n pass\n return self.serializer_class\n\n def get_queryset(self):\n \"\"\"Use our API manager method to determine authorization on queryset.\"\"\"\n return self.model.objects.api(self.request.user)\n\n\nclass ProjectViewSet(UserSelectViewSet):\n\n \"\"\"List, filter, etc, Projects.\"\"\"\n\n permission_classes = [APIPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = ProjectSerializer\n admin_serializer_class = ProjectAdminSerializer\n model = Project\n pagination_class = api_utils.ProjectPagination\n filter_fields = ('slug',)\n\n @decorators.detail_route()\n def valid_versions(self, request, **kwargs):\n \"\"\"Maintain state of versions that are wanted.\"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n if (not project.num_major or not project.num_minor or\n not project.num_point):\n return Response(\n {\n 'error': 'Project does not support point version control',\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n version_strings = project.supported_versions()\n # Disable making old versions inactive for now.\n # project.versions.exclude(verbose_name__in=version_strings).update(active=False)\n project.versions.filter(verbose_name__in=version_strings).update(\n active=True,\n )\n return Response({\n 'flat': version_strings,\n })\n\n @detail_route()\n def translations(self, *_, **__):\n translations = self.get_object().translations.all()\n return Response({\n 'translations': ProjectSerializer(translations, many=True).data,\n })\n\n @detail_route()\n def subprojects(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n rels = project.subprojects.all()\n children = [rel.child for rel in rels]\n return Response({\n 'subprojects': ProjectSerializer(children, many=True).data,\n })\n\n @detail_route()\n def active_versions(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n versions = project.versions.filter(active=True)\n return Response({\n 'versions': VersionSerializer(versions, many=True).data,\n })\n\n @decorators.detail_route(permission_classes=[permissions.IsAdminUser])\n def token(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n token = GitHubService.get_token_for_project(project, force_local=True)\n return Response({\n 'token': token,\n })\n\n @decorators.detail_route()\n def canonical_url(self, request, **kwargs):\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n return Response({\n 'url': project.get_docs_url(),\n })\n\n @decorators.detail_route(\n permission_classes=[permissions.IsAdminUser], methods=['post'])\n def sync_versions(self, request, **kwargs): # noqa: D205\n \"\"\"\n Sync the version data in the repo (on the build server).\n\n Version data in the repo is synced with what we have in the database.\n\n :returns: the identifiers for the versions that have been deleted.\n \"\"\"\n project = get_object_or_404(\n Project.objects.api(request.user), pk=kwargs['pk'])\n\n # If the currently highest non-prerelease version is active, then make\n # the new latest version active as well.\n old_highest_version = determine_stable_version(project.versions.all())\n if 
old_highest_version is not None:\n activate_new_stable = old_highest_version.active\n else:\n activate_new_stable = False\n\n try:\n # Update All Versions\n data = request.data\n added_versions = set()\n if 'tags' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['tags'], type=TAG)\n added_versions.update(ret_set)\n if 'branches' in data:\n ret_set = api_utils.sync_versions(\n project=project, versions=data['branches'], type=BRANCH)\n added_versions.update(ret_set)\n deleted_versions = api_utils.delete_versions(project, data)\n except Exception as e:\n log.exception('Sync Versions Error')\n return Response(\n {\n 'error': str(e),\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n promoted_version = project.update_stable_version()\n if promoted_version:\n new_stable = project.get_stable_version()\n log.info(\n 'Triggering new stable build: {project}:{version}'.format(\n project=project.slug,\n version=new_stable.identifier,\n ))\n trigger_build(project=project, version=new_stable)\n\n # Marking the tag that is considered the new stable version as\n # active and building it if it was just added.\n if (activate_new_stable and\n promoted_version.slug in added_versions):\n promoted_version.active = True\n promoted_version.save()\n trigger_build(project=project, version=promoted_version)\n\n return Response({\n 'added_versions': added_versions,\n 'deleted_versions': deleted_versions,\n })\n\n\nclass VersionViewSet(UserSelectViewSet):\n\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = VersionSerializer\n admin_serializer_class = VersionAdminSerializer\n model = Version\n filter_fields = ('active', 'project__slug',)\n\n\nclass BuildViewSetBase(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer, PlainTextBuildRenderer)\n serializer_class = BuildSerializer\n admin_serializer_class = BuildAdminSerializer\n model = Build\n filter_fields = ('project__slug',)\n\n\nclass BuildViewSet(SettingsOverrideObject):\n\n \"\"\"A pluggable class to allow for build cold storage.\"\"\"\n\n _default_class = BuildViewSetBase\n\n\nclass BuildCommandViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = BuildCommandSerializer\n model = BuildCommandResult\n\n\nclass NotificationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)\n renderer_classes = (JSONRenderer,)\n model = EmailHook\n\n def get_queryset(self):\n return self.model.objects.api(self.request.user)\n\n\nclass DomainViewSet(UserSelectViewSet):\n permission_classes = [APIRestrictedPermission]\n renderer_classes = (JSONRenderer,)\n serializer_class = DomainSerializer\n model = Domain\n\n\nclass RemoteOrganizationViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteOrganizationSerializer\n model = RemoteOrganization\n pagination_class = api_utils.RemoteOrganizationPagination\n\n def get_queryset(self):\n return (\n self.model.objects.api(self.request.user).filter(\n account__provider__in=[\n service.adapter.provider_id for service in registry\n ]))\n\n\nclass RemoteRepositoryViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = RemoteRepositorySerializer\n model = RemoteRepository\n pagination_class = 
api_utils.RemoteProjectPagination\n\n def get_queryset(self):\n query = self.model.objects.api(self.request.user)\n org = self.request.query_params.get('org', None)\n if org is not None:\n query = query.filter(organization__pk=org)\n\n own = self.request.query_params.get('own', None)\n if own is not None:\n query = query.filter(\n account__provider=own,\n organization=None,\n )\n\n query = query.filter(\n account__provider__in=[\n service.adapter.provider_id for service in registry\n ])\n return query\n\n\nclass SocialAccountViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = [IsOwner]\n renderer_classes = (JSONRenderer,)\n serializer_class = SocialAccountSerializer\n model = SocialAccount\n\n def get_queryset(self):\n return self.model.objects.filter(user=self.request.user.pk)\n", "path": "readthedocs/restapi/views/model_views.py"}]} | 3,845 | 114 |
gh_patches_debug_19677 | rasdani/github-patches | git_diff | conda__conda-7178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Channel pins in "environment.yaml" files are not saved to package specs
Channel pins specified in environment files are not respected. For example, run ```conda env create``` with this environment file:
```yaml
name: channel-not-written-to-user-specs
dependencies:
- defaults::six
```
If we look at conda-meta/six*.json we can see that the channel pin has not been added to specs. Where we should read ```"requested_spec": "defaults::six"```, we only find ```"requested_spec": "six"```.
This is with conda 4.4.0rc2.
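
Judging from the installer code below, the channel prefix is split off each spec before it reaches the solver, so only the bare package name can end up in `requested_spec`. A tiny standalone illustration of that splitting (the spec string is just this report's example):

```python
# Mirrors the "::" handling in conda_env/installers/conda.py shown below.
spec = "defaults::six"

channel = spec.split("::")[0]   # "defaults" -- only used to extend the channel list
name = spec.split("::")[-1]     # "six"      -- what the solver is actually given
print(channel, name)            # defaults six
```

which would explain why conda-meta/six*.json records ```"requested_spec": "six"``` instead of ```"defaults::six"```.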
</issue>
<code>
[start of conda_env/installers/conda.py]
1 from __future__ import absolute_import
2
3 from os.path import basename
4
5 from conda._vendor.boltons.setutils import IndexedSet
6 from conda.base.context import context
7 from conda.core.solve import Solver
8 from conda.models.channel import Channel, prioritize_channels
9
10
11 def install(prefix, specs, args, env, *_, **kwargs):
12 # TODO: support all various ways this happens
13 # Including 'nodefaults' in the channels list disables the defaults
14 new_specs = []
15 channel_urls = set()
16 for elem in specs:
17 if "::" in elem:
18 channel_urls.add(elem.split("::")[0])
19 new_specs.append(elem.split("::")[-1])
20 else:
21 new_specs.append(elem)
22 specs = new_specs
23 channel_urls = list(channel_urls)
24 # TODO: support all various ways this happens
25 # Including 'nodefaults' in the channels list disables the defaults
26 channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']
27 if 'nodefaults' not in env.channels:
28 channel_urls.extend(context.channels)
29 _channel_priority_map = prioritize_channels(channel_urls)
30
31 channels = IndexedSet(Channel(url) for url in _channel_priority_map)
32 subdirs = IndexedSet(basename(url) for url in _channel_priority_map)
33
34 solver = Solver(prefix, channels, subdirs, specs_to_add=specs)
35 unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))
36
37 pfe = unlink_link_transaction._get_pfe()
38 pfe.execute()
39 unlink_link_transaction.execute()
40
[end of conda_env/installers/conda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py
--- a/conda_env/installers/conda.py
+++ b/conda_env/installers/conda.py
@@ -11,19 +11,8 @@
def install(prefix, specs, args, env, *_, **kwargs):
# TODO: support all various ways this happens
# Including 'nodefaults' in the channels list disables the defaults
- new_specs = []
- channel_urls = set()
- for elem in specs:
- if "::" in elem:
- channel_urls.add(elem.split("::")[0])
- new_specs.append(elem.split("::")[-1])
- else:
- new_specs.append(elem)
- specs = new_specs
- channel_urls = list(channel_urls)
- # TODO: support all various ways this happens
- # Including 'nodefaults' in the channels list disables the defaults
- channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']
+ channel_urls = [chan for chan in env.channels if chan != 'nodefaults']
+
if 'nodefaults' not in env.channels:
channel_urls.extend(context.channels)
_channel_priority_map = prioritize_channels(channel_urls)
| {"golden_diff": "diff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py\n--- a/conda_env/installers/conda.py\n+++ b/conda_env/installers/conda.py\n@@ -11,19 +11,8 @@\n def install(prefix, specs, args, env, *_, **kwargs):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n- new_specs = []\n- channel_urls = set()\n- for elem in specs:\n- if \"::\" in elem:\n- channel_urls.add(elem.split(\"::\")[0])\n- new_specs.append(elem.split(\"::\")[-1])\n- else:\n- new_specs.append(elem)\n- specs = new_specs\n- channel_urls = list(channel_urls)\n- # TODO: support all various ways this happens\n- # Including 'nodefaults' in the channels list disables the defaults\n- channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']\n+ channel_urls = [chan for chan in env.channels if chan != 'nodefaults']\n+\n if 'nodefaults' not in env.channels:\n channel_urls.extend(context.channels)\n _channel_priority_map = prioritize_channels(channel_urls)\n", "issue": "Channel pins in \"environment.yaml\" files are not saved to package specs\nChannel pins specified in environment files are not respected. For example,run ```conda env create``` with this environment file:\r\n\r\n```yaml\r\nname: channel-not-written-to-user-specs\r\n\r\ndependencies:\r\n - defaults::six\r\n```\r\n\r\nIf we look at conda-meta/six*.json we can see that the channel pin has not been added to specs. Where we should read ```\"requested_spec\": \"defaults::six\"```, we only find ```\"requested_spec\": \"six\"```.\r\n\r\nThis is with conda 4.4.0rc2.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom os.path import basename\n\nfrom conda._vendor.boltons.setutils import IndexedSet\nfrom conda.base.context import context\nfrom conda.core.solve import Solver\nfrom conda.models.channel import Channel, prioritize_channels\n\n\ndef install(prefix, specs, args, env, *_, **kwargs):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n new_specs = []\n channel_urls = set()\n for elem in specs:\n if \"::\" in elem:\n channel_urls.add(elem.split(\"::\")[0])\n new_specs.append(elem.split(\"::\")[-1])\n else:\n new_specs.append(elem)\n specs = new_specs\n channel_urls = list(channel_urls)\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']\n if 'nodefaults' not in env.channels:\n channel_urls.extend(context.channels)\n _channel_priority_map = prioritize_channels(channel_urls)\n\n channels = IndexedSet(Channel(url) for url in _channel_priority_map)\n subdirs = IndexedSet(basename(url) for url in _channel_priority_map)\n\n solver = Solver(prefix, channels, subdirs, specs_to_add=specs)\n unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))\n\n pfe = unlink_link_transaction._get_pfe()\n pfe.execute()\n unlink_link_transaction.execute()\n", "path": "conda_env/installers/conda.py"}]} | 1,091 | 282 |
gh_patches_debug_10175 | rasdani/github-patches | git_diff | getredash__redash-2070 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
An unauthorized (read only) user can create queries
### Issue Summary
I created a user in the default group and set the data source capabilities to "Read only" for that group. Accessing as this brand new user, I really can't create a new query, but I can fork an existing one and modify it as I want. I think this could be a security flaw.
### Steps to Reproduce
1. Install Redash with Docker and connect to a data source (I created a MySQL one);
2. Create a few queries;
3. Create a user and assign him to the "default" group;
4. Set the permissions of the data source for the default group to read only;
5. Log in as the brand new user and click to list the queries;
6. Choose one of the queries and fork it.
I expect that a "read only" role should really be read only.
### Technical details:
* Redash Version: 1.0.3+b2850
* Browser/OS: Google Chrome 59/Fedora 25
* How did you install Redash: Docker with the suggested `docker-compose.production.yml`
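
The query-creation endpoint in the code below guards the data source with `require_access(data_source.groups, self.current_user, not_view_only)`, but `QueryForkResource.post` performs no such data-source check, which would explain the behaviour. A scripted version of the reproduction — the `/api/queries/<id>/fork` route, port, and `Key` auth header are assumptions about the deployment, not something shown in the code:

```python
# Reproduction sketch run with the read-only user's API key (placeholder).
import requests

BASE = "http://localhost:5000"  # assumed Redash URL
HEADERS = {"Authorization": "Key <read-only-user-api-key>"}

resp = requests.post(f"{BASE}/api/queries/1/fork", headers=HEADERS)
print(resp.status_code)  # succeeds today; a view-only user would be expected to get 403
```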
</issue>
<code>
[start of redash/handlers/queries.py]
1 from itertools import chain
2
3 import sqlparse
4 from flask import jsonify, request
5 from flask_login import login_required
6 from flask_restful import abort
7 from funcy import distinct, take
8 from sqlalchemy.orm.exc import StaleDataError
9
10 from redash import models, settings
11 from redash.handlers.base import (BaseResource, get_object_or_404,
12 org_scoped_rule, paginate, routes)
13 from redash.handlers.query_results import run_query
14 from redash.permissions import (can_modify, not_view_only, require_access,
15 require_admin_or_owner,
16 require_object_modify_permission,
17 require_permission, view_only)
18 from redash.utils import collect_parameters_from_request
19
20
21 @routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
22 @login_required
23 def format_sql_query(org_slug=None):
24 """
25 Formats an SQL query using the Python ``sqlparse`` formatter.
26
27 :<json string query: The SQL text to format
28 :>json string query: Formatted SQL text
29 """
30 arguments = request.get_json(force=True)
31 query = arguments.get("query", "")
32
33 return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})
34
35
36 class QuerySearchResource(BaseResource):
37 @require_permission('view_query')
38 def get(self):
39 """
40 Search query text, titles, and descriptions.
41
42 :qparam string q: Search term
43
44 Responds with a list of :ref:`query <query-response-label>` objects.
45 """
46 term = request.args.get('q', '')
47 include_drafts = request.args.get('include_drafts') is not None
48
49 return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts)]
50
51
52 class QueryRecentResource(BaseResource):
53 @require_permission('view_query')
54 def get(self):
55 """
56 Retrieve up to 20 queries modified in the last 7 days.
57
58 Responds with a list of :ref:`query <query-response-label>` objects.
59 """
60
61 if settings.FEATURE_DUMB_RECENTS:
62 results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
63 queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]
64 else:
65 queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
66 recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]
67
68 global_recent = []
69 if len(recent) < 10:
70 global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]
71
72 queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
73
74 return queries
75
76
77 class QueryListResource(BaseResource):
78 @require_permission('create_query')
79 def post(self):
80 """
81 Create a new query.
82
83 :<json number data_source_id: The ID of the data source this query will run on
84 :<json string query: Query text
85 :<json string name:
86 :<json string description:
87 :<json string schedule: Schedule interval, in seconds, for repeated execution of this query
88 :<json object options: Query options
89
90 .. _query-response-label:
91
92 :>json number id: Query ID
93 :>json number latest_query_data_id: ID for latest output data from this query
94 :>json string name:
95 :>json string description:
96 :>json string query: Query text
97 :>json string query_hash: Hash of query text
98 :>json string schedule: Schedule interval, in seconds, for repeated execution of this query
99 :>json string api_key: Key for public access to this query's results.
100 :>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
101 :>json boolean is_draft: Whether this query is a draft or not
102 :>json string updated_at: Time of last modification, in ISO format
103 :>json string created_at: Time of creation, in ISO format
104 :>json number data_source_id: ID of the data source this query will run on
105 :>json object options: Query options
106 :>json number version: Revision version (for update conflict avoidance)
107 :>json number user_id: ID of query creator
108 :>json number last_modified_by_id: ID of user who last modified this query
109 :>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)
110 :>json number runtime: Runtime of last query execution, in seconds (may be null)
111 """
112 query_def = request.get_json(force=True)
113 data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
114 require_access(data_source.groups, self.current_user, not_view_only)
115
116 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
117 query_def.pop(field, None)
118
119 query_def['query_text'] = query_def.pop('query')
120 query_def['user'] = self.current_user
121 query_def['data_source'] = data_source
122 query_def['org'] = self.current_org
123 query_def['is_draft'] = True
124 query = models.Query.create(**query_def)
125 models.db.session.add(query)
126 models.db.session.commit()
127
128 self.record_event({
129 'action': 'create',
130 'object_id': query.id,
131 'object_type': 'query'
132 })
133
134 return query.to_dict()
135
136 @require_permission('view_query')
137 def get(self):
138 """
139 Retrieve a list of queries.
140
141 :qparam number page_size: Number of queries to return
142 :qparam number page: Page number to retrieve
143
144 Responds with an array of :ref:`query <query-response-label>` objects.
145 """
146
147 results = models.Query.all_queries(self.current_user.group_ids, self.current_user.id)
148 page = request.args.get('page', 1, type=int)
149 page_size = request.args.get('page_size', 25, type=int)
150 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
151
152
153 class MyQueriesResource(BaseResource):
154 @require_permission('view_query')
155 def get(self):
156 """
157 Retrieve a list of queries created by the current user.
158
159 :qparam number page_size: Number of queries to return
160 :qparam number page: Page number to retrieve
161
162 Responds with an array of :ref:`query <query-response-label>` objects.
163 """
164 drafts = request.args.get('drafts') is not None
165 results = models.Query.by_user(self.current_user)
166 page = request.args.get('page', 1, type=int)
167 page_size = request.args.get('page_size', 25, type=int)
168 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
169
170
171 class QueryResource(BaseResource):
172 @require_permission('edit_query')
173 def post(self, query_id):
174 """
175 Modify a query.
176
177 :param query_id: ID of query to update
178 :<json number data_source_id: The ID of the data source this query will run on
179 :<json string query: Query text
180 :<json string name:
181 :<json string description:
182 :<json string schedule: Schedule interval, in seconds, for repeated execution of this query
183 :<json object options: Query options
184
185 Responds with the updated :ref:`query <query-response-label>` object.
186 """
187 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
188 query_def = request.get_json(force=True)
189
190 require_object_modify_permission(query, self.current_user)
191
192 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
193 query_def.pop(field, None)
194
195 if 'query' in query_def:
196 query_def['query_text'] = query_def.pop('query')
197
198 query_def['last_modified_by'] = self.current_user
199 query_def['changed_by'] = self.current_user
200 # SQLAlchemy handles the case where a concurrent transaction beats us
201 # to the update. But we still have to make sure that we're not starting
202 # out behind.
203 if 'version' in query_def and query_def['version'] != query.version:
204 abort(409)
205
206 try:
207 self.update_model(query, query_def)
208 models.db.session.commit()
209 except StaleDataError:
210 abort(409)
211
212 return query.to_dict(with_visualizations=True)
213
214 @require_permission('view_query')
215 def get(self, query_id):
216 """
217 Retrieve a query.
218
219 :param query_id: ID of query to fetch
220
221 Responds with the :ref:`query <query-response-label>` contents.
222 """
223 q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
224 require_access(q.groups, self.current_user, view_only)
225
226 result = q.to_dict(with_visualizations=True)
227 result['can_edit'] = can_modify(q, self.current_user)
228 return result
229
230 # TODO: move to resource of its own? (POST /queries/{id}/archive)
231 def delete(self, query_id):
232 """
233 Archives a query.
234
235 :param query_id: ID of query to archive
236 """
237 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
238 require_admin_or_owner(query.user_id)
239 query.archive(self.current_user)
240 models.db.session.commit()
241
242
243 class QueryForkResource(BaseResource):
244 @require_permission('edit_query')
245 def post(self, query_id):
246 """
247 Creates a new query, copying the query text from an existing one.
248
249 :param query_id: ID of query to fork
250
251 Responds with created :ref:`query <query-response-label>` object.
252 """
253 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
254 forked_query = query.fork(self.current_user)
255 models.db.session.commit()
256 return forked_query.to_dict(with_visualizations=True)
257
258
259 class QueryRefreshResource(BaseResource):
260 def post(self, query_id):
261 """
262 Execute a query, updating the query object with the results.
263
264 :param query_id: ID of query to execute
265
266 Responds with query task details.
267 """
268 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
269 require_access(query.groups, self.current_user, not_view_only)
270
271 parameter_values = collect_parameters_from_request(request.args)
272
273 return run_query(query.data_source, parameter_values, query.query_text, query.id)
274
[end of redash/handlers/queries.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -251,6 +251,7 @@
Responds with created :ref:`query <query-response-label>` object.
"""
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
+ require_access(query.data_source.groups, self.current_user, not_view_only)
forked_query = query.fork(self.current_user)
models.db.session.commit()
return forked_query.to_dict(with_visualizations=True)
| {"golden_diff": "diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py\n--- a/redash/handlers/queries.py\n+++ b/redash/handlers/queries.py\n@@ -251,6 +251,7 @@\n Responds with created :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n+ require_access(query.data_source.groups, self.current_user, not_view_only)\n forked_query = query.fork(self.current_user)\n models.db.session.commit()\n return forked_query.to_dict(with_visualizations=True)\n", "issue": "An unauthorized (read only) user can create queries \n### Issue Summary\r\n\r\nI created a user on the default group and set the data source capabilities to \"Read only\" on that group. Accessing as this brand new user, I really can't create a new query, but I can fork a existing one, and modify it as I want. I think this could be a security flaw.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Installed with docker, connect to a data source (I created a MySQL one);\r\n2. Create a few queries;\r\n3. Create a user and assign him to the \"default\" group;\r\n4. Set the permissions of the data source for the default group to read only;\r\n5. Do login with the brand new user and click to list the queries;\r\n6. Choice one of the queries and fork it.\r\n\r\nI expect that a \"read only\" role should really be read only.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3+b2850\r\n* Browser/OS: Google Chrome 59/Fedora 25\r\n* How did you install Redash: Docker with the suggested `docker-compose.production.yml`\r\n\n", "before_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import models, settings\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n \"\"\"\n Formats an SQL query using the Python ``sqlparse`` formatter.\n\n :<json string query: The SQL text to format\n :>json string query: Formatted SQL text\n \"\"\"\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Search query text, titles, and descriptions.\n\n :qparam string q: Search term\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n term = request.args.get('q', '')\n include_drafts = request.args.get('include_drafts') is not None\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve up to 20 queries modified in the last 7 days.\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n if 
settings.FEATURE_DUMB_RECENTS:\n results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)\n queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]\n else:\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n return queries\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n \"\"\"\n Create a new query.\n\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n .. _query-response-label:\n\n :>json number id: Query ID\n :>json number latest_query_data_id: ID for latest output data from this query\n :>json string name:\n :>json string description:\n :>json string query: Query text\n :>json string query_hash: Hash of query text\n :>json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :>json string api_key: Key for public access to this query's results.\n :>json boolean is_archived: Whether this query is displayed in indexes and search results or not.\n :>json boolean is_draft: Whether this query is a draft or not\n :>json string updated_at: Time of last modification, in ISO format\n :>json string created_at: Time of creation, in ISO format\n :>json number data_source_id: ID of the data source this query will run on\n :>json object options: Query options\n :>json number version: Revision version (for update conflict avoidance)\n :>json number user_id: ID of query creator\n :>json number last_modified_by_id: ID of user who last modified this query\n :>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)\n :>json number runtime: Runtime of last query execution, in seconds (may be null)\n \"\"\"\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n results = models.Query.all_queries(self.current_user.group_ids, self.current_user.id)\n page = request.args.get('page', 1, 
type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries created by the current user.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Modify a query.\n\n :param query_id: ID of query to update\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n Responds with the updated :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n return query.to_dict(with_visualizations=True)\n\n @require_permission('view_query')\n def get(self, query_id):\n \"\"\"\n Retrieve a query.\n\n :param query_id: ID of query to fetch\n\n Responds with the :ref:`query <query-response-label>` contents.\n \"\"\"\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? 
(POST /queries/{id}/archive)\n def delete(self, query_id):\n \"\"\"\n Archives a query.\n\n :param query_id: ID of query to archive\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n models.db.session.commit()\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Creates a new query, copying the query text from an existing one.\n\n :param query_id: ID of query to fork\n\n Responds with created :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n forked_query = query.fork(self.current_user)\n models.db.session.commit()\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n \"\"\"\n Execute a query, updating the query object with the results.\n\n :param query_id: ID of query to execute\n\n Responds with query task details.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}]} | 3,922 | 148 |
gh_patches_debug_8316 | rasdani/github-patches | git_diff | zulip__zulip-21579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document custom expiration times for invitations
In #19680, we added the ability to set custom expiration times for invitations and invite links. This should be documented in the help center at https://zulip.com/help/invite-new-users.
Note that this feature will be further extended in #19681; we might be able to write the documentation so that it doesn't have to be updated when this happens.
</issue>
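For orientation, Zulip's help-center articles mark up per-platform or per-option instructions with `{start_tabs}` / `{tab|...}` / `{end_tabs}` markers, which the preprocessor in the file below converts into tabbed HTML. A minimal sketch of how a tab marker is recognised — the sample line is hypothetical, but the pattern and the `send-email-invitations` label both come from the file below:

```python
import re

# Same pattern as TAB_CONTENT_REGEX in zerver/lib/markdown/tabbed_sections.py.
TAB_CONTENT_REGEX = re.compile(r"^\{tab\|\s*(.+?)\s*\}$")

# Hypothetical line from a help-center article such as the invite documentation.
line = "{tab|send-email-invitations}"

match = TAB_CONTENT_REGEX.match(line)
assert match is not None
# The captured name must exist in TAB_SECTION_LABELS; otherwise generate_nav_bar()
# in the file below raises a ValueError while building the tab bar.
assert match.group(1) == "send-email-invitations"
```

Any label referenced from an article therefore has to be registered in `TAB_SECTION_LABELS`, which is why documentation changes to this feature can also touch the file below.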
<code>
[start of zerver/lib/markdown/tabbed_sections.py]
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7
8 from zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES
9
10 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
11 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
12 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|\s*(.+?)\s*\}$")
13
14 CODE_SECTION_TEMPLATE = """
15 <div class="code-section {tab_class}" markdown="1">
16 {nav_bar}
17 <div class="blocks">
18 {blocks}
19 </div>
20 </div>
21 """.strip()
22
23 NAV_BAR_TEMPLATE = """
24 <ul class="nav">
25 {tabs}
26 </ul>
27 """.strip()
28
29 NAV_LIST_ITEM_TEMPLATE = """
30 <li data-language="{data_language}" tabindex="0">{label}</li>
31 """.strip()
32
33 DIV_TAB_CONTENT_TEMPLATE = """
34 <div data-language="{data_language}" markdown="1">
35 {content}
36 </div>
37 """.strip()
38
39 # If adding new entries here, also check if you need to update
40 # tabbed-instructions.js
41 TAB_SECTION_LABELS = {
42 "desktop-web": "Desktop/Web",
43 "ios": "iOS",
44 "android": "Android",
45 "mac": "macOS",
46 "windows": "Windows",
47 "linux": "Linux",
48 "python": "Python",
49 "js": "JavaScript",
50 "curl": "curl",
51 "zulip-send": "zulip-send",
52 "web": "Web",
53 "desktop": "Desktop",
54 "mobile": "Mobile",
55 "mm-default": "Default installation",
56 "mm-docker": "Docker",
57 "mm-gitlab-omnibus": "GitLab Omnibus",
58 "send-email-invitations": "Send email invitations",
59 "share-an-invite-link": "Share an invite link",
60 "require-invitations": "Require invitations",
61 "allow-anyone-to-join": "Allow anyone to join",
62 "restrict-by-email-domain": "Restrict by email domain",
63 "zoom": "Zoom",
64 "jitsi-meet": "Jitsi Meet",
65 "bigbluebutton": "BigBlueButton",
66 "disable": "Disabled",
67 "chrome": "Chrome",
68 "firefox": "Firefox",
69 "desktop-app": "Desktop app",
70 "system-proxy-settings": "System proxy settings",
71 "custom-proxy-settings": "Custom proxy settings",
72 "stream": "From a stream view",
73 "not-stream": "From other views",
74 "via-recent-topics": "Via recent topics",
75 "via-left-sidebar": "Via left sidebar",
76 "instructions-for-all-platforms": "Instructions for all platforms",
77 "public-streams": "Public streams",
78 "private-streams": "Private streams",
79 "web-public-streams": "Web-public streams",
80 "via-user-profile": "Via the user's profile",
81 "via-organization-settings": "Via organization settings",
82 "via-browser-address-bar": "Via browser's address bar",
83 }
84
85
86 class TabbedSectionsGenerator(Extension):
87 def extendMarkdown(self, md: markdown.Markdown) -> None:
88 md.preprocessors.register(
89 TabbedSectionsPreprocessor(md, self.getConfigs()),
90 "tabbed_sections",
91 PREPROCESSOR_PRIORITES["tabbed_sections"],
92 )
93
94
95 class TabbedSectionsPreprocessor(Preprocessor):
96 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
97 super().__init__(md)
98
99 def run(self, lines: List[str]) -> List[str]:
100 tab_section = self.parse_tabs(lines)
101 while tab_section:
102 if "tabs" in tab_section:
103 tab_class = "has-tabs"
104 else:
105 tab_class = "no-tabs"
106 tab_section["tabs"] = [
107 {
108 "tab_name": "instructions-for-all-platforms",
109 "start": tab_section["start_tabs_index"],
110 }
111 ]
112 nav_bar = self.generate_nav_bar(tab_section)
113 content_blocks = self.generate_content_blocks(tab_section, lines)
114 rendered_tabs = CODE_SECTION_TEMPLATE.format(
115 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
116 )
117
118 start = tab_section["start_tabs_index"]
119 end = tab_section["end_tabs_index"] + 1
120 lines = [*lines[:start], rendered_tabs, *lines[end:]]
121 tab_section = self.parse_tabs(lines)
122 return lines
123
124 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
125 tab_content_blocks = []
126 for index, tab in enumerate(tab_section["tabs"]):
127 start_index = tab["start"] + 1
128 try:
129 # If there are more tabs, we can use the starting index
130 # of the next tab as the ending index of the previous one
131 end_index = tab_section["tabs"][index + 1]["start"]
132 except IndexError:
133 # Otherwise, just use the end of the entire section
134 end_index = tab_section["end_tabs_index"]
135
136 content = "\n".join(lines[start_index:end_index]).strip()
137 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
138 data_language=tab["tab_name"],
139 # Wrapping the content in two newlines is necessary here.
140 # If we don't do this, the inner Markdown does not get
141 # rendered properly.
142 content=f"\n{content}\n",
143 )
144 tab_content_blocks.append(tab_content_block)
145 return "\n".join(tab_content_blocks)
146
147 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
148 li_elements = []
149 for tab in tab_section["tabs"]:
150 tab_name = tab.get("tab_name")
151 tab_label = TAB_SECTION_LABELS.get(tab_name)
152 if tab_label is None:
153 raise ValueError(
154 f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
155 )
156
157 li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)
158 li_elements.append(li)
159
160 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
161
162 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
163 block: Dict[str, Any] = {}
164 for index, line in enumerate(lines):
165 start_match = START_TABBED_SECTION_REGEX.search(line)
166 if start_match:
167 block["start_tabs_index"] = index
168
169 tab_content_match = TAB_CONTENT_REGEX.search(line)
170 if tab_content_match:
171 block.setdefault("tabs", [])
172 tab = {"start": index, "tab_name": tab_content_match.group(1)}
173 block["tabs"].append(tab)
174
175 end_match = END_TABBED_SECTION_REGEX.search(line)
176 if end_match:
177 block["end_tabs_index"] = index
178 break
179 return block
180
181
182 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
183 return TabbedSectionsGenerator(**kwargs)
184
[end of zerver/lib/markdown/tabbed_sections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -55,8 +55,6 @@
"mm-default": "Default installation",
"mm-docker": "Docker",
"mm-gitlab-omnibus": "GitLab Omnibus",
- "send-email-invitations": "Send email invitations",
- "share-an-invite-link": "Share an invite link",
"require-invitations": "Require invitations",
"allow-anyone-to-join": "Allow anyone to join",
"restrict-by-email-domain": "Restrict by email domain",
| {"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -55,8 +55,6 @@\n \"mm-default\": \"Default installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n- \"send-email-invitations\": \"Send email invitations\",\n- \"share-an-invite-link\": \"Share an invite link\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n", "issue": "Document custom expiration times for invitations\nIn #19680, we added the ability to set custom expiration times for invitations and invite links. This should be documented in the help center at https://zulip.com/help/invite-new-users.\r\n\r\nNote that this feature will be further extended in #19681; we might be able to write the documentation so that it doesn't have to be updated when this happens.\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|\\s*(.+?)\\s*\\}$\")\n\nCODE_SECTION_TEMPLATE = \"\"\"\n<div class=\"code-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"send-email-invitations\": \"Send email invitations\",\n \"share-an-invite-link\": \"Share an invite link\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-topics\": \"Via recent topics\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n 
\"web-public-streams\": \"Web-public streams\",\n \"via-user-profile\": \"Via the user's profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-browser-address-bar\": \"Via browser's address bar\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_name\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab[\"tab_name\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_name = tab.get(\"tab_name\")\n tab_label = TAB_SECTION_LABELS.get(tab_name)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_name\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n 
return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]} | 2,605 | 162 |
gh_patches_debug_5234 | rasdani/github-patches | git_diff | zulip__zulip-20148 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clear `prefers_web_public_view` cookie when logging in or registering an account
In #19902, we added functionality to remember who had accessed Zulip as a spectator and immediately take them to the spectator experience. We probably want to clear the `prefers_web_public_view` cookie when logging in via `do_login` or registering a new account, since the user likely doesn't want to get the spectator experience on future visits to the site.
</issue>
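For context on the code below: the spectator preference is stored in Django's session under the key `prefers_web_public_view` (set in `home_real` when the "Access without an account" form is submitted). A minimal sketch of the kind of cleanup the issue asks for — illustrative only, not the project's actual change — would simply drop that key once the user is authenticated:

```python
from django.http import HttpRequest


def clear_spectator_preference(request: HttpRequest) -> None:
    """Hypothetical helper: forget the spectator preference after login or registration.

    Django sessions behave like dicts, so pop() is a no-op when the key is absent.
    """
    request.session.pop("prefers_web_public_view", None)
```

Where exactly such a call belongs (e.g. in `do_login`, in the registration flow, or directly in the view below) is the design question the issue leaves open.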
<code>
[start of zerver/views/home.py]
1 import logging
2 import secrets
3 from typing import List, Optional, Tuple
4
5 from django.conf import settings
6 from django.http import HttpRequest, HttpResponse
7 from django.shortcuts import redirect, render
8 from django.utils.cache import patch_cache_control
9
10 from zerver.context_processors import get_valid_realm_from_request
11 from zerver.decorator import web_public_view, zulip_login_required, zulip_redirect_to_login
12 from zerver.forms import ToSForm
13 from zerver.lib.actions import do_change_tos_version, realm_user_count
14 from zerver.lib.compatibility import is_outdated_desktop_app, is_unsupported_browser
15 from zerver.lib.home import build_page_params_for_home_page_load, get_user_permission_info
16 from zerver.lib.request import RequestNotes
17 from zerver.lib.streams import access_stream_by_name
18 from zerver.lib.subdomains import get_subdomain
19 from zerver.lib.utils import statsd
20 from zerver.models import PreregistrationUser, Realm, Stream, UserProfile
21 from zerver.views.auth import get_safe_redirect_to
22 from zerver.views.portico import hello_view
23
24
25 def need_accept_tos(user_profile: Optional[UserProfile]) -> bool:
26 if user_profile is None:
27 return False
28
29 if settings.TERMS_OF_SERVICE is None: # nocoverage
30 return False
31
32 if settings.TOS_VERSION is None:
33 return False
34
35 return int(settings.TOS_VERSION.split(".")[0]) > user_profile.major_tos_version()
36
37
38 @zulip_login_required
39 def accounts_accept_terms(request: HttpRequest) -> HttpResponse:
40 assert request.user.is_authenticated
41
42 if request.method == "POST":
43 form = ToSForm(request.POST)
44 if form.is_valid():
45 do_change_tos_version(request.user, settings.TOS_VERSION)
46 return redirect(home)
47 else:
48 form = ToSForm()
49
50 email = request.user.delivery_email
51 special_message_template = None
52 if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
53 special_message_template = "zerver/" + settings.FIRST_TIME_TOS_TEMPLATE
54 return render(
55 request,
56 "zerver/accounts_accept_terms.html",
57 context={
58 "form": form,
59 "email": email,
60 "special_message_template": special_message_template,
61 },
62 )
63
64
65 def detect_narrowed_window(
66 request: HttpRequest, user_profile: Optional[UserProfile]
67 ) -> Tuple[List[List[str]], Optional[Stream], Optional[str]]:
68 """This function implements Zulip's support for a mini Zulip window
69 that just handles messages from a single narrow"""
70 if user_profile is None:
71 return [], None, None
72
73 narrow: List[List[str]] = []
74 narrow_stream = None
75 narrow_topic = request.GET.get("topic")
76
77 if request.GET.get("stream"):
78 try:
79 # TODO: We should support stream IDs and PMs here as well.
80 narrow_stream_name = request.GET.get("stream")
81 (narrow_stream, ignored_sub) = access_stream_by_name(user_profile, narrow_stream_name)
82 narrow = [["stream", narrow_stream.name]]
83 except Exception:
84 logging.warning("Invalid narrow requested, ignoring", extra=dict(request=request))
85 if narrow_stream is not None and narrow_topic is not None:
86 narrow.append(["topic", narrow_topic])
87 return narrow, narrow_stream, narrow_topic
88
89
90 def update_last_reminder(user_profile: Optional[UserProfile]) -> None:
91 """Reset our don't-spam-users-with-email counter since the
92 user has since logged in
93 """
94 if user_profile is None:
95 return
96
97 if user_profile.last_reminder is not None: # nocoverage
98 # TODO: Look into the history of last_reminder; we may have
99 # eliminated that as a useful concept for non-bot users.
100 user_profile.last_reminder = None
101 user_profile.save(update_fields=["last_reminder"])
102
103
104 def home(request: HttpRequest) -> HttpResponse:
105 if not settings.ROOT_DOMAIN_LANDING_PAGE:
106 return home_real(request)
107
108 # If settings.ROOT_DOMAIN_LANDING_PAGE, sends the user the landing
109 # page, not the login form, on the root domain
110
111 subdomain = get_subdomain(request)
112 if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
113 return home_real(request)
114
115 return hello_view(request)
116
117
118 @web_public_view
119 def home_real(request: HttpRequest) -> HttpResponse:
120 # Before we do any real work, check if the app is banned.
121 client_user_agent = request.META.get("HTTP_USER_AGENT", "")
122 (insecure_desktop_app, banned_desktop_app, auto_update_broken) = is_outdated_desktop_app(
123 client_user_agent
124 )
125 if banned_desktop_app:
126 return render(
127 request,
128 "zerver/insecure_desktop_app.html",
129 context={
130 "auto_update_broken": auto_update_broken,
131 },
132 )
133 (unsupported_browser, browser_name) = is_unsupported_browser(client_user_agent)
134 if unsupported_browser:
135 return render(
136 request,
137 "zerver/unsupported_browser.html",
138 context={
139 "browser_name": browser_name,
140 },
141 )
142
143 # We need to modify the session object every two weeks or it will expire.
144 # This line makes reloading the page a sufficient action to keep the
145 # session alive.
146 request.session.modified = True
147
148 if request.user.is_authenticated:
149 user_profile = request.user
150 realm = user_profile.realm
151 else:
152 realm = get_valid_realm_from_request(request)
153
154 # TODO: Ideally, we'd open Zulip directly as a spectator if
155 # the URL had clicked a link to content on a web-public
156 # stream. We could maybe do this by parsing `next`, but it's
157 # not super convenient with Zulip's hash-based URL scheme.
158
159 # The "Access without an account" button on the login page
160 # submits a POST to this page with this hidden field set.
161 if request.POST.get("prefers_web_public_view") == "true":
162 request.session["prefers_web_public_view"] = True
163 # We serve a redirect here, rather than serving a page, to
164 # avoid browser "Confirm form resubmission" prompts on reload.
165 redirect_to = get_safe_redirect_to(request.POST.get("next"), realm.uri)
166 return redirect(redirect_to)
167
168 prefers_web_public_view = request.session.get("prefers_web_public_view")
169 if not prefers_web_public_view:
170 # For users who haven't opted into the spectator
171 # experience, we redirect to the login page.
172 return zulip_redirect_to_login(request, settings.HOME_NOT_LOGGED_IN)
173
174 # For users who have selected public access, we load the
175 # spectator experience. We fall through to the shared code
176 # for loading the application, with user_profile=None encoding
177 # that we're a spectator, not a logged-in user.
178 user_profile = None
179
180 update_last_reminder(user_profile)
181
182 statsd.incr("views.home")
183
184 # If a user hasn't signed the current Terms of Service, send them there
185 if need_accept_tos(user_profile):
186 return accounts_accept_terms(request)
187
188 narrow, narrow_stream, narrow_topic = detect_narrowed_window(request, user_profile)
189
190 if user_profile is not None:
191 first_in_realm = realm_user_count(user_profile.realm) == 1
192 # If you are the only person in the realm and you didn't invite
193 # anyone, we'll continue to encourage you to do so on the frontend.
194 prompt_for_invites = (
195 first_in_realm
196 and not PreregistrationUser.objects.filter(referred_by=user_profile).count()
197 )
198 needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING
199
200 else:
201 first_in_realm = False
202 prompt_for_invites = False
203 # The current tutorial doesn't super make sense for logged-out users.
204 needs_tutorial = False
205
206 queue_id, page_params = build_page_params_for_home_page_load(
207 request=request,
208 user_profile=user_profile,
209 realm=realm,
210 insecure_desktop_app=insecure_desktop_app,
211 narrow=narrow,
212 narrow_stream=narrow_stream,
213 narrow_topic=narrow_topic,
214 first_in_realm=first_in_realm,
215 prompt_for_invites=prompt_for_invites,
216 needs_tutorial=needs_tutorial,
217 )
218
219 log_data = RequestNotes.get_notes(request).log_data
220 assert log_data is not None
221 log_data["extra"] = f"[{queue_id}]"
222
223 csp_nonce = secrets.token_hex(24)
224
225 user_permission_info = get_user_permission_info(user_profile)
226
227 response = render(
228 request,
229 "zerver/app/index.html",
230 context={
231 "user_profile": user_profile,
232 "page_params": page_params,
233 "csp_nonce": csp_nonce,
234 "color_scheme": user_permission_info.color_scheme,
235 },
236 )
237 patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
238 return response
239
240
241 @zulip_login_required
242 def desktop_home(request: HttpRequest) -> HttpResponse:
243 return redirect(home)
244
[end of zerver/views/home.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/views/home.py b/zerver/views/home.py
--- a/zerver/views/home.py
+++ b/zerver/views/home.py
@@ -148,6 +148,10 @@
if request.user.is_authenticated:
user_profile = request.user
realm = user_profile.realm
+
+ # User is logged in and hence no longer `prefers_web_public_view`.
+ if "prefers_web_public_view" in request.session.keys():
+ del request.session["prefers_web_public_view"]
else:
realm = get_valid_realm_from_request(request)
| {"golden_diff": "diff --git a/zerver/views/home.py b/zerver/views/home.py\n--- a/zerver/views/home.py\n+++ b/zerver/views/home.py\n@@ -148,6 +148,10 @@\n if request.user.is_authenticated:\n user_profile = request.user\n realm = user_profile.realm\n+\n+ # User is logged in and hence no longer `prefers_web_public_view`.\n+ if \"prefers_web_public_view\" in request.session.keys():\n+ del request.session[\"prefers_web_public_view\"]\n else:\n realm = get_valid_realm_from_request(request)\n", "issue": "Clear `prefers_web_public_view` cookie when logging in or registering an account\nIn #19902, we added functionality to remember who had accessed Zulip as a spectator and immediately take them to the spectator experience. We probably want to clear the `prefers_web_public_view` cookie when logging in via `do_login` or registering a new account, since the user likely doesn't want to get the spectator experience on future visits to the site.\n", "before_files": [{"content": "import logging\nimport secrets\nfrom typing import List, Optional, Tuple\n\nfrom django.conf import settings\nfrom django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.utils.cache import patch_cache_control\n\nfrom zerver.context_processors import get_valid_realm_from_request\nfrom zerver.decorator import web_public_view, zulip_login_required, zulip_redirect_to_login\nfrom zerver.forms import ToSForm\nfrom zerver.lib.actions import do_change_tos_version, realm_user_count\nfrom zerver.lib.compatibility import is_outdated_desktop_app, is_unsupported_browser\nfrom zerver.lib.home import build_page_params_for_home_page_load, get_user_permission_info\nfrom zerver.lib.request import RequestNotes\nfrom zerver.lib.streams import access_stream_by_name\nfrom zerver.lib.subdomains import get_subdomain\nfrom zerver.lib.utils import statsd\nfrom zerver.models import PreregistrationUser, Realm, Stream, UserProfile\nfrom zerver.views.auth import get_safe_redirect_to\nfrom zerver.views.portico import hello_view\n\n\ndef need_accept_tos(user_profile: Optional[UserProfile]) -> bool:\n if user_profile is None:\n return False\n\n if settings.TERMS_OF_SERVICE is None: # nocoverage\n return False\n\n if settings.TOS_VERSION is None:\n return False\n\n return int(settings.TOS_VERSION.split(\".\")[0]) > user_profile.major_tos_version()\n\n\n@zulip_login_required\ndef accounts_accept_terms(request: HttpRequest) -> HttpResponse:\n assert request.user.is_authenticated\n\n if request.method == \"POST\":\n form = ToSForm(request.POST)\n if form.is_valid():\n do_change_tos_version(request.user, settings.TOS_VERSION)\n return redirect(home)\n else:\n form = ToSForm()\n\n email = request.user.delivery_email\n special_message_template = None\n if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:\n special_message_template = \"zerver/\" + settings.FIRST_TIME_TOS_TEMPLATE\n return render(\n request,\n \"zerver/accounts_accept_terms.html\",\n context={\n \"form\": form,\n \"email\": email,\n \"special_message_template\": special_message_template,\n },\n )\n\n\ndef detect_narrowed_window(\n request: HttpRequest, user_profile: Optional[UserProfile]\n) -> Tuple[List[List[str]], Optional[Stream], Optional[str]]:\n \"\"\"This function implements Zulip's support for a mini Zulip window\n that just handles messages from a single narrow\"\"\"\n if user_profile is None:\n return [], None, None\n\n narrow: List[List[str]] = []\n narrow_stream = None\n narrow_topic = 
request.GET.get(\"topic\")\n\n if request.GET.get(\"stream\"):\n try:\n # TODO: We should support stream IDs and PMs here as well.\n narrow_stream_name = request.GET.get(\"stream\")\n (narrow_stream, ignored_sub) = access_stream_by_name(user_profile, narrow_stream_name)\n narrow = [[\"stream\", narrow_stream.name]]\n except Exception:\n logging.warning(\"Invalid narrow requested, ignoring\", extra=dict(request=request))\n if narrow_stream is not None and narrow_topic is not None:\n narrow.append([\"topic\", narrow_topic])\n return narrow, narrow_stream, narrow_topic\n\n\ndef update_last_reminder(user_profile: Optional[UserProfile]) -> None:\n \"\"\"Reset our don't-spam-users-with-email counter since the\n user has since logged in\n \"\"\"\n if user_profile is None:\n return\n\n if user_profile.last_reminder is not None: # nocoverage\n # TODO: Look into the history of last_reminder; we may have\n # eliminated that as a useful concept for non-bot users.\n user_profile.last_reminder = None\n user_profile.save(update_fields=[\"last_reminder\"])\n\n\ndef home(request: HttpRequest) -> HttpResponse:\n if not settings.ROOT_DOMAIN_LANDING_PAGE:\n return home_real(request)\n\n # If settings.ROOT_DOMAIN_LANDING_PAGE, sends the user the landing\n # page, not the login form, on the root domain\n\n subdomain = get_subdomain(request)\n if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:\n return home_real(request)\n\n return hello_view(request)\n\n\n@web_public_view\ndef home_real(request: HttpRequest) -> HttpResponse:\n # Before we do any real work, check if the app is banned.\n client_user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\")\n (insecure_desktop_app, banned_desktop_app, auto_update_broken) = is_outdated_desktop_app(\n client_user_agent\n )\n if banned_desktop_app:\n return render(\n request,\n \"zerver/insecure_desktop_app.html\",\n context={\n \"auto_update_broken\": auto_update_broken,\n },\n )\n (unsupported_browser, browser_name) = is_unsupported_browser(client_user_agent)\n if unsupported_browser:\n return render(\n request,\n \"zerver/unsupported_browser.html\",\n context={\n \"browser_name\": browser_name,\n },\n )\n\n # We need to modify the session object every two weeks or it will expire.\n # This line makes reloading the page a sufficient action to keep the\n # session alive.\n request.session.modified = True\n\n if request.user.is_authenticated:\n user_profile = request.user\n realm = user_profile.realm\n else:\n realm = get_valid_realm_from_request(request)\n\n # TODO: Ideally, we'd open Zulip directly as a spectator if\n # the URL had clicked a link to content on a web-public\n # stream. 
We could maybe do this by parsing `next`, but it's\n # not super convenient with Zulip's hash-based URL scheme.\n\n # The \"Access without an account\" button on the login page\n # submits a POST to this page with this hidden field set.\n if request.POST.get(\"prefers_web_public_view\") == \"true\":\n request.session[\"prefers_web_public_view\"] = True\n # We serve a redirect here, rather than serving a page, to\n # avoid browser \"Confirm form resubmission\" prompts on reload.\n redirect_to = get_safe_redirect_to(request.POST.get(\"next\"), realm.uri)\n return redirect(redirect_to)\n\n prefers_web_public_view = request.session.get(\"prefers_web_public_view\")\n if not prefers_web_public_view:\n # For users who haven't opted into the spectator\n # experience, we redirect to the login page.\n return zulip_redirect_to_login(request, settings.HOME_NOT_LOGGED_IN)\n\n # For users who have selected public access, we load the\n # spectator experience. We fall through to the shared code\n # for loading the application, with user_profile=None encoding\n # that we're a spectator, not a logged-in user.\n user_profile = None\n\n update_last_reminder(user_profile)\n\n statsd.incr(\"views.home\")\n\n # If a user hasn't signed the current Terms of Service, send them there\n if need_accept_tos(user_profile):\n return accounts_accept_terms(request)\n\n narrow, narrow_stream, narrow_topic = detect_narrowed_window(request, user_profile)\n\n if user_profile is not None:\n first_in_realm = realm_user_count(user_profile.realm) == 1\n # If you are the only person in the realm and you didn't invite\n # anyone, we'll continue to encourage you to do so on the frontend.\n prompt_for_invites = (\n first_in_realm\n and not PreregistrationUser.objects.filter(referred_by=user_profile).count()\n )\n needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING\n\n else:\n first_in_realm = False\n prompt_for_invites = False\n # The current tutorial doesn't super make sense for logged-out users.\n needs_tutorial = False\n\n queue_id, page_params = build_page_params_for_home_page_load(\n request=request,\n user_profile=user_profile,\n realm=realm,\n insecure_desktop_app=insecure_desktop_app,\n narrow=narrow,\n narrow_stream=narrow_stream,\n narrow_topic=narrow_topic,\n first_in_realm=first_in_realm,\n prompt_for_invites=prompt_for_invites,\n needs_tutorial=needs_tutorial,\n )\n\n log_data = RequestNotes.get_notes(request).log_data\n assert log_data is not None\n log_data[\"extra\"] = f\"[{queue_id}]\"\n\n csp_nonce = secrets.token_hex(24)\n\n user_permission_info = get_user_permission_info(user_profile)\n\n response = render(\n request,\n \"zerver/app/index.html\",\n context={\n \"user_profile\": user_profile,\n \"page_params\": page_params,\n \"csp_nonce\": csp_nonce,\n \"color_scheme\": user_permission_info.color_scheme,\n },\n )\n patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)\n return response\n\n\n@zulip_login_required\ndef desktop_home(request: HttpRequest) -> HttpResponse:\n return redirect(home)\n", "path": "zerver/views/home.py"}]} | 3,243 | 128 |
gh_patches_debug_40513 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2866 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 2769 status guide
**URL:** dashboard
**user:** initiator
**expected behaviour:** the space between the line and the dots of the status guide is always the same
**behaviour:** if the wording wraps onto two lines, the space between the line and the dot gets bigger
**important screensize:** depending on screensize, e.g. 1020x723
**device & browser:** all browsers
**Comment/Question:**
Screenshot
<img width="203" alt="Bildschirmfoto 2020-03-29 um 21 45 11" src="https://user-images.githubusercontent.com/35491681/77876518-7b9d9b80-7253-11ea-8d7c-2b0fc1681e02.png">
</issue>
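The views below are included as context for the dashboard workflow being tested; user-facing feedback in them goes through Django's messages framework. A minimal, self-contained sketch of that pattern (illustrative only — the message string is taken from the code below, the view itself is hypothetical):

```python
from django.contrib import messages
from django.http import HttpRequest, HttpResponseRedirect


def publish_and_redirect(request: HttpRequest, next_url: str) -> HttpResponseRedirect:
    # messages.success() queues a flash message on the request; the dashboard
    # templates render it on the next page load.
    messages.success(request, "Module successfully added.")
    return HttpResponseRedirect(next_url)
```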
<code>
[start of meinberlin/apps/bplan/views.py]
1 from django.utils.translation import ugettext_lazy as _
2 from django.views import generic
3 from django.views.generic import TemplateView
4
5 from adhocracy4.dashboard.blueprints import ProjectBlueprint
6 from adhocracy4.dashboard.components.forms.views import \
7 ProjectComponentFormView
8 from adhocracy4.dashboard.mixins import DashboardBaseMixin
9 from meinberlin.apps.bplan import phases as bplan_phases
10 from meinberlin.apps.extprojects.views import ExternalProjectCreateView
11
12 from . import forms
13 from . import models
14
15
16 class BplanStatementSentView(TemplateView):
17 template_name = 'meinberlin_bplan/statement_sent.html'
18
19
20 class BplanFinishedView(TemplateView):
21 template_name = 'meinberlin_bplan/bplan_finished.html'
22
23
24 class BplanProjectCreateView(ExternalProjectCreateView):
25
26 model = models.Bplan
27 slug_url_kwarg = 'project_slug'
28 form_class = forms.BplanProjectCreateForm
29 template_name = \
30 'meinberlin_bplan/bplan_create_dashboard.html'
31 success_message = _('Development plan successfully created.')
32
33 blueprint = ProjectBlueprint(
34 title=_('Development Plan'),
35 description=_('Create a statement formular for development plans'
36 ' to be embedded on external sites.'),
37 content=[
38 bplan_phases.StatementPhase(),
39 ],
40 image='',
41 settings_model=None,
42 )
43
44
45 class BplanProjectUpdateView(ProjectComponentFormView):
46
47 model = models.Bplan
48
49 @property
50 def project(self):
51 project = super().project
52 return project.externalproject.bplan
53
54 def get_object(self, queryset=None):
55 return self.project
56
57
58 class BplanProjectListView(DashboardBaseMixin,
59 generic.ListView):
60 model = models.Bplan
61 paginate_by = 12
62 template_name = 'meinberlin_bplan/bplan_list_dashboard.html'
63 permission_required = 'a4projects.add_project'
64 menu_item = 'project'
65
66 def get_queryset(self):
67 return super().get_queryset().filter(
68 organisation=self.organisation
69 )
70
71 def get_permission_object(self):
72 return self.organisation
73
[end of meinberlin/apps/bplan/views.py]
[start of meinberlin/apps/dashboard/views.py]
1 import json
2 from urllib import parse
3
4 from django.apps import apps
5 from django.contrib import messages
6 from django.contrib.messages.views import SuccessMessageMixin
7 from django.http import HttpResponseRedirect
8 from django.urls import resolve
9 from django.urls import reverse
10 from django.utils.translation import ugettext_lazy as _
11 from django.views import generic
12 from django.views.generic.detail import SingleObjectMixin
13
14 from adhocracy4.dashboard import mixins
15 from adhocracy4.dashboard import signals
16 from adhocracy4.dashboard import views as a4dashboard_views
17 from adhocracy4.dashboard.blueprints import get_blueprints
18 from adhocracy4.modules import models as module_models
19 from adhocracy4.phases import models as phase_models
20 from adhocracy4.projects import models as project_models
21 from adhocracy4.projects.mixins import ProjectMixin
22 from meinberlin.apps.dashboard.forms import DashboardProjectCreateForm
23
24
25 class ModuleBlueprintListView(ProjectMixin,
26 mixins.DashboardBaseMixin,
27 mixins.BlueprintMixin,
28 generic.DetailView):
29 template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'
30 permission_required = 'a4projects.change_project'
31 model = project_models.Project
32 slug_url_kwarg = 'project_slug'
33 menu_item = 'project'
34
35 @property
36 def blueprints(self):
37 return get_blueprints()
38
39 def get_permission_object(self):
40 return self.project
41
42
43 class ModuleCreateView(ProjectMixin,
44 mixins.DashboardBaseMixin,
45 mixins.BlueprintMixin,
46 SingleObjectMixin,
47 generic.View):
48 permission_required = 'a4projects.change_project'
49 model = project_models.Project
50 slug_url_kwarg = 'project_slug'
51
52 def post(self, request, *args, **kwargs):
53 project = self.get_object()
54 weight = 1
55 if project.modules:
56 weight = max(
57 project.modules.values_list('weight', flat=True)
58 ) + 1
59 module = module_models.Module(
60 name=self.blueprint.title,
61 weight=weight,
62 project=project,
63 is_draft=True,
64 )
65 module.save()
66 signals.module_created.send(sender=None,
67 module=module,
68 user=self.request.user)
69
70 self._create_module_settings(module)
71 self._create_phases(module, self.blueprint.content)
72
73 cookie = request.COOKIES.get('dashboard_projects_closed_accordeons',
74 '[]')
75 ids = json.loads(parse.unquote(cookie))
76 if self.project.id not in ids:
77 ids.append(self.project.id)
78
79 cookie = parse.quote(json.dumps(ids))
80
81 response = HttpResponseRedirect(self.get_next(module))
82 response.set_cookie('dashboard_projects_closed_accordeons', cookie)
83 return response
84
85 def _create_module_settings(self, module):
86 if self.blueprint.settings_model:
87 settings_model = apps.get_model(*self.blueprint.settings_model)
88 module_settings = settings_model(module=module)
89 module_settings.save()
90
91 def _create_phases(self, module, blueprint_phases):
92 for index, phase_content in enumerate(blueprint_phases):
93 phase = phase_models.Phase(
94 type=phase_content.identifier,
95 name=phase_content.name,
96 description=phase_content.description,
97 weight=index,
98 module=module,
99 )
100 phase.save()
101
102 def get_next(self, module):
103 return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={
104 'module_slug': module.slug
105 })
106
107 def get_permission_object(self):
108 return self.project
109
110
111 class ModulePublishView(SingleObjectMixin,
112 generic.View):
113 permission_required = 'a4projects.change_project'
114 model = module_models.Module
115 slug_url_kwarg = 'module_slug'
116
117 def get_permission_object(self):
118 return self.get_object().project
119
120 def post(self, request, *args, **kwargs):
121 action = request.POST.get('action', None)
122 if action == 'publish':
123 self.publish_module()
124 elif action == 'unpublish':
125 self.unpublish_module()
126 else:
127 messages.warning(self.request, _('Invalid action'))
128
129 return HttpResponseRedirect(self.get_next())
130
131 def get_next(self):
132 if 'referrer' in self.request.POST:
133 return self.request.POST['referrer']
134 elif 'HTTP_REFERER' in self.request.META:
135 return self.request.META['HTTP_REFERER']
136
137 return reverse('a4dashboard:project-edit', kwargs={
138 'project_slug': self.project.slug
139 })
140
141 def publish_module(self):
142 module = self.get_object()
143 if not module.is_draft:
144 messages.info(self.request, _('Module is already added'))
145 return
146
147 module.is_draft = False
148 module.save()
149
150 signals.module_published.send(sender=None,
151 module=module,
152 user=self.request.user)
153
154 messages.success(self.request,
155 _('Module successfully added.'))
156
157 def unpublish_module(self):
158 module = self.get_object()
159 if module.is_draft:
160 messages.info(self.request, _('Module is already removed'))
161 return
162
163 module.is_draft = True
164 module.save()
165
166 signals.module_unpublished.send(sender=None,
167 module=module,
168 user=self.request.user)
169
170 messages.success(self.request,
171 _('Module successfully removed.'))
172
173
174 class ModuleDeleteView(generic.DeleteView):
175 permission_required = 'a4projects.change_project'
176 model = module_models.Module
177 success_message = _('The module has been deleted')
178
179 def delete(self, request, *args, **kwargs):
180 messages.success(self.request, self.success_message)
181 return super().delete(request, *args, **kwargs)
182
183 def get_permission_object(self):
184 return self.get_object().project
185
186 def get_success_url(self):
187 referrer = self.request.POST.get('referrer', None) \
188 or self.request.META.get('HTTP_REFERER', None)
189 if referrer:
190 view, args, kwargs = resolve(referrer)
191 if 'module_slug' not in kwargs \
192 or not kwargs['module_slug'] == self.get_object().slug:
193 return referrer
194
195 return reverse('a4dashboard:project-edit', kwargs={
196 'project_slug': self.get_object().project.slug
197 })
198
199
200 class DashboardProjectListView(a4dashboard_views.ProjectListView):
201 def get_queryset(self):
202 return super().get_queryset().filter(
203 projectcontainer=None,
204 externalproject=None
205 )
206
207
208 class ProjectCreateView(mixins.DashboardBaseMixin,
209 SuccessMessageMixin,
210 generic.CreateView):
211 model = project_models.Project
212 slug_url_kwarg = 'project_slug'
213 form_class = DashboardProjectCreateForm
214 template_name = 'a4dashboard/project_create_form.html'
215 permission_required = 'a4projects.add_project'
216 menu_item = 'project'
217 success_message = _('Project successfully created.')
218
219 def get_permission_object(self):
220 return self.organisation
221
222 def get_form_kwargs(self):
223 kwargs = super().get_form_kwargs()
224 kwargs['organisation'] = self.organisation
225 kwargs['creator'] = self.request.user
226 return kwargs
227
228 def get_success_url(self):
229 return reverse('a4dashboard:project-edit',
230 kwargs={'project_slug': self.object.slug})
231
232 def form_valid(self, form):
233 response = super().form_valid(form)
234 signals.project_created.send(sender=None,
235 project=self.object,
236 user=self.request.user)
237
238 return response
239
[end of meinberlin/apps/dashboard/views.py]
[start of meinberlin/apps/extprojects/views.py]
1 from django.utils.translation import ugettext_lazy as _
2 from django.views import generic
3
4 from adhocracy4.dashboard.blueprints import ProjectBlueprint
5 from adhocracy4.dashboard.components.forms.views import \
6 ProjectComponentFormView
7 from adhocracy4.dashboard.mixins import DashboardBaseMixin
8 from adhocracy4.dashboard.views import ProjectCreateView
9 from meinberlin.apps.extprojects import phases as extprojects_phases
10
11 from . import apps
12 from . import forms
13 from . import models
14
15
16 class ExternalProjectCreateView(ProjectCreateView):
17
18 model = models.ExternalProject
19 slug_url_kwarg = 'project_slug'
20 form_class = forms.ExternalProjectCreateForm
21 template_name = \
22 'meinberlin_extprojects/external_project_create_dashboard.html'
23 success_message = _('External project successfully created.')
24
25 blueprint = ProjectBlueprint(
26 title=_('Linkage'),
27 description=_(
28 'Linkages are handled on a different platform.'
29 ),
30 content=[
31 extprojects_phases.ExternalPhase(),
32 ],
33 image='',
34 settings_model=None,
35 )
36
37
38 class ExternalProjectUpdateView(ProjectComponentFormView):
39
40 model = models.ExternalProject
41
42 @property
43 def project(self):
44 project = super().project
45 return project.externalproject
46
47 def get_object(self, queryset=None):
48 return self.project
49
50
51 class ExternalProjectListView(DashboardBaseMixin,
52 generic.ListView):
53 model = models.ExternalProject
54 paginate_by = 12
55 template_name = 'meinberlin_extprojects/extproject_list_dashboard.html'
56 permission_required = 'a4projects.add_project'
57 menu_item = 'project'
58
59 def get_queryset(self):
60 project_type = '{}.{}'.format(
61 apps.Config.label,
62 'ExternalProject'
63 )
64 return super().get_queryset().filter(
65 organisation=self.organisation,
66 project_type=project_type
67 )
68
69 def get_permission_object(self):
70 return self.organisation
71
[end of meinberlin/apps/extprojects/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/bplan/views.py b/meinberlin/apps/bplan/views.py
--- a/meinberlin/apps/bplan/views.py
+++ b/meinberlin/apps/bplan/views.py
@@ -28,7 +28,7 @@
form_class = forms.BplanProjectCreateForm
template_name = \
'meinberlin_bplan/bplan_create_dashboard.html'
- success_message = _('Development plan successfully created.')
+ success_message = _('Development plan was created.')
blueprint = ProjectBlueprint(
title=_('Development Plan'),
diff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py
--- a/meinberlin/apps/dashboard/views.py
+++ b/meinberlin/apps/dashboard/views.py
@@ -48,6 +48,7 @@
permission_required = 'a4projects.change_project'
model = project_models.Project
slug_url_kwarg = 'project_slug'
+ success_message = _('The module was created')
def post(self, request, *args, **kwargs):
project = self.get_object()
@@ -69,6 +70,7 @@
self._create_module_settings(module)
self._create_phases(module, self.blueprint.content)
+ messages.success(self.request, self.success_message)
cookie = request.COOKIES.get('dashboard_projects_closed_accordeons',
'[]')
@@ -152,7 +154,7 @@
user=self.request.user)
messages.success(self.request,
- _('Module successfully added.'))
+ _('The module is displayed in the project.'))
def unpublish_module(self):
module = self.get_object()
@@ -168,7 +170,8 @@
user=self.request.user)
messages.success(self.request,
- _('Module successfully removed.'))
+ _('The module is no longer displayed in the project.'
+ ))
class ModuleDeleteView(generic.DeleteView):
@@ -214,7 +217,7 @@
template_name = 'a4dashboard/project_create_form.html'
permission_required = 'a4projects.add_project'
menu_item = 'project'
- success_message = _('Project successfully created.')
+ success_message = _('Project was created.')
def get_permission_object(self):
return self.organisation
diff --git a/meinberlin/apps/extprojects/views.py b/meinberlin/apps/extprojects/views.py
--- a/meinberlin/apps/extprojects/views.py
+++ b/meinberlin/apps/extprojects/views.py
@@ -20,7 +20,7 @@
form_class = forms.ExternalProjectCreateForm
template_name = \
'meinberlin_extprojects/external_project_create_dashboard.html'
- success_message = _('External project successfully created.')
+ success_message = _('External project was created.')
blueprint = ProjectBlueprint(
title=_('Linkage'),
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/views.py b/meinberlin/apps/bplan/views.py\n--- a/meinberlin/apps/bplan/views.py\n+++ b/meinberlin/apps/bplan/views.py\n@@ -28,7 +28,7 @@\n form_class = forms.BplanProjectCreateForm\n template_name = \\\n 'meinberlin_bplan/bplan_create_dashboard.html'\n- success_message = _('Development plan successfully created.')\n+ success_message = _('Development plan was created.')\n \n blueprint = ProjectBlueprint(\n title=_('Development Plan'),\ndiff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py\n--- a/meinberlin/apps/dashboard/views.py\n+++ b/meinberlin/apps/dashboard/views.py\n@@ -48,6 +48,7 @@\n permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n+ success_message = _('The module was created')\n \n def post(self, request, *args, **kwargs):\n project = self.get_object()\n@@ -69,6 +70,7 @@\n \n self._create_module_settings(module)\n self._create_phases(module, self.blueprint.content)\n+ messages.success(self.request, self.success_message)\n \n cookie = request.COOKIES.get('dashboard_projects_closed_accordeons',\n '[]')\n@@ -152,7 +154,7 @@\n user=self.request.user)\n \n messages.success(self.request,\n- _('Module successfully added.'))\n+ _('The module is displayed in the project.'))\n \n def unpublish_module(self):\n module = self.get_object()\n@@ -168,7 +170,8 @@\n user=self.request.user)\n \n messages.success(self.request,\n- _('Module successfully removed.'))\n+ _('The module is no longer displayed in the project.'\n+ ))\n \n \n class ModuleDeleteView(generic.DeleteView):\n@@ -214,7 +217,7 @@\n template_name = 'a4dashboard/project_create_form.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n- success_message = _('Project successfully created.')\n+ success_message = _('Project was created.')\n \n def get_permission_object(self):\n return self.organisation\ndiff --git a/meinberlin/apps/extprojects/views.py b/meinberlin/apps/extprojects/views.py\n--- a/meinberlin/apps/extprojects/views.py\n+++ b/meinberlin/apps/extprojects/views.py\n@@ -20,7 +20,7 @@\n form_class = forms.ExternalProjectCreateForm\n template_name = \\\n 'meinberlin_extprojects/external_project_create_dashboard.html'\n- success_message = _('External project successfully created.')\n+ success_message = _('External project was created.')\n \n blueprint = ProjectBlueprint(\n title=_('Linkage'),\n", "issue": "testing 2769 status guide\n**URL:** dashboard\r\n**user:** initiator\r\n**expected behaviour:** space between line and dots of the status guide is always the same\r\n**behaviour:** if wording is in two lines, the space between line and dot gets bigger\r\n**important screensize:** depending on screensize, e.g. 
1020x723\r\n**device & browser:** all browsers\r\n**Comment/Question:** \r\n\r\nScreenshot\r\n<img width=\"203\" alt=\"Bildschirmfoto 2020-03-29 um 21 45 11\" src=\"https://user-images.githubusercontent.com/35491681/77876518-7b9d9b80-7253-11ea-8d7c-2b0fc1681e02.png\">\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import TemplateView\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.mixins import DashboardBaseMixin\nfrom meinberlin.apps.bplan import phases as bplan_phases\nfrom meinberlin.apps.extprojects.views import ExternalProjectCreateView\n\nfrom . import forms\nfrom . import models\n\n\nclass BplanStatementSentView(TemplateView):\n template_name = 'meinberlin_bplan/statement_sent.html'\n\n\nclass BplanFinishedView(TemplateView):\n template_name = 'meinberlin_bplan/bplan_finished.html'\n\n\nclass BplanProjectCreateView(ExternalProjectCreateView):\n\n model = models.Bplan\n slug_url_kwarg = 'project_slug'\n form_class = forms.BplanProjectCreateForm\n template_name = \\\n 'meinberlin_bplan/bplan_create_dashboard.html'\n success_message = _('Development plan successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Development Plan'),\n description=_('Create a statement formular for development plans'\n ' to be embedded on external sites.'),\n content=[\n bplan_phases.StatementPhase(),\n ],\n image='',\n settings_model=None,\n )\n\n\nclass BplanProjectUpdateView(ProjectComponentFormView):\n\n model = models.Bplan\n\n @property\n def project(self):\n project = super().project\n return project.externalproject.bplan\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass BplanProjectListView(DashboardBaseMixin,\n generic.ListView):\n model = models.Bplan\n paginate_by = 12\n template_name = 'meinberlin_bplan/bplan_list_dashboard.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n return super().get_queryset().filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/bplan/views.py"}, {"content": "import json\nfrom urllib import parse\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import HttpResponseRedirect\nfrom django.urls import resolve\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.dashboard import signals\nfrom adhocracy4.dashboard import views as a4dashboard_views\nfrom adhocracy4.dashboard.blueprints import get_blueprints\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.dashboard.forms import DashboardProjectCreateForm\n\n\nclass ModuleBlueprintListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n permission_required = 'a4projects.change_project'\n model = 
project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n\n @property\n def blueprints(self):\n return get_blueprints()\n\n def get_permission_object(self):\n return self.project\n\n\nclass ModuleCreateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n\n def post(self, request, *args, **kwargs):\n project = self.get_object()\n weight = 1\n if project.modules:\n weight = max(\n project.modules.values_list('weight', flat=True)\n ) + 1\n module = module_models.Module(\n name=self.blueprint.title,\n weight=weight,\n project=project,\n is_draft=True,\n )\n module.save()\n signals.module_created.send(sender=None,\n module=module,\n user=self.request.user)\n\n self._create_module_settings(module)\n self._create_phases(module, self.blueprint.content)\n\n cookie = request.COOKIES.get('dashboard_projects_closed_accordeons',\n '[]')\n ids = json.loads(parse.unquote(cookie))\n if self.project.id not in ids:\n ids.append(self.project.id)\n\n cookie = parse.quote(json.dumps(ids))\n\n response = HttpResponseRedirect(self.get_next(module))\n response.set_cookie('dashboard_projects_closed_accordeons', cookie)\n return response\n\n def _create_module_settings(self, module):\n if self.blueprint.settings_model:\n settings_model = apps.get_model(*self.blueprint.settings_model)\n module_settings = settings_model(module=module)\n module_settings.save()\n\n def _create_phases(self, module, blueprint_phases):\n for index, phase_content in enumerate(blueprint_phases):\n phase = phase_models.Phase(\n type=phase_content.identifier,\n name=phase_content.name,\n description=phase_content.description,\n weight=index,\n module=module,\n )\n phase.save()\n\n def get_next(self, module):\n return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={\n 'module_slug': module.slug\n })\n\n def get_permission_object(self):\n return self.project\n\n\nclass ModulePublishView(SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n slug_url_kwarg = 'module_slug'\n\n def get_permission_object(self):\n return self.get_object().project\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get('action', None)\n if action == 'publish':\n self.publish_module()\n elif action == 'unpublish':\n self.unpublish_module()\n else:\n messages.warning(self.request, _('Invalid action'))\n\n return HttpResponseRedirect(self.get_next())\n\n def get_next(self):\n if 'referrer' in self.request.POST:\n return self.request.POST['referrer']\n elif 'HTTP_REFERER' in self.request.META:\n return self.request.META['HTTP_REFERER']\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.project.slug\n })\n\n def publish_module(self):\n module = self.get_object()\n if not module.is_draft:\n messages.info(self.request, _('Module is already added'))\n return\n\n module.is_draft = False\n module.save()\n\n signals.module_published.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully added.'))\n\n def unpublish_module(self):\n module = self.get_object()\n if module.is_draft:\n messages.info(self.request, _('Module is already removed'))\n return\n\n module.is_draft = True\n module.save()\n\n signals.module_unpublished.send(sender=None,\n module=module,\n user=self.request.user)\n\n 
messages.success(self.request,\n _('Module successfully removed.'))\n\n\nclass ModuleDeleteView(generic.DeleteView):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n success_message = _('The module has been deleted')\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.get_object().project\n\n def get_success_url(self):\n referrer = self.request.POST.get('referrer', None) \\\n or self.request.META.get('HTTP_REFERER', None)\n if referrer:\n view, args, kwargs = resolve(referrer)\n if 'module_slug' not in kwargs \\\n or not kwargs['module_slug'] == self.get_object().slug:\n return referrer\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.get_object().project.slug\n })\n\n\nclass DashboardProjectListView(a4dashboard_views.ProjectListView):\n def get_queryset(self):\n return super().get_queryset().filter(\n projectcontainer=None,\n externalproject=None\n )\n\n\nclass ProjectCreateView(mixins.DashboardBaseMixin,\n SuccessMessageMixin,\n generic.CreateView):\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n form_class = DashboardProjectCreateForm\n template_name = 'a4dashboard/project_create_form.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n success_message = _('Project successfully created.')\n\n def get_permission_object(self):\n return self.organisation\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['organisation'] = self.organisation\n kwargs['creator'] = self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('a4dashboard:project-edit',\n kwargs={'project_slug': self.object.slug})\n\n def form_valid(self, form):\n response = super().form_valid(form)\n signals.project_created.send(sender=None,\n project=self.object,\n user=self.request.user)\n\n return response\n", "path": "meinberlin/apps/dashboard/views.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.mixins import DashboardBaseMixin\nfrom adhocracy4.dashboard.views import ProjectCreateView\nfrom meinberlin.apps.extprojects import phases as extprojects_phases\n\nfrom . import apps\nfrom . import forms\nfrom . 
import models\n\n\nclass ExternalProjectCreateView(ProjectCreateView):\n\n model = models.ExternalProject\n slug_url_kwarg = 'project_slug'\n form_class = forms.ExternalProjectCreateForm\n template_name = \\\n 'meinberlin_extprojects/external_project_create_dashboard.html'\n success_message = _('External project successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Linkage'),\n description=_(\n 'Linkages are handled on a different platform.'\n ),\n content=[\n extprojects_phases.ExternalPhase(),\n ],\n image='',\n settings_model=None,\n )\n\n\nclass ExternalProjectUpdateView(ProjectComponentFormView):\n\n model = models.ExternalProject\n\n @property\n def project(self):\n project = super().project\n return project.externalproject\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ExternalProjectListView(DashboardBaseMixin,\n generic.ListView):\n model = models.ExternalProject\n paginate_by = 12\n template_name = 'meinberlin_extprojects/extproject_list_dashboard.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n project_type = '{}.{}'.format(\n apps.Config.label,\n 'ExternalProject'\n )\n return super().get_queryset().filter(\n organisation=self.organisation,\n project_type=project_type\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/extprojects/views.py"}]} | 4,094 | 635 |
gh_patches_debug_1923 | rasdani/github-patches | git_diff | ivy-llc__ivy-18252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
broadcast_to
</issue>
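The one-line issue above is a request to add a `broadcast_to` function to the Paddle frontend module listed below. For reference, native PaddlePaddle's `paddle.broadcast_to` expands a tensor to a target shape following broadcasting rules; a rough sketch of the behaviour the frontend is expected to mirror (illustrative only, assumes PaddlePaddle is installed):

```python
import paddle  # reference behaviour only

x = paddle.to_tensor([[1, 2, 3]])            # shape (1, 3)
out = paddle.broadcast_to(x, shape=[2, 3])   # rows repeated, shape (2, 3)
print(out.numpy())
```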
<code>
[start of ivy/functional/frontends/paddle/tensor/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 )
10
11
12 @to_ivy_arrays_and_back
13 def reshape(x, shape):
14 return ivy.reshape(x, shape)
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
18 @to_ivy_arrays_and_back
19 def abs(x, name=None):
20 return ivy.abs(x)
21
22
23 absolute = abs
24
25
26 @to_ivy_arrays_and_back
27 def stack(x, axis=0, name=None):
28 return ivy.stack(x, axis=axis)
29
30
31 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle")
32 @to_ivy_arrays_and_back
33 def concat(x, axis, name=None):
34 return ivy.concat(x, axis=axis)
35
36
37 @with_unsupported_dtypes(
38 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")},
39 "paddle",
40 )
41 @to_ivy_arrays_and_back
42 def tile(x, repeat_times, name=None):
43 return ivy.tile(x, repeats=repeat_times)
44
45
46 @with_unsupported_dtypes(
47 {"2.5.0 and below": ("int16", "complex64", "complex128")},
48 "paddle",
49 )
50 @to_ivy_arrays_and_back
51 def split(x, num_or_sections, axis=0, name=None):
52 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
53
54
55 @with_unsupported_dtypes(
56 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")},
57 "paddle",
58 )
59 @to_ivy_arrays_and_back
60 def squeeze(x, axis=None, name=None):
61 return ivy.squeeze(x, axis=axis)
62
63
64 @with_supported_dtypes(
65 {
66 "2.5.0 and below": (
67 "bool",
68 "float16",
69 "float32",
70 "float64",
71 "int32",
72 "int64",
73 "uint8",
74 )
75 },
76 "paddle",
77 )
78 @to_ivy_arrays_and_back
79 def cast(x, dtype):
80 return ivy.astype(x, dtype)
81
[end of ivy/functional/frontends/paddle/tensor/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -78,3 +78,12 @@
@to_ivy_arrays_and_back
def cast(x, dtype):
return ivy.astype(x, dtype)
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def broadcast_to(x, shape, name=None):
+ return ivy.broadcast_to(x, shape)
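A quick usage sketch for the frontend function added in the diff above (illustrative only; it assumes a NumPy backend is installed and that the module path matches the patched file):

```python
import ivy
from ivy.functional.frontends.paddle.tensor.manipulation import broadcast_to

ivy.set_backend("numpy")
out = broadcast_to(ivy.array([[1.0, 2.0, 3.0]]), shape=(2, 3))
print(out.shape)  # expected: (2, 3)
```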
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -78,3 +78,12 @@\n @to_ivy_arrays_and_back\n def cast(x, dtype):\n return ivy.astype(x, dtype)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def broadcast_to(x, shape, name=None):\n+ return ivy.broadcast_to(x, shape)\n", "issue": "broadcast_to\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]} | 1,258 | 186 |
gh_patches_debug_25597 | rasdani/github-patches | git_diff | litestar-org__litestar-1404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
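The reporter's use case, serving files bundled inside a package rather than from a real directory, can be approximated with `importlib.resources`. A minimal sketch is below; the exact method set required by Starlite's `FileSystemProtocol` is not asserted here, so the class and method names are illustrative assumptions only:

```python
from importlib import resources


class PackageResourceReader:
    """Illustrative adapter that reads files shipped inside a Python package."""

    def __init__(self, package: str) -> None:
        self.package = package

    def read_bytes(self, path: str) -> bytes:
        # importlib.resources also works for zipped packages, which is the
        # reporter's point: no real directory has to exist on disk.
        return resources.files(self.package).joinpath(path).read_bytes()
```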
<code>
[start of docs/examples/stores/registry_configure_integrations.py]
1 from pathlib import Path
2
3 from starlite import Starlite
4 from starlite.middleware.session.server_side import ServerSideSessionConfig
5 from starlite.stores.file import FileStore
6 from starlite.stores.redis import RedisStore
7
8 app = Starlite(
9 stores={
10 "sessions": RedisStore.with_client(),
11 "request_cache": FileStore(Path("request-cache")),
12 },
13 middleware=[ServerSideSessionConfig().middleware],
14 )
15
[end of docs/examples/stores/registry_configure_integrations.py]
[start of starlite/config/response_cache.py]
1 from __future__ import annotations
2
3 from dataclasses import dataclass, field
4 from typing import TYPE_CHECKING, Any
5 from urllib.parse import urlencode
6
7 __all__ = ("ResponseCacheConfig", "default_cache_key_builder")
8
9
10 if TYPE_CHECKING:
11 from starlite import Starlite
12 from starlite.connection import Request
13 from starlite.stores.base import Store
14 from starlite.types import CacheKeyBuilder
15
16
17 def default_cache_key_builder(request: Request[Any, Any, Any]) -> str:
18 """Given a request object, returns a cache key by combining the path with the sorted query params.
19
20 Args:
21 request: request used to generate cache key.
22
23 Returns:
24 A combination of url path and query parameters
25 """
26 query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())
27 query_params.sort(key=lambda x: x[0])
28 return request.url.path + urlencode(query_params, doseq=True)
29
30
31 @dataclass
32 class ResponseCacheConfig:
33 """Configuration for response caching.
34
35 To enable response caching, pass an instance of this class to :class:`Starlite <.app.Starlite>` using the
36 ``response_cache_config`` key.
37 """
38
39 default_expiration: int = field(default=60)
40 """Default cache expiration in seconds."""
41 key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)
42 """:class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`."""
43 store: str = "request_cache"
44 """Name of the :class:`Store <.stores.base.Store>` to use."""
45
46 def get_store_from_app(self, app: Starlite) -> Store:
47 """Get the store defined in :attr:`store` from an :class:`Starlite <.app.Starlite>` instance."""
48 return app.stores.get(self.store)
49
[end of starlite/config/response_cache.py]
[start of docs/examples/stores/registry_default_factory_namespacing.py]
1 from starlite import Starlite, get
2 from starlite.middleware.rate_limit import RateLimitConfig
3 from starlite.middleware.session.server_side import ServerSideSessionConfig
4 from starlite.stores.redis import RedisStore
5 from starlite.stores.registry import StoreRegistry
6
7 root_store = RedisStore.with_client()
8
9
10 @get(cache=True)
11 def cached_handler() -> str:
12 # this will use app.stores.get("request_cache")
13 return "Hello, world!"
14
15
16 app = Starlite(
17 [cached_handler],
18 stores=StoreRegistry(default_factory=root_store.with_namespace),
19 middleware=[
20 RateLimitConfig(("second", 1)).middleware,
21 ServerSideSessionConfig().middleware,
22 ],
23 )
24
[end of docs/examples/stores/registry_default_factory_namespacing.py]
</code>
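To make `default_cache_key_builder` from `starlite/config/response_cache.py` above concrete, here is the key construction reduced to plain Python (the request values are invented for illustration):

```python
from urllib.parse import urlencode

path = "/widgets"                                   # request.url.path (assumed)
query_params = [("page", "2"), ("color", "blue")]   # request.query_params items

query_params.sort(key=lambda x: x[0])
cache_key = path + urlencode(query_params, doseq=True)
print(cache_key)  # /widgetscolor=blue&page=2
```

Note that the path and the encoded query string are concatenated without a separator; that is how the function is written, not an addition here.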
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/stores/registry_configure_integrations.py b/docs/examples/stores/registry_configure_integrations.py
--- a/docs/examples/stores/registry_configure_integrations.py
+++ b/docs/examples/stores/registry_configure_integrations.py
@@ -8,7 +8,7 @@
app = Starlite(
stores={
"sessions": RedisStore.with_client(),
- "request_cache": FileStore(Path("request-cache")),
+ "response_cache": FileStore(Path("response-cache")),
},
middleware=[ServerSideSessionConfig().middleware],
)
diff --git a/docs/examples/stores/registry_default_factory_namespacing.py b/docs/examples/stores/registry_default_factory_namespacing.py
--- a/docs/examples/stores/registry_default_factory_namespacing.py
+++ b/docs/examples/stores/registry_default_factory_namespacing.py
@@ -9,7 +9,7 @@
@get(cache=True)
def cached_handler() -> str:
- # this will use app.stores.get("request_cache")
+ # this will use app.stores.get("response_cache")
return "Hello, world!"
diff --git a/starlite/config/response_cache.py b/starlite/config/response_cache.py
--- a/starlite/config/response_cache.py
+++ b/starlite/config/response_cache.py
@@ -40,7 +40,7 @@
"""Default cache expiration in seconds."""
key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)
""":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`."""
- store: str = "request_cache"
+ store: str = "response_cache"
"""Name of the :class:`Store <.stores.base.Store>` to use."""
def get_store_from_app(self, app: Starlite) -> Store:
| {"golden_diff": "diff --git a/docs/examples/stores/registry_configure_integrations.py b/docs/examples/stores/registry_configure_integrations.py\n--- a/docs/examples/stores/registry_configure_integrations.py\n+++ b/docs/examples/stores/registry_configure_integrations.py\n@@ -8,7 +8,7 @@\n app = Starlite(\n stores={\n \"sessions\": RedisStore.with_client(),\n- \"request_cache\": FileStore(Path(\"request-cache\")),\n+ \"response_cache\": FileStore(Path(\"response-cache\")),\n },\n middleware=[ServerSideSessionConfig().middleware],\n )\ndiff --git a/docs/examples/stores/registry_default_factory_namespacing.py b/docs/examples/stores/registry_default_factory_namespacing.py\n--- a/docs/examples/stores/registry_default_factory_namespacing.py\n+++ b/docs/examples/stores/registry_default_factory_namespacing.py\n@@ -9,7 +9,7 @@\n \n @get(cache=True)\n def cached_handler() -> str:\n- # this will use app.stores.get(\"request_cache\")\n+ # this will use app.stores.get(\"response_cache\")\n return \"Hello, world!\"\n \n \ndiff --git a/starlite/config/response_cache.py b/starlite/config/response_cache.py\n--- a/starlite/config/response_cache.py\n+++ b/starlite/config/response_cache.py\n@@ -40,7 +40,7 @@\n \"\"\"Default cache expiration in seconds.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`.\"\"\"\n- store: str = \"request_cache\"\n+ store: str = \"response_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n \n def get_store_from_app(self, app: Starlite) -> Store:\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from pathlib import Path\n\nfrom starlite import Starlite\nfrom starlite.middleware.session.server_side import ServerSideSessionConfig\nfrom starlite.stores.file import FileStore\nfrom starlite.stores.redis import RedisStore\n\napp = Starlite(\n stores={\n \"sessions\": RedisStore.with_client(),\n \"request_cache\": FileStore(Path(\"request-cache\")),\n },\n middleware=[ServerSideSessionConfig().middleware],\n)\n", "path": "docs/examples/stores/registry_configure_integrations.py"}, {"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any\nfrom urllib.parse import urlencode\n\n__all__ = (\"ResponseCacheConfig\", \"default_cache_key_builder\")\n\n\nif TYPE_CHECKING:\n from starlite import Starlite\n from starlite.connection import Request\n from starlite.stores.base import Store\n from starlite.types import CacheKeyBuilder\n\n\ndef default_cache_key_builder(request: Request[Any, Any, Any]) -> str:\n \"\"\"Given a request object, returns a cache key by combining the path with the sorted query params.\n\n Args:\n request: request used to generate cache key.\n\n Returns:\n A combination of url path and query parameters\n \"\"\"\n query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())\n query_params.sort(key=lambda x: x[0])\n return request.url.path + urlencode(query_params, doseq=True)\n\n\n@dataclass\nclass ResponseCacheConfig:\n \"\"\"Configuration for response caching.\n\n To enable response caching, pass an instance of this class to :class:`Starlite <.app.Starlite>` using the\n ``response_cache_config`` key.\n \"\"\"\n\n default_expiration: int = field(default=60)\n \"\"\"Default cache expiration in seconds.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`.\"\"\"\n store: str = \"request_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n\n def get_store_from_app(self, app: Starlite) -> Store:\n \"\"\"Get the store defined in :attr:`store` from an :class:`Starlite <.app.Starlite>` instance.\"\"\"\n return app.stores.get(self.store)\n", "path": "starlite/config/response_cache.py"}, {"content": "from starlite import Starlite, get\nfrom starlite.middleware.rate_limit import RateLimitConfig\nfrom starlite.middleware.session.server_side import ServerSideSessionConfig\nfrom starlite.stores.redis import RedisStore\nfrom starlite.stores.registry import StoreRegistry\n\nroot_store = RedisStore.with_client()\n\n\n@get(cache=True)\ndef cached_handler() -> str:\n # this will use app.stores.get(\"request_cache\")\n return \"Hello, world!\"\n\n\napp = Starlite(\n [cached_handler],\n stores=StoreRegistry(default_factory=root_store.with_namespace),\n middleware=[\n RateLimitConfig((\"second\", 1)).middleware,\n ServerSideSessionConfig().middleware,\n ],\n)\n", "path": "docs/examples/stores/registry_default_factory_namespacing.py"}]} | 1,557 | 394 |
gh_patches_debug_15137 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-4067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
module ini_file fails when values are indented with tabs
##### Issue Type:
Bug Report
##### Ansible Version:
ansible 1.7.2
##### Environment:
Ubuntu 12.04
##### Summary:
When the `ini_file` encounters tabs in a file the task fails.
##### Steps To Reproduce:
Create a global `~/.gitconfig` file by calling `git config --global user.name 'Test User'` and try to change the name using
```
ansible testhost -m ini_file -a "dest=~/.gitconfig section=user option=email value='[email protected]'"
```
##### Expected Results:
Task should run successfully and final `~/.gitconfig` file should be something like this:
```
[user]
name = Test User
email = [email protected]
```
##### Actual Results:
Task fails and final `~/.gitconfig` file is:
```
[user]
name = Test User
```
</issue>
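For reference, the failing input is an INI-style file whose assignments involve tab characters, such as a `~/.gitconfig` written by git itself. A tiny helper to produce such a file for local testing (the file name and values are invented):

```python
from pathlib import Path

# "\t" stands in for the tab indentation git tends to write in config files.
Path("gitconfig_sample").write_text("[user]\n\tname = Test User\n")
```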
<code>
[start of files/ini_file.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
5 # (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
6 #
7 # This file is part of Ansible
8 #
9 # Ansible is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # Ansible is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21 #
22
23 DOCUMENTATION = '''
24 ---
25 module: ini_file
26 short_description: Tweak settings in INI files
27 extends_documentation_fragment: files
28 description:
29 - Manage (add, remove, change) individual settings in an INI-style file without having
30 to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
31 sections if they don't exist.
32 - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
33 version_added: "0.9"
34 options:
35 dest:
36 description:
37 - Path to the INI-style file; this file is created if required
38 required: true
39 default: null
40 section:
41 description:
42 - Section name in INI file. This is added if C(state=present) automatically when
43 a single value is being set.
44 required: true
45 default: null
46 option:
47 description:
48 - if set (required for changing a I(value)), this is the name of the option.
49 - May be omitted if adding/removing a whole I(section).
50 required: false
51 default: null
52 value:
53 description:
54 - the string value to be associated with an I(option). May be omitted when removing an I(option).
55 required: false
56 default: null
57 backup:
58 description:
59 - Create a backup file including the timestamp information so you can get
60 the original file back if you somehow clobbered it incorrectly.
61 required: false
62 default: "no"
63 choices: [ "yes", "no" ]
64 others:
65 description:
66 - all arguments accepted by the M(file) module also work here
67 required: false
68 state:
69 description:
70 - If set to C(absent) the option or section will be removed if present instead of created.
71 required: false
72 default: "present"
73 choices: [ "present", "absent" ]
74 no_extra_spaces:
75 description:
76 - do not insert spaces before and after '=' symbol
77 required: false
78 default: false
79 version_added: "2.1"
80 notes:
81 - While it is possible to add an I(option) without specifying a I(value), this makes
82 no sense.
83 - A section named C(default) cannot be added by the module, but if it exists, individual
84 options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)
85 Either use M(template) to create a base INI file with a C([default]) section, or use
86 M(lineinfile) to add the missing line.
87 requirements: [ ConfigParser ]
88 author:
89 - "Jan-Piet Mens (@jpmens)"
90 - "Ales Nosek (@noseka1)"
91 '''
92
93 EXAMPLES = '''
94 # Ensure "fav=lemonade is in section "[drinks]" in specified file
95 - ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes
96
97 - ini_file: dest=/etc/anotherconf
98 section=drinks
99 option=temperature
100 value=cold
101 backup=yes
102 '''
103
104 import ConfigParser
105 import sys
106 import os
107
108 # ==============================================================
109 # match_opt
110
111 def match_opt(option, line):
112 option = re.escape(option)
113 return re.match('%s *=' % option, line) \
114 or re.match('# *%s *=' % option, line) \
115 or re.match('; *%s *=' % option, line)
116
117 # ==============================================================
118 # match_active_opt
119
120 def match_active_opt(option, line):
121 option = re.escape(option)
122 return re.match('%s *=' % option, line)
123
124 # ==============================================================
125 # do_ini
126
127 def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):
128
129
130 if not os.path.exists(filename):
131 try:
132 open(filename,'w').close()
133 except:
134 module.fail_json(msg="Destination file %s not writable" % filename)
135 ini_file = open(filename, 'r')
136 try:
137 ini_lines = ini_file.readlines()
138 # append a fake section line to simplify the logic
139 ini_lines.append('[')
140 finally:
141 ini_file.close()
142
143 within_section = not section
144 section_start = 0
145 changed = False
146 if no_extra_spaces:
147 assignment_format = '%s=%s\n'
148 else:
149 assignment_format = '%s = %s\n'
150
151 for index, line in enumerate(ini_lines):
152 if line.startswith('[%s]' % section):
153 within_section = True
154 section_start = index
155 elif line.startswith('['):
156 if within_section:
157 if state == 'present':
158 # insert missing option line at the end of the section
159 ini_lines.insert(index, assignment_format % (option, value))
160 changed = True
161 elif state == 'absent' and not option:
162 # remove the entire section
163 del ini_lines[section_start:index]
164 changed = True
165 break
166 else:
167 if within_section and option:
168 if state == 'present':
169 # change the existing option line
170 if match_opt(option, line):
171 newline = assignment_format % (option, value)
172 changed = ini_lines[index] != newline
173 ini_lines[index] = newline
174 if changed:
175 # remove all possible option occurences from the rest of the section
176 index = index + 1
177 while index < len(ini_lines):
178 line = ini_lines[index]
179 if line.startswith('['):
180 break
181 if match_active_opt(option, line):
182 del ini_lines[index]
183 else:
184 index = index + 1
185 break
186 else:
187 # comment out the existing option line
188 if match_active_opt(option, line):
189 ini_lines[index] = '#%s' % ini_lines[index]
190 changed = True
191 break
192
193 # remove the fake section line
194 del ini_lines[-1:]
195
196 if not within_section and option and state == 'present':
197 ini_lines.append('[%s]\n' % section)
198 ini_lines.append(assignment_format % (option, value))
199 changed = True
200
201
202 if changed and not module.check_mode:
203 if backup:
204 module.backup_local(filename)
205 ini_file = open(filename, 'w')
206 try:
207 ini_file.writelines(ini_lines)
208 finally:
209 ini_file.close()
210
211 return changed
212
213 # ==============================================================
214 # main
215
216 def main():
217
218 module = AnsibleModule(
219 argument_spec = dict(
220 dest = dict(required=True),
221 section = dict(required=True),
222 option = dict(required=False),
223 value = dict(required=False),
224 backup = dict(default='no', type='bool'),
225 state = dict(default='present', choices=['present', 'absent']),
226 no_extra_spaces = dict(required=False, default=False, type='bool')
227 ),
228 add_file_common_args = True,
229 supports_check_mode = True
230 )
231
232 info = dict()
233
234 dest = os.path.expanduser(module.params['dest'])
235 section = module.params['section']
236 option = module.params['option']
237 value = module.params['value']
238 state = module.params['state']
239 backup = module.params['backup']
240 no_extra_spaces = module.params['no_extra_spaces']
241
242 changed = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)
243
244 file_args = module.load_file_common_arguments(module.params)
245 changed = module.set_fs_attributes_if_different(file_args, changed)
246
247 # Mission complete
248 module.exit_json(dest=dest, changed=changed, msg="OK")
249
250 # import module snippets
251 from ansible.module_utils.basic import *
252 if __name__ == '__main__':
253 main()
254
[end of files/ini_file.py]
</code>
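The patterns in `match_opt` and `match_active_opt` above only allow spaces around `=`. A quick check of how they behave on a tab-separated assignment, next to a tab-tolerant variant (the input line is invented):

```python
import re

option = re.escape("email")
line = "email\t= [email protected]"

print(re.match('%s *=' % option, line))        # None: ' *' does not cover tabs
print(re.match('%s( |\t)*=' % option, line))   # matches once tabs are allowed
```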
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/files/ini_file.py b/files/ini_file.py
--- a/files/ini_file.py
+++ b/files/ini_file.py
@@ -110,16 +110,16 @@
def match_opt(option, line):
option = re.escape(option)
- return re.match('%s *=' % option, line) \
- or re.match('# *%s *=' % option, line) \
- or re.match('; *%s *=' % option, line)
+ return re.match('%s( |\t)*=' % option, line) \
+ or re.match('# *%s( |\t)*=' % option, line) \
+ or re.match('; *%s( |\t)*=' % option, line)
# ==============================================================
# match_active_opt
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('%s *=' % option, line)
+ return re.match('%s( |\t)*=' % option, line)
# ==============================================================
# do_ini
| {"golden_diff": "diff --git a/files/ini_file.py b/files/ini_file.py\n--- a/files/ini_file.py\n+++ b/files/ini_file.py\n@@ -110,16 +110,16 @@\n \n def match_opt(option, line):\n option = re.escape(option)\n- return re.match('%s *=' % option, line) \\\n- or re.match('# *%s *=' % option, line) \\\n- or re.match('; *%s *=' % option, line)\n+ return re.match('%s( |\\t)*=' % option, line) \\\n+ or re.match('# *%s( |\\t)*=' % option, line) \\\n+ or re.match('; *%s( |\\t)*=' % option, line)\n \n # ==============================================================\n # match_active_opt\n \n def match_active_opt(option, line):\n option = re.escape(option)\n- return re.match('%s *=' % option, line)\n+ return re.match('%s( |\\t)*=' % option, line)\n \n # ==============================================================\n # do_ini\n", "issue": "module ini_file fails when values are indented with tabs\n##### Issue Type:\n\nBug Report\n##### Ansible Version:\n\nansible 1.7.2\n##### Environment:\n\nUbuntu 12.04\n##### Summary:\n\nWhen the `ini_file` encounters tabs in a file the task fails.\n##### Steps To Reproduce:\n\nCreate a global `~/.gitconfig` file by calling `gitconfig --global user.name 'Test User'` and try to change the name using\n\n```\nansible testhost -m ini_file -a \"dest=~/.gitconfig section=user option=email value='[email protected]'\"\n```\n##### Expected Results:\n\nTask should run succesfully and final `~/.gitconfig` file should be something like this:\n\n```\n[user]\nname = Test User\nemail = [email protected]\n```\n##### Actual Results:\n\nTask fails and final `~/.gitconfig` file is:\n\n```\n[user]\n name = Test User\n```\n\nmodule ini_file fails when values are indented with tabs\n##### Issue Type:\n\nBug Report\n##### Ansible Version:\n\nansible 1.7.2\n##### Environment:\n\nUbuntu 12.04\n##### Summary:\n\nWhen the `ini_file` encounters tabs in a file the task fails.\n##### Steps To Reproduce:\n\nCreate a global `~/.gitconfig` file by calling `gitconfig --global user.name 'Test User'` and try to change the name using\n\n```\nansible testhost -m ini_file -a \"dest=~/.gitconfig section=user option=email value='[email protected]'\"\n```\n##### Expected Results:\n\nTask should run succesfully and final `~/.gitconfig` file should be something like this:\n\n```\n[user]\nname = Test User\nemail = [email protected]\n```\n##### Actual Results:\n\nTask fails and final `~/.gitconfig` file is:\n\n```\n[user]\n name = Test User\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\nversion_added: \"0.9\"\noptions:\n dest:\n description:\n - Path to the INI-style file; this file is created if required\n required: true\n default: null\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n required: true\n default: null\n option:\n description:\n - if set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n required: false\n default: null\n value:\n description:\n - the string value to be associated with an I(option). May be omitted when removing an I(option).\n required: false\n default: null\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n others:\n description:\n - all arguments accepted by the M(file) module also work here\n required: false\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n required: false\n default: \"present\"\n choices: [ \"present\", \"absent\" ]\n no_extra_spaces:\n description:\n - do not insert spaces before and after '=' symbol\n required: false\n default: false\n version_added: \"2.1\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - A section named C(default) cannot be added by the module, but if it exists, individual\n options within the section can be updated. 
(This is a limitation of Python's I(ConfigParser).)\n Either use M(template) to create a base INI file with a C([default]) section, or use\n M(lineinfile) to add the missing line.\nrequirements: [ ConfigParser ]\nauthor:\n - \"Jan-Piet Mens (@jpmens)\"\n - \"Ales Nosek (@noseka1)\"\n'''\n\nEXAMPLES = '''\n# Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes\n\n- ini_file: dest=/etc/anotherconf\n section=drinks\n option=temperature\n value=cold\n backup=yes\n'''\n\nimport ConfigParser\nimport sys\nimport os\n\n# ==============================================================\n# match_opt\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match('%s *=' % option, line) \\\n or re.match('# *%s *=' % option, line) \\\n or re.match('; *%s *=' % option, line)\n\n# ==============================================================\n# match_active_opt\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('%s *=' % option, line)\n\n# ==============================================================\n# do_ini\n\ndef do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):\n\n\n if not os.path.exists(filename):\n try:\n open(filename,'w').close()\n except:\n module.fail_json(msg=\"Destination file %s not writable\" % filename)\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n finally:\n ini_file.close()\n\n within_section = not section\n section_start = 0\n changed = False\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n ini_lines.insert(index, assignment_format % (option, value))\n changed = True\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if match_opt(option, line):\n newline = assignment_format % (option, value)\n changed = ini_lines[index] != newline\n ini_lines[index] = newline\n if changed:\n # remove all possible option occurences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n else:\n # comment out the existing option line\n if match_active_opt(option, line):\n ini_lines[index] = '#%s' % ini_lines[index]\n changed = True\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n\n\n if changed and not module.check_mode:\n if backup:\n module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return changed\n\n# ==============================================================\n# main\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = 
dict(\n dest = dict(required=True),\n section = dict(required=True),\n option = dict(required=False),\n value = dict(required=False),\n backup = dict(default='no', type='bool'),\n state = dict(default='present', choices=['present', 'absent']),\n no_extra_spaces = dict(required=False, default=False, type='bool')\n ),\n add_file_common_args = True,\n supports_check_mode = True\n )\n\n info = dict()\n\n dest = os.path.expanduser(module.params['dest'])\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n\n changed = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)\n\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n # Mission complete\n module.exit_json(dest=dest, changed=changed, msg=\"OK\")\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "files/ini_file.py"}]} | 3,488 | 231 |
gh_patches_debug_35074 | rasdani/github-patches | git_diff | vacanza__python-holidays-1718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[SonarCloud] Suppress warning `Python:S5852` or replace the affected RegEx strings with non-* equivalent
From [SonarCloud check](https://sonarcloud.io/project/security_hotspots?id=vacanza_python-holidays&branch=beta&resolved=false&inNewCodePeriod=true), for `dev` branch
> Make sure the regex used here, which is vulnerable to polynomial runtime due to backtracking, cannot lead to denial of service.
> Using slow regular expressions is security-sensitive[python:S5852](https://sonarcloud.io/organizations/vacanza/rules?open=python%3AS5852&rule_key=python%3AS5852)
All 3 affected instances are:
- `pr = re.findall(r"^(.*) \(#\d+ .*\)$", pr)[0]` in [scripts/generate_release_notes.py](https://github.com/vacanza/python-holidays/blob/beta/scripts/generate_release_notes.py)
- `subdivision_group_re = re.compile(".*: (.*)")` and `subdivision_aliases_re = re.compile(r"(.*)\s\((.*?)\)")` in [test/test_docs.py](https://github.com/vacanza/python-holidays/blob/beta/tests/test_docs.py)
</issue>
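The flagged pattern `r"^(.*) \(#\d+ .*\)$"` re-parses a string that the script itself composed, which is where the backtracking risk comes from. One way to sidestep it is to keep the parts separate instead of extracting them back out with a regex (the values below are invented):

```python
# Keep the PR title and the attribution as separate fields instead of
# composing "title (#123 by @user)" and regex-parsing it back apart.
pull_requests = {123: ("Add Foo holidays", "#123 by @someone")}

title, byline = pull_requests[123]
print(f"- {title} ({byline})")  # - Add Foo holidays (#123 by @someone)
```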
<code>
[start of scripts/generate_release_notes.py]
1 #!/usr/bin/env python3
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Authors: dr-prodigy <[email protected]> (c) 2017-2023
10 # ryanss <[email protected]> (c) 2014-2017
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 import argparse
15 import re
16 import sys
17 from datetime import date
18 from pathlib import Path
19 from typing import Dict, Set
20
21 from git import Repo
22 from github import Github
23 from github.GithubException import UnknownObjectException
24
25 sys.path.append(f"{Path.cwd()}")
26 import holidays # noqa: E402
27
28 BRANCH_NAME = "dev"
29 HEADER_TEMPLATE = """
30 Version {version}
31 ============
32
33 Released {month} {day}, {year}
34 """
35 IGNORED_CONTRIBUTORS = {"dependabot[bot]", "github-actions[bot]"}
36 REPOSITORY_NAME = "vacanza/python-holidays"
37
38
39 class ReleaseNotesGenerator:
40 """
41 Generates release notes based on local git commits and GitHub PRs metadata.
42
43 Usage example: scripts/generate_release_notes.py
44 """
45
46 def __init__(self) -> None:
47 arg_parser = argparse.ArgumentParser()
48 arg_parser.add_argument(
49 "-a",
50 "--author-only",
51 action="extend",
52 default=[],
53 help="Add only author as a contributor for this PR",
54 nargs="+",
55 type=int,
56 )
57 arg_parser.add_argument(
58 "-c",
59 "--cut-off-at",
60 help="Cut off at PR",
61 required=False,
62 type=int,
63 )
64 arg_parser.add_argument(
65 "-e",
66 "--exclude",
67 action="extend",
68 default=[],
69 help="Exclude this PR from the release notes",
70 nargs="+",
71 type=int,
72 )
73 arg_parser.add_argument(
74 "-v",
75 "--verbose",
76 action="store_true",
77 default=False,
78 help="Verbose output",
79 )
80 self.args = arg_parser.parse_args()
81
82 self.local_repo = Repo(Path.cwd())
83 self.remote_repo = Github(self.github_token).get_repo(REPOSITORY_NAME)
84
85 self.previous_commits: Set[str] = set()
86 self.pull_requests: Dict[int, str] = {}
87
88 self.tag = holidays.__version__
89
90 try:
91 latest_tag = self.remote_repo.get_tags()[0]
92 self.latest_tag_name = latest_tag.name
93 self.previous_commits.add(latest_tag.commit.sha)
94 except IndexError:
95 self.latest_tag_name = None
96
97 @property
98 def github_token(self, path=Path(".github_token")):
99 """Return GitHub access token."""
100 return path.read_text(encoding="UTF-8").strip()
101
102 @property
103 def is_ready(self):
104 """Perform environment checks and input validation."""
105 current_branch = str(self.local_repo.active_branch)
106 if current_branch != BRANCH_NAME:
107 exit(
108 f"Switch to '{BRANCH_NAME}' first (currently in "
109 f"'{current_branch}'). Use 'git switch {BRANCH_NAME}'."
110 )
111
112 return True
113
114 @property
115 def sorted_pull_requests(self):
116 def custom_order(pr):
117 pr = re.findall(r"^(.*) \(#\d+ .*\)$", pr)[0]
118
119 if re.findall(r"^(Introduce|Refactor)", pr) or re.findall(r"Add .* support", pr):
120 weight = 10
121 elif re.findall(r"^Add .* holidays$", pr):
122 weight = 20
123 elif re.findall(r"(^Localize|localization$)", pr):
124 weight = 30
125 elif re.findall(r"^Fix", pr):
126 weight = 40
127 elif re.findall(r"^(Change|Improve|Optimize|Update|Upgrade)", pr):
128 weight = 50
129 else:
130 weight = 100
131
132 return (weight, pr)
133
134 return sorted(self.pull_requests.values(), key=custom_order)
135
136 def add_pull_request(self, pull_request):
137 """Add pull request information to the release notes dict."""
138 author = pull_request.user.login if pull_request.user else None
139 if author in IGNORED_CONTRIBUTORS:
140 print((f"Skipping #{pull_request.number} {pull_request.title}" f" by {author}"))
141 return None
142
143 # Skip failed release attempt PRs, version upgrades.
144 pr_title = pull_request.title
145 skip_titles = (f"v.{self.tag}", "Bump", "Revert")
146 for skip_title in skip_titles:
147 if pr_title.startswith(skip_title):
148 return None
149
150 # Get contributors (expand from commits by default).
151 contributors = set()
152 if pull_request.number not in self.args.author_only:
153 for commit in pull_request.get_commits():
154 if commit.author:
155 contributors.add(commit.author.login)
156
157 if author in contributors:
158 contributors.remove(author)
159 contributors = (f"@{c}" for c in [author] + sorted(contributors, key=str.lower))
160 self.pull_requests[pull_request.number] = (
161 f"{pull_request.title} (#{pull_request.number} by " f"{', '.join(contributors)})"
162 )
163
164 def generate_release_notes(self):
165 """Generate release notes contents."""
166 print("Processing pull requests...")
167 self.get_new_pull_requests()
168 self.get_old_pull_requests()
169 print("Done!")
170
171 def get_new_pull_requests(self):
172 """Get PRs created after the latest release.
173
174 This operation also populates a set of previous release commits.
175 """
176 cut_off_at = self.args.cut_off_at
177 excluded_pr_numbers = set(self.args.exclude)
178 for pull_request in self.remote_repo.get_pulls(state="closed"):
179 # Stop getting pull requests after previous release tag or specific PR number.
180 cut_off = cut_off_at and pull_request.number == cut_off_at
181 if cut_off or pull_request.title == self.latest_tag_name:
182 # Get previous release commits SHAs.
183 for commit in pull_request.get_commits():
184 self.previous_commits.add(commit.sha)
185 break
186
187 # Skip closed unmerged PRs.
188 if not pull_request.merged:
189 continue
190
191 if pull_request.number in excluded_pr_numbers:
192 if self.args.verbose:
193 print(f"Excluding PR #{pull_request.number} as requested")
194 continue
195
196 if self.args.verbose:
197 messages = [f"Fetching PR #{pull_request.number}"]
198 if pull_request.number in self.args.author_only:
199 messages.append("(keeping PR author as a sole contributor)")
200 print(" ".join(messages))
201
202 self.add_pull_request(pull_request)
203
204 def get_old_pull_requests(self):
205 """Get PRs created before the latest release."""
206 pull_request_numbers = set()
207 for commit in self.local_repo.iter_commits():
208 if commit.hexsha in self.previous_commits:
209 break
210
211 try:
212 pull_request_number = re.findall(
213 r"#(\d{3,})",
214 commit.message,
215 )[0]
216 pull_request_numbers.add(int(pull_request_number))
217 except IndexError:
218 continue
219
220 # Fetch old PRs metadata only. Skip all known PRs.
221 pull_request_numbers -= set(self.pull_requests.keys())
222 pull_request_numbers -= set(self.args.exclude)
223 for pull_request_number in pull_request_numbers:
224 if self.args.verbose:
225 messages = [f"Fetching PR #{pull_request_number}"]
226 if pull_request_number in self.args.author_only:
227 messages.append("(keeping PR author as a sole contributor)")
228 print(" ".join(messages))
229
230 try:
231 self.add_pull_request(self.remote_repo.get_pull(pull_request_number))
232 # 3rd party contributions to forks.
233 except UnknownObjectException:
234 pass
235
236 def print_release_notes(self):
237 """Print generated release notes."""
238 print("")
239 if self.pull_requests:
240 today = date.today()
241 print(
242 HEADER_TEMPLATE.format(
243 day=today.day,
244 month=today.strftime("%B"),
245 version=self.tag,
246 year=today.year,
247 )
248 )
249 print("\n".join((f"- {pr}" for pr in self.sorted_pull_requests)))
250
251 else:
252 print(f"No changes since {self.latest_tag_name} release.")
253
254
255 if __name__ == "__main__":
256 rng = ReleaseNotesGenerator()
257 if rng.is_ready:
258 rng.generate_release_notes()
259 rng.print_release_notes()
260
[end of scripts/generate_release_notes.py]
</code>
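The `custom_order` helper in the listing above sorts pull requests by a weight derived from the title prefix. A small standalone rehearsal of that ordering (the titles are invented):

```python
import re

def weight(title: str) -> int:
    # Same buckets as custom_order in generate_release_notes.py above.
    if re.findall(r"^(Introduce|Refactor)", title) or re.findall(r"Add .* support", title):
        return 10
    if re.findall(r"^Add .* holidays$", title):
        return 20
    if re.findall(r"(^Localize|localization$)", title):
        return 30
    if re.findall(r"^Fix", title):
        return 40
    if re.findall(r"^(Change|Improve|Optimize|Update|Upgrade)", title):
        return 50
    return 100

titles = ["Fix DE holidays", "Add Aruba holidays", "Introduce new engine", "Update docs"]
print(sorted(titles, key=lambda t: (weight(t), t)))
# ['Introduce new engine', 'Add Aruba holidays', 'Fix DE holidays', 'Update docs']
```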
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/generate_release_notes.py b/scripts/generate_release_notes.py
--- a/scripts/generate_release_notes.py
+++ b/scripts/generate_release_notes.py
@@ -114,22 +114,20 @@
@property
def sorted_pull_requests(self):
def custom_order(pr):
- pr = re.findall(r"^(.*) \(#\d+ .*\)$", pr)[0]
-
- if re.findall(r"^(Introduce|Refactor)", pr) or re.findall(r"Add .* support", pr):
+ if re.findall(r"^(Introduce|Refactor)", pr[0]) or re.findall(r"Add .* support", pr[0]):
weight = 10
- elif re.findall(r"^Add .* holidays$", pr):
+ elif re.findall(r"^Add .* holidays$", pr[0]):
weight = 20
- elif re.findall(r"(^Localize|localization$)", pr):
+ elif re.findall(r"(^Localize)|(localization$)", pr[0]):
weight = 30
- elif re.findall(r"^Fix", pr):
+ elif re.findall(r"^Fix", pr[0]):
weight = 40
- elif re.findall(r"^(Change|Improve|Optimize|Update|Upgrade)", pr):
+ elif re.findall(r"^(Change|Improve|Optimize|Update|Upgrade)", pr[0]):
weight = 50
else:
weight = 100
- return (weight, pr)
+ return weight, pr
return sorted(self.pull_requests.values(), key=custom_order)
@@ -158,7 +156,8 @@
contributors.remove(author)
contributors = (f"@{c}" for c in [author] + sorted(contributors, key=str.lower))
self.pull_requests[pull_request.number] = (
- f"{pull_request.title} (#{pull_request.number} by " f"{', '.join(contributors)})"
+ pull_request.title,
+ f"#{pull_request.number} by {', '.join(contributors)}",
)
def generate_release_notes(self):
@@ -246,7 +245,7 @@
year=today.year,
)
)
- print("\n".join((f"- {pr}" for pr in self.sorted_pull_requests)))
+ print("\n".join((f"- {pr[0]} ({pr[1]})" for pr in self.sorted_pull_requests)))
else:
print(f"No changes since {self.latest_tag_name} release.")
| {"golden_diff": "diff --git a/scripts/generate_release_notes.py b/scripts/generate_release_notes.py\n--- a/scripts/generate_release_notes.py\n+++ b/scripts/generate_release_notes.py\n@@ -114,22 +114,20 @@\n @property\n def sorted_pull_requests(self):\n def custom_order(pr):\n- pr = re.findall(r\"^(.*) \\(#\\d+ .*\\)$\", pr)[0]\n-\n- if re.findall(r\"^(Introduce|Refactor)\", pr) or re.findall(r\"Add .* support\", pr):\n+ if re.findall(r\"^(Introduce|Refactor)\", pr[0]) or re.findall(r\"Add .* support\", pr[0]):\n weight = 10\n- elif re.findall(r\"^Add .* holidays$\", pr):\n+ elif re.findall(r\"^Add .* holidays$\", pr[0]):\n weight = 20\n- elif re.findall(r\"(^Localize|localization$)\", pr):\n+ elif re.findall(r\"(^Localize)|(localization$)\", pr[0]):\n weight = 30\n- elif re.findall(r\"^Fix\", pr):\n+ elif re.findall(r\"^Fix\", pr[0]):\n weight = 40\n- elif re.findall(r\"^(Change|Improve|Optimize|Update|Upgrade)\", pr):\n+ elif re.findall(r\"^(Change|Improve|Optimize|Update|Upgrade)\", pr[0]):\n weight = 50\n else:\n weight = 100\n \n- return (weight, pr)\n+ return weight, pr\n \n return sorted(self.pull_requests.values(), key=custom_order)\n \n@@ -158,7 +156,8 @@\n contributors.remove(author)\n contributors = (f\"@{c}\" for c in [author] + sorted(contributors, key=str.lower))\n self.pull_requests[pull_request.number] = (\n- f\"{pull_request.title} (#{pull_request.number} by \" f\"{', '.join(contributors)})\"\n+ pull_request.title,\n+ f\"#{pull_request.number} by {', '.join(contributors)}\",\n )\n \n def generate_release_notes(self):\n@@ -246,7 +245,7 @@\n year=today.year,\n )\n )\n- print(\"\\n\".join((f\"- {pr}\" for pr in self.sorted_pull_requests)))\n+ print(\"\\n\".join((f\"- {pr[0]} ({pr[1]})\" for pr in self.sorted_pull_requests)))\n \n else:\n print(f\"No changes since {self.latest_tag_name} release.\")\n", "issue": "[SonarCloud] Suppress warning `Python:S5852` or replace the affected RegEx strings with non-* equivalent\nFrom [SonarCloud check](https://sonarcloud.io/project/security_hotspots?id=vacanza_python-holidays&branch=beta&resolved=false&inNewCodePeriod=true), for `dev` branch\r\n\r\n> Make sure the regex used here, which is vulnerable to polynomial runtime due to backtracking, cannot lead to denial of service.\r\n\r\n> Using slow regular expressions is security-sensitive[python:S5852](https://sonarcloud.io/organizations/vacanza/rules?open=python%3AS5852&rule_key=python%3AS5852)\r\n\r\nAll 3 affected instances are:\r\n- `pr = re.findall(r\"^(.*) \\(#\\d+ .*\\)$\", pr)[0]` in [scripts/generate_release_notes.py](https://github.com/vacanza/python-holidays/blob/beta/scripts/generate_release_notes.py)\r\n- `subdivision_group_re = re.compile(\".*: (.*)\")` and `subdivision_aliases_re = re.compile(r\"(.*)\\s\\((.*?)\\)\")` in [test/test_docs.py](https://github.com/vacanza/python-holidays/blob/beta/tests/test_docs.py)\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport argparse\nimport re\nimport sys\nfrom datetime import date\nfrom pathlib import Path\nfrom typing import Dict, Set\n\nfrom git import Repo\nfrom github import Github\nfrom github.GithubException import UnknownObjectException\n\nsys.path.append(f\"{Path.cwd()}\")\nimport holidays # noqa: E402\n\nBRANCH_NAME = \"dev\"\nHEADER_TEMPLATE = \"\"\"\nVersion {version}\n============\n\nReleased {month} {day}, {year}\n\"\"\"\nIGNORED_CONTRIBUTORS = {\"dependabot[bot]\", \"github-actions[bot]\"}\nREPOSITORY_NAME = \"vacanza/python-holidays\"\n\n\nclass ReleaseNotesGenerator:\n \"\"\"\n Generates release notes based on local git commits and GitHub PRs metadata.\n\n Usage example: scripts/generate_release_notes.py\n \"\"\"\n\n def __init__(self) -> None:\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n \"-a\",\n \"--author-only\",\n action=\"extend\",\n default=[],\n help=\"Add only author as a contributor for this PR\",\n nargs=\"+\",\n type=int,\n )\n arg_parser.add_argument(\n \"-c\",\n \"--cut-off-at\",\n help=\"Cut off at PR\",\n required=False,\n type=int,\n )\n arg_parser.add_argument(\n \"-e\",\n \"--exclude\",\n action=\"extend\",\n default=[],\n help=\"Exclude this PR from the release notes\",\n nargs=\"+\",\n type=int,\n )\n arg_parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n default=False,\n help=\"Verbose output\",\n )\n self.args = arg_parser.parse_args()\n\n self.local_repo = Repo(Path.cwd())\n self.remote_repo = Github(self.github_token).get_repo(REPOSITORY_NAME)\n\n self.previous_commits: Set[str] = set()\n self.pull_requests: Dict[int, str] = {}\n\n self.tag = holidays.__version__\n\n try:\n latest_tag = self.remote_repo.get_tags()[0]\n self.latest_tag_name = latest_tag.name\n self.previous_commits.add(latest_tag.commit.sha)\n except IndexError:\n self.latest_tag_name = None\n\n @property\n def github_token(self, path=Path(\".github_token\")):\n \"\"\"Return GitHub access token.\"\"\"\n return path.read_text(encoding=\"UTF-8\").strip()\n\n @property\n def is_ready(self):\n \"\"\"Perform environment checks and input validation.\"\"\"\n current_branch = str(self.local_repo.active_branch)\n if current_branch != BRANCH_NAME:\n exit(\n f\"Switch to '{BRANCH_NAME}' first (currently in \"\n f\"'{current_branch}'). 
Use 'git switch {BRANCH_NAME}'.\"\n )\n\n return True\n\n @property\n def sorted_pull_requests(self):\n def custom_order(pr):\n pr = re.findall(r\"^(.*) \\(#\\d+ .*\\)$\", pr)[0]\n\n if re.findall(r\"^(Introduce|Refactor)\", pr) or re.findall(r\"Add .* support\", pr):\n weight = 10\n elif re.findall(r\"^Add .* holidays$\", pr):\n weight = 20\n elif re.findall(r\"(^Localize|localization$)\", pr):\n weight = 30\n elif re.findall(r\"^Fix\", pr):\n weight = 40\n elif re.findall(r\"^(Change|Improve|Optimize|Update|Upgrade)\", pr):\n weight = 50\n else:\n weight = 100\n\n return (weight, pr)\n\n return sorted(self.pull_requests.values(), key=custom_order)\n\n def add_pull_request(self, pull_request):\n \"\"\"Add pull request information to the release notes dict.\"\"\"\n author = pull_request.user.login if pull_request.user else None\n if author in IGNORED_CONTRIBUTORS:\n print((f\"Skipping #{pull_request.number} {pull_request.title}\" f\" by {author}\"))\n return None\n\n # Skip failed release attempt PRs, version upgrades.\n pr_title = pull_request.title\n skip_titles = (f\"v.{self.tag}\", \"Bump\", \"Revert\")\n for skip_title in skip_titles:\n if pr_title.startswith(skip_title):\n return None\n\n # Get contributors (expand from commits by default).\n contributors = set()\n if pull_request.number not in self.args.author_only:\n for commit in pull_request.get_commits():\n if commit.author:\n contributors.add(commit.author.login)\n\n if author in contributors:\n contributors.remove(author)\n contributors = (f\"@{c}\" for c in [author] + sorted(contributors, key=str.lower))\n self.pull_requests[pull_request.number] = (\n f\"{pull_request.title} (#{pull_request.number} by \" f\"{', '.join(contributors)})\"\n )\n\n def generate_release_notes(self):\n \"\"\"Generate release notes contents.\"\"\"\n print(\"Processing pull requests...\")\n self.get_new_pull_requests()\n self.get_old_pull_requests()\n print(\"Done!\")\n\n def get_new_pull_requests(self):\n \"\"\"Get PRs created after the latest release.\n\n This operation also populates a set of previous release commits.\n \"\"\"\n cut_off_at = self.args.cut_off_at\n excluded_pr_numbers = set(self.args.exclude)\n for pull_request in self.remote_repo.get_pulls(state=\"closed\"):\n # Stop getting pull requests after previous release tag or specific PR number.\n cut_off = cut_off_at and pull_request.number == cut_off_at\n if cut_off or pull_request.title == self.latest_tag_name:\n # Get previous release commits SHAs.\n for commit in pull_request.get_commits():\n self.previous_commits.add(commit.sha)\n break\n\n # Skip closed unmerged PRs.\n if not pull_request.merged:\n continue\n\n if pull_request.number in excluded_pr_numbers:\n if self.args.verbose:\n print(f\"Excluding PR #{pull_request.number} as requested\")\n continue\n\n if self.args.verbose:\n messages = [f\"Fetching PR #{pull_request.number}\"]\n if pull_request.number in self.args.author_only:\n messages.append(\"(keeping PR author as a sole contributor)\")\n print(\" \".join(messages))\n\n self.add_pull_request(pull_request)\n\n def get_old_pull_requests(self):\n \"\"\"Get PRs created before the latest release.\"\"\"\n pull_request_numbers = set()\n for commit in self.local_repo.iter_commits():\n if commit.hexsha in self.previous_commits:\n break\n\n try:\n pull_request_number = re.findall(\n r\"#(\\d{3,})\",\n commit.message,\n )[0]\n pull_request_numbers.add(int(pull_request_number))\n except IndexError:\n continue\n\n # Fetch old PRs metadata only. 
Skip all known PRs.\n pull_request_numbers -= set(self.pull_requests.keys())\n pull_request_numbers -= set(self.args.exclude)\n for pull_request_number in pull_request_numbers:\n if self.args.verbose:\n messages = [f\"Fetching PR #{pull_request_number}\"]\n if pull_request_number in self.args.author_only:\n messages.append(\"(keeping PR author as a sole contributor)\")\n print(\" \".join(messages))\n\n try:\n self.add_pull_request(self.remote_repo.get_pull(pull_request_number))\n # 3rd party contributions to forks.\n except UnknownObjectException:\n pass\n\n def print_release_notes(self):\n \"\"\"Print generated release notes.\"\"\"\n print(\"\")\n if self.pull_requests:\n today = date.today()\n print(\n HEADER_TEMPLATE.format(\n day=today.day,\n month=today.strftime(\"%B\"),\n version=self.tag,\n year=today.year,\n )\n )\n print(\"\\n\".join((f\"- {pr}\" for pr in self.sorted_pull_requests)))\n\n else:\n print(f\"No changes since {self.latest_tag_name} release.\")\n\n\nif __name__ == \"__main__\":\n rng = ReleaseNotesGenerator()\n if rng.is_ready:\n rng.generate_release_notes()\n rng.print_release_notes()\n", "path": "scripts/generate_release_notes.py"}]} | 3,355 | 568 |
gh_patches_debug_6946 | rasdani/github-patches | git_diff | joke2k__faker-995 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyfloat producing values above max_value
* Faker version: 2.0.1
* OS: Windows 10
Calling `pyfloat` with the `max_value` set can produce values above `max_value` leading to issues.
### Steps to reproduce
Run my example code below.
### Expected behavior
Given my following example, I would expect to _only_ see numbers in the range `1 <= x <= 2`.
```
for _ in range(25):
print(faker.pyfloat(min_value=1, max_value=2))
```
### Actual behavior
However, my example produces values in the range `1 <= x < 3`, which is confusing and unexpected. Output from running the example above:
```
1.200055
2.709
2.319785
2.773763416717
1.521
1.9
2.52454
2.91
1.87016
2.35457
1.92215
1.7
2.461453
1.922252943
1.416632
1.448
2.0
1.5
2.31
2.5114
2.18
1.8
2.12503540581
2.0
```
</issue>
<code>
[start of faker/providers/python/__init__.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 from decimal import Decimal
6 import sys
7
8 import six
9
10 from .. import BaseProvider
11
12
13 class Provider(BaseProvider):
14 def pybool(self):
15 return self.random_int(0, 1) == 1
16
17 def pystr(self, min_chars=None, max_chars=20):
18 """
19 Generates a random string of upper and lowercase letters.
20 :type min_chars: int
21 :type max_chars: int
22 :return: String. Random of random length between min and max characters.
23 """
24 if min_chars is None:
25 return "".join(self.random_letters(length=max_chars))
26 else:
27 assert (
28                 max_chars >= min_chars), "Maximum length must be greater than or equal to minimum length"
29 return "".join(
30 self.random_letters(
31 length=self.generator.random.randint(min_chars, max_chars),
32 ),
33 )
34
35 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
36 min_value=None, max_value=None):
37
38 if left_digits is not None and left_digits < 0:
39 raise ValueError(
40 'A float number cannot have less than 0 digits in its '
41 'integer part')
42 if right_digits is not None and right_digits < 0:
43 raise ValueError(
44 'A float number cannot have less than 0 digits in its '
45 'fractional part')
46 if left_digits == 0 and right_digits == 0:
47 raise ValueError(
48 'A float number cannot have less than 0 digits in total')
49 if None not in (min_value, max_value) and min_value > max_value:
50 raise ValueError('Min value cannot be greater than max value')
51
52 left_digits = left_digits if left_digits is not None else (
53 self.random_int(1, sys.float_info.dig))
54 right_digits = right_digits if right_digits is not None else (
55 self.random_int(0, sys.float_info.dig - left_digits))
56 sign = ''
57 if (min_value is not None) or (max_value is not None):
58 if min_value is None:
59 min_value = max_value - self.random_int()
60 if max_value is None:
61 max_value = min_value + self.random_int()
62
63 left_number = self.random_int(min_value, max_value)
64 else:
65 sign = '+' if positive else self.random_element(('+', '-'))
66 left_number = self.random_number(left_digits)
67
68 return float("{0}{1}.{2}".format(
69 sign,
70 left_number,
71 self.random_number(right_digits),
72 ))
73
74 def pyint(self, min_value=0, max_value=9999, step=1):
75 return self.generator.random_int(min_value, max_value, step=step)
76
77 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
78 min_value=None, max_value=None):
79
80 float_ = self.pyfloat(
81 left_digits, right_digits, positive, min_value, max_value)
82 return Decimal(str(float_))
83
84 def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):
85 return tuple(
86 self.pyset(
87 nb_elements,
88 variable_nb_elements,
89 *value_types))
90
91 def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):
92 return set(
93 self._pyiterable(
94 nb_elements,
95 variable_nb_elements,
96 *value_types))
97
98 def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):
99 return list(
100 self._pyiterable(
101 nb_elements,
102 variable_nb_elements,
103 *value_types))
104
105 def pyiterable(
106 self,
107 nb_elements=10,
108 variable_nb_elements=True,
109 *value_types):
110 return self.random_element([self.pylist, self.pytuple, self.pyset])(
111 nb_elements, variable_nb_elements, *value_types)
112
113 def _random_type(self, type_list):
114 value_type = self.random_element(type_list)
115
116 method_name = "py{0}".format(value_type)
117 if hasattr(self, method_name):
118 value_type = method_name
119
120 return self.generator.format(value_type)
121
122 def _pyiterable(
123 self,
124 nb_elements=10,
125 variable_nb_elements=True,
126 *value_types):
127
128 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
129 for t in value_types
130 # avoid recursion
131 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
132 if not value_types:
133 value_types = ['str', 'str', 'str', 'str', 'float',
134 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
135
136 if variable_nb_elements:
137 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
138
139 for _ in range(nb_elements):
140 yield self._random_type(value_types)
141
142 def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):
143 """
144 Returns a dictionary.
145
146 :nb_elements: number of elements for dictionary
147 :variable_nb_elements: is use variable number of elements for dictionary
148 :value_types: type of dictionary values
149 """
150 if variable_nb_elements:
151 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
152
153 return dict(zip(
154 self.generator.words(nb_elements),
155 self._pyiterable(nb_elements, False, *value_types),
156 ))
157
158 def pystruct(self, count=10, *value_types):
159
160 value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()
161 for t in value_types
162 # avoid recursion
163 if t != 'struct']
164 if not value_types:
165 value_types = ['str', 'str', 'str', 'str', 'float',
166 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
167
168 types = []
169 d = {}
170 nd = {}
171 for i in range(count):
172 d[self.generator.word()] = self._random_type(value_types)
173 types.append(self._random_type(value_types))
174 nd[self.generator.word()] = {i: self._random_type(value_types),
175 i + 1: [self._random_type(value_types),
176 self._random_type(value_types),
177 self._random_type(value_types)],
178 i + 2: {i: self._random_type(value_types),
179 i + 1: self._random_type(value_types),
180 i + 2: [self._random_type(value_types),
181 self._random_type(value_types)]}}
182 return types, d, nd
183
[end of faker/providers/python/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -60,7 +60,7 @@
if max_value is None:
max_value = min_value + self.random_int()
- left_number = self.random_int(min_value, max_value)
+ left_number = self.random_int(min_value, max_value - 1)
else:
sign = '+' if positive else self.random_element(('+', '-'))
left_number = self.random_number(left_digits)
| {"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -60,7 +60,7 @@\n if max_value is None:\n max_value = min_value + self.random_int()\n \n- left_number = self.random_int(min_value, max_value)\n+ left_number = self.random_int(min_value, max_value - 1)\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n", "issue": "pyfloat producing values above max_value\n* Faker version: 2.0.1\r\n* OS: Windows 10\r\n\r\nCalling `pyfloat` with the `max_value` set can produce values above `max_value` leading to issues.\r\n\r\n### Steps to reproduce\r\n\r\nRun my example code below.\r\n\r\n### Expected behavior\r\n\r\nGiven my following example, I would expect to _only_ see numbers in the range `1 <= x <= 2`.\r\n\r\n```\r\nfor _ in range(25):\r\n print(faker.pyfloat(min_value=1, max_value=2))\r\n```\r\n\r\n### Actual behavior\r\n\r\nHowever, my example will produce values in the range `1 <= x < 3`. This behavior is very confusing and unexpected. Output I got from running my above example:\r\n\r\n```\r\n1.200055\r\n2.709\r\n2.319785\r\n2.773763416717\r\n1.521\r\n1.9\r\n2.52454\r\n2.91\r\n1.87016\r\n2.35457\r\n1.92215\r\n1.7\r\n2.461453\r\n1.922252943\r\n1.416632\r\n1.448\r\n2.0\r\n1.5\r\n2.31\r\n2.5114\r\n2.18\r\n1.8\r\n2.12503540581\r\n2.0\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nimport six\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minium length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = ''\n if (min_value is not None) or (max_value is not None):\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n\n left_number = self.random_int(min_value, max_value)\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n return float(\"{0}{1}.{2}\".format(\n sign,\n left_number,\n self.random_number(right_digits),\n ))\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n *value_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = 
self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types),\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, six.string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float',\n 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]} | 2,784 | 133 |
gh_patches_debug_16037 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2549 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2532 does not support TimeoutSecondsPath and HeartbeatSecondsPath
*cfn-lint version: (`cfn-lint --version`)* 0.61.0
*Description of issue.*
I'm getting an E2532 error when trying to use the `TimeoutSecondsPath` field in a `Task` state.
The field is documented as supported both at https://states-language.net/spec.html#statetypes and https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-task-state.html
Example:
```
"run": {
"Type": "Task",
"Resource": "arn:aws:states:::ecs:runTask.sync",
"Parameters": {
"LaunchType": "FARGATE",
"Cluster": "arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:cluster/${BatchCluster}",
"TaskDefinition.$": "$.taskDefinition"
    },
"TimeoutSecondsPath": "$.timeout",
"ResultPath": null,
"Next": "done"
}
```
Error from the cfn-lint run:
```
E2532 State Machine Definition key (TimeoutSecondsPath) for State (run) of Type (Task) is not valid
```
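
For what it's worth, the false positive can be reproduced with a simplified version of the rule's allow-list check (the real `state_key_types` table is in the source below); the `*Path` variants are simply absent from the `Task` entry:

```
# Simplified reproduction of the key check behind E2532 (illustrative only).
common_state_keys = ["Next", "End", "Type", "Comment", "InputPath", "OutputPath"]
task_keys = ["Resource", "ResultPath", "ResultSelector", "Retry", "Catch",
             "TimeoutSeconds", "Parameters", "HeartbeatSeconds"]  # no *Path variants

state = {
    "Type": "Task",
    "Resource": "arn:aws:states:::ecs:runTask.sync",
    "TimeoutSecondsPath": "$.timeout",
    "Next": "done",
}

for key in state:
    if key not in common_state_keys + task_keys:
        print(f"not valid: {key}")  # -> TimeoutSecondsPath
```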
</issue>
<code>
[start of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import json
6
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class StateMachine(CloudFormationLintRule):
11 """Check State Machine Definition"""
12
13 id = "E2532"
14 shortdesc = "Check State Machine Definition for proper syntax"
15 description = (
16 "Check the State Machine String Definition to make sure its JSON. "
17 "Validate basic syntax of the file to determine validity."
18 )
19 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html"
20 tags = ["resources", "stepfunctions"]
21
22 def __init__(self):
23 """Init"""
24 super().__init__()
25 self.resource_property_types.append("AWS::StepFunctions::StateMachine")
26
27 def _check_state_json(self, def_json, state_name, path):
28 """Check State JSON Definition"""
29 matches = []
30
31 # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html
32 common_state_keys = [
33 "Next",
34 "End",
35 "Type",
36 "Comment",
37 "InputPath",
38 "OutputPath",
39 ]
40 common_state_required_keys = [
41 "Type",
42 ]
43 state_key_types = {
44 "Pass": ["Result", "ResultPath", "Parameters"],
45 "Task": [
46 "Resource",
47 "ResultPath",
48 "ResultSelector",
49 "Retry",
50 "Catch",
51 "TimeoutSeconds",
52 "Parameters",
53 "HeartbeatSeconds",
54 ],
55 "Map": [
56 "MaxConcurrency",
57 "Iterator",
58 "ItemsPath",
59 "ResultPath",
60 "ResultSelector",
61 "Retry",
62 "Catch",
63 "Parameters",
64 ],
65 "Choice": ["Choices", "Default"],
66 "Wait": ["Seconds", "Timestamp", "SecondsPath", "TimestampPath"],
67 "Succeed": [],
68 "Fail": ["Cause", "Error"],
69 "Parallel": [
70 "Branches",
71 "ResultPath",
72 "ResultSelector",
73 "Parameters",
74 "Retry",
75 "Catch",
76 ],
77 }
78 state_required_types = {
79 "Pass": [],
80 "Task": ["Resource"],
81 "Choice": ["Choices"],
82 "Wait": [],
83 "Succeed": [],
84 "Fail": [],
85 "Parallel": ["Branches"],
86 }
87
88 for req_key in common_state_required_keys:
89 if req_key not in def_json:
90 message = f"State Machine Definition required key ({req_key}) for State ({state_name}) is missing"
91 matches.append(RuleMatch(path, message))
92 return matches
93
94 state_type = def_json.get("Type")
95
96 if state_type in state_key_types:
97 for state_key, _ in def_json.items():
98 if state_key not in common_state_keys + state_key_types.get(
99 state_type, []
100 ):
101 message = f"State Machine Definition key ({state_key}) for State ({state_name}) of Type ({state_type}) is not valid"
102 matches.append(RuleMatch(path, message))
103 for req_key in common_state_required_keys + state_required_types.get(
104 state_type, []
105 ):
106 if req_key not in def_json:
107 message = f"State Machine Definition required key ({req_key}) for State ({state_name}) of Type ({state_type}) is missing"
108 matches.append(RuleMatch(path, message))
109 return matches
110 else:
111 message = f"State Machine Definition Type ({state_type}) is not valid"
112 matches.append(RuleMatch(path, message))
113
114 return matches
115
116 def _check_definition_json(self, def_json, path):
117 """Check JSON Definition"""
118 matches = []
119
120 top_level_keys = ["Comment", "StartAt", "TimeoutSeconds", "Version", "States"]
121 top_level_required_keys = ["StartAt", "States"]
122 for top_key, _ in def_json.items():
123 if top_key not in top_level_keys:
124 message = f"State Machine Definition key ({top_key}) is not valid"
125 matches.append(RuleMatch(path, message))
126
127 for req_key in top_level_required_keys:
128 if req_key not in def_json:
129 message = (
130 f"State Machine Definition required key ({req_key}) is missing"
131 )
132 matches.append(RuleMatch(path, message))
133
134 for state_name, state_value in def_json.get("States", {}).items():
135 matches.extend(self._check_state_json(state_value, state_name, path))
136 return matches
137
138 def check_value(self, value, path, fail_on_loads=True):
139 """Check Definition Value"""
140 matches = []
141 try:
142 def_json = json.loads(value)
143 # pylint: disable=W0703
144 except Exception as err:
145 if fail_on_loads:
146 message = f"State Machine Definition needs to be formatted as JSON. Error {err}"
147 matches.append(RuleMatch(path, message))
148 return matches
149
150 self.logger.debug("State Machine definition could not be parsed. Skipping")
151 return matches
152
153 matches.extend(self._check_definition_json(def_json, path))
154 return matches
155
156 def check_sub(self, value, path):
157 """Check Sub Object"""
158 matches = []
159 if isinstance(value, list):
160 matches.extend(self.check_value(value[0], path, False))
161 elif isinstance(value, str):
162 matches.extend(self.check_value(value, path, False))
163
164 return matches
165
166 def match_resource_properties(self, properties, _, path, cfn):
167 """Check CloudFormation Properties"""
168 matches = []
169
170 matches.extend(
171 cfn.check_value(
172 obj=properties,
173 key="DefinitionString",
174 path=path[:],
175 check_value=self.check_value,
176 check_sub=self.check_sub,
177 )
178 )
179
180 return matches
181
[end of src/cfnlint/rules/resources/stepfunctions/StateMachine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
@@ -43,14 +43,18 @@
state_key_types = {
"Pass": ["Result", "ResultPath", "Parameters"],
"Task": [
+ "Credentials",
"Resource",
+ "Parameters",
"ResultPath",
"ResultSelector",
"Retry",
"Catch",
"TimeoutSeconds",
+ "TimeoutSecondsPath",
"Parameters",
"HeartbeatSeconds",
+ "HeartbeatSecondsPath",
],
"Map": [
"MaxConcurrency",
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -43,14 +43,18 @@\n state_key_types = {\n \"Pass\": [\"Result\", \"ResultPath\", \"Parameters\"],\n \"Task\": [\n+ \"Credentials\",\n \"Resource\",\n+ \"Parameters\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"TimeoutSeconds\",\n+ \"TimeoutSecondsPath\",\n \"Parameters\",\n \"HeartbeatSeconds\",\n+ \"HeartbeatSecondsPath\",\n ],\n \"Map\": [\n \"MaxConcurrency\",\n", "issue": "E2532 does not support TimeoutSecondsPath and HeartbeatSecondsPath\n*cfn-lint version: (`cfn-lint --version`)* 0.61.0\r\n\r\n*Description of issue.*\r\nI'm getting the E2532 when trying to use the `TimeoutSecondsPath` parameter in a `Task`.\r\n\r\nIt is mentioned that it is supported both at https://states-language.net/spec.html#statetypes and https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-task-state.html\r\n\r\nExample:\r\n\r\n```\r\n\"run\": {\r\n \"Type\": \"Task\",\r\n \"Resource\": \"arn:aws:states:::ecs:runTask.sync\",\r\n \"Parameters\": {\r\n \"LaunchType\": \"FARGATE\",\r\n \"Cluster\": \"arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:cluster/${BatchCluster}\",\r\n \"TaskDefinition.$\": \"$.taskDefinition\"\r\n }\r\n \"TimeoutSecondsPath\": \"$.timeout\",\r\n \"ResultPath\": null,\r\n \"Next\": \"done\"\r\n}\r\n```\r\n\r\nError from the cfn-lint run:\r\n```\r\nE2532 State Machine Definition key (TimeoutSecondsPath) for State (run) of Type (Task) is not valid\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport json\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n\n id = \"E2532\"\n shortdesc = \"Check State Machine Definition for proper syntax\"\n description = (\n \"Check the State Machine String Definition to make sure its JSON. 
\"\n \"Validate basic syntax of the file to determine validity.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html\"\n tags = [\"resources\", \"stepfunctions\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.resource_property_types.append(\"AWS::StepFunctions::StateMachine\")\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n \"Next\",\n \"End\",\n \"Type\",\n \"Comment\",\n \"InputPath\",\n \"OutputPath\",\n ]\n common_state_required_keys = [\n \"Type\",\n ]\n state_key_types = {\n \"Pass\": [\"Result\", \"ResultPath\", \"Parameters\"],\n \"Task\": [\n \"Resource\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"TimeoutSeconds\",\n \"Parameters\",\n \"HeartbeatSeconds\",\n ],\n \"Map\": [\n \"MaxConcurrency\",\n \"Iterator\",\n \"ItemsPath\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"Parameters\",\n ],\n \"Choice\": [\"Choices\", \"Default\"],\n \"Wait\": [\"Seconds\", \"Timestamp\", \"SecondsPath\", \"TimestampPath\"],\n \"Succeed\": [],\n \"Fail\": [\"Cause\", \"Error\"],\n \"Parallel\": [\n \"Branches\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Parameters\",\n \"Retry\",\n \"Catch\",\n ],\n }\n state_required_types = {\n \"Pass\": [],\n \"Task\": [\"Resource\"],\n \"Choice\": [\"Choices\"],\n \"Wait\": [],\n \"Succeed\": [],\n \"Fail\": [],\n \"Parallel\": [\"Branches\"],\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get(\"Type\")\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(\n state_type, []\n ):\n message = f\"State Machine Definition key ({state_key}) for State ({state_name}) of Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(\n state_type, []\n ):\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) of Type ({state_type}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = f\"State Machine Definition Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\"Comment\", \"StartAt\", \"TimeoutSeconds\", \"Version\", \"States\"]\n top_level_required_keys = [\"StartAt\", \"States\"]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = f\"State Machine Definition key ({top_key}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = (\n f\"State Machine Definition required key ({req_key}) is missing\"\n )\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get(\"States\", {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, 
path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = f\"State Machine Definition needs to be formatted as JSON. Error {err}\"\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug(\"State Machine definition could not be parsed. Skipping\")\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, str):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"DefinitionString\",\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}]} | 2,504 | 173 |
gh_patches_debug_12286 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flatpages and Redirects broken when using ASGI
Examples:
- https://grand-challenge.org/about/ (flatpage, exists)
- https://grand-challenge.org/about (should redirect to https://grand-challenge.org/about/)
- https://parse2022.grand-challenge.org/Participation (should redirect to https://parse2022.grand-challenge.org/Participation/)
- https://parse2022.grand-challenge.org/gfsdfgdfdsg (should redirect to https://parse2022.grand-challenge.org/gfsdfgdfdsg/, which should 404).
Error occurs in the clickjacking middleware:
https://sentry.io/organizations/grand-challenge/issues/3374906811/?project=303639&query=is%3Aignored
</issue>
<code>
[start of app/grandchallenge/core/middleware.py]
1 from allauth_2fa.middleware import BaseRequire2FAMiddleware
2 from django.urls import Resolver404, get_resolver
3 from django.utils.deprecation import MiddlewareMixin
4
5
6 class RequireStaffAndSuperuser2FAMiddleware(BaseRequire2FAMiddleware):
7 def require_2fa(self, request):
8 # Staff users and superusers are required to have 2FA.
9 return request.user.is_staff or request.user.is_superuser
10
11
12 class TwoFactorMiddleware(MiddlewareMixin):
13 """
14 Reset the login flow if another page is loaded halfway through the login.
15 (I.e. if the user has logged in with a username/password, but not yet
16 entered their two-factor credentials.) This makes sure a user does not stay
17 half logged in by mistake.
18 """
19
20 def __init__(self, get_response):
21 self.get_response = get_response
22
23 def process_request(self, request):
24 try:
25 match = get_resolver(request.urlconf).resolve(request.path)
26 if (
27 match
28 and not match.url_name
29 or not match.url_name.startswith("two-factor-authenticate")
30 ):
31 try:
32 del request.session["allauth_2fa_user_id"]
33 except KeyError:
34 pass
35 except Resolver404:
36 return self.get_response(request)
37
[end of app/grandchallenge/core/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/middleware.py b/app/grandchallenge/core/middleware.py
--- a/app/grandchallenge/core/middleware.py
+++ b/app/grandchallenge/core/middleware.py
@@ -17,9 +17,6 @@
half logged in by mistake.
"""
- def __init__(self, get_response):
- self.get_response = get_response
-
def process_request(self, request):
try:
match = get_resolver(request.urlconf).resolve(request.path)
@@ -33,4 +30,4 @@
except KeyError:
pass
except Resolver404:
- return self.get_response(request)
+ pass
| {"golden_diff": "diff --git a/app/grandchallenge/core/middleware.py b/app/grandchallenge/core/middleware.py\n--- a/app/grandchallenge/core/middleware.py\n+++ b/app/grandchallenge/core/middleware.py\n@@ -17,9 +17,6 @@\n half logged in by mistake.\r\n \"\"\"\r\n \r\n- def __init__(self, get_response):\r\n- self.get_response = get_response\r\n-\r\n def process_request(self, request):\r\n try:\r\n match = get_resolver(request.urlconf).resolve(request.path)\r\n@@ -33,4 +30,4 @@\n except KeyError:\r\n pass\r\n except Resolver404:\r\n- return self.get_response(request)\r\n+ pass\n", "issue": "Flatpages and Redirects broken when using ASGI\nExamples:\r\n\r\n- https://grand-challenge.org/about/ (flatpage, exists)\r\n- https://grand-challenge.org/about (should redirect to https://grand-challenge.org/about/)\r\n- https://parse2022.grand-challenge.org/Participation (should redirect to https://parse2022.grand-challenge.org/Participation/)\r\n- https://parse2022.grand-challenge.org/gfsdfgdfdsg (should redirect to https://parse2022.grand-challenge.org/gfsdfgdfdsg/, which should 404).\r\n\r\nError occurs in the clickjacking middleware:\r\n\r\nhttps://sentry.io/organizations/grand-challenge/issues/3374906811/?project=303639&query=is%3Aignored\n", "before_files": [{"content": "from allauth_2fa.middleware import BaseRequire2FAMiddleware\r\nfrom django.urls import Resolver404, get_resolver\r\nfrom django.utils.deprecation import MiddlewareMixin\r\n\r\n\r\nclass RequireStaffAndSuperuser2FAMiddleware(BaseRequire2FAMiddleware):\r\n def require_2fa(self, request):\r\n # Staff users and superusers are required to have 2FA.\r\n return request.user.is_staff or request.user.is_superuser\r\n\r\n\r\nclass TwoFactorMiddleware(MiddlewareMixin):\r\n \"\"\"\r\n Reset the login flow if another page is loaded halfway through the login.\r\n (I.e. if the user has logged in with a username/password, but not yet\r\n entered their two-factor credentials.) This makes sure a user does not stay\r\n half logged in by mistake.\r\n \"\"\"\r\n\r\n def __init__(self, get_response):\r\n self.get_response = get_response\r\n\r\n def process_request(self, request):\r\n try:\r\n match = get_resolver(request.urlconf).resolve(request.path)\r\n if (\r\n match\r\n and not match.url_name\r\n or not match.url_name.startswith(\"two-factor-authenticate\")\r\n ):\r\n try:\r\n del request.session[\"allauth_2fa_user_id\"]\r\n except KeyError:\r\n pass\r\n except Resolver404:\r\n return self.get_response(request)\r\n", "path": "app/grandchallenge/core/middleware.py"}]} | 1,065 | 149 |
gh_patches_debug_39502 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
toolstation spider now not returning any UK stores
The most recent run of the toolstation.py spider from 2023-05-15 has lost about 550 stores compared to the previous run from 2023-04-15. This corresponds to all the UK branches (on the toolstation.com website).
It looks like toolstation.com has changed its mapping provider (from Google to Woosmap, which ironically uses an OSM basemap) and as a result the machine readable store info formatting has changed. The ATP spider now fails to find the expected JS script fragment, throws an error, and doesn't return the branch. The .fr and .nl branches still use a Google map on their branch pages, so the spider still works for them (at least for now).
I think the data we need for the UK branches is still there in a structured form (see e.g. the UK branch page at https://www.toolstation.com/branches/aldridge), but it will need some custom code to parse it out. A rough sketch follows.
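
As a starting point, here is a sketch of pulling that structured payload out of the reworked UK pages. The selector and parsing approach are assumptions to be verified against a live page, not a finished fix:

```
import chompjs  # lenient JS-object parser; handles literals json.loads rejects
import scrapy


class ToolstationUKBranchSketch(scrapy.Spider):
    """Illustrative only - the Woosmap-based UK pages appear to embed branch data
    in the Nuxt bootstrap script rather than the old `var store = [...]` block."""

    name = "toolstation_uk_branch_sketch"
    start_urls = ["https://www.toolstation.com/branches/aldridge"]

    def parse(self, response):
        js = response.xpath('//script[contains(text(), "__NUXT__")]/text()').get()
        if js is None:
            self.logger.error("No __NUXT__ payload on %s", response.url)
            return
        # Dump every object literal found in the payload; the branch record and
        # its field names still need to be identified from this raw output.
        for obj in chompjs.parse_js_objects(js):
            yield {"url": response.url, "raw": obj}
```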
</issue>
<code>
[start of locations/spiders/toolstation.py]
1 import json
2 import re
3
4 import scrapy
5
6 from locations.dict_parser import DictParser
7
8
9 class ToolstationSpider(scrapy.spiders.SitemapSpider):
10 name = "toolstation"
11 item_attributes = {"brand": "Toolstation", "brand_wikidata": "Q7824103"}
12 sitemap_urls = [
13 "https://www.toolstation.com/sitemap/branches.xml",
14 "https://www.toolstation.fr/sitemap/branches.xml",
15 "https://www.toolstation.nl/sitemap/branches.xml",
16 ]
17
18 def parse(self, response):
19 pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
20 store = json.loads(response.xpath('//script[contains(., "var store")]/text()').re(pattern)[0])[0]
21 item = DictParser.parse(store)
22 item["website"] = response.url
23 item["addr_full"] = store["address_text"].split("<br /><br />")[0]
24 yield item
25
[end of locations/spiders/toolstation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/toolstation.py b/locations/spiders/toolstation.py
--- a/locations/spiders/toolstation.py
+++ b/locations/spiders/toolstation.py
@@ -1,9 +1,12 @@
import json
import re
+import chompjs
import scrapy
from locations.dict_parser import DictParser
+from locations.hours import OpeningHours, day_range, sanitise_day
+from locations.spiders.vapestore_gb import clean_address
class ToolstationSpider(scrapy.spiders.SitemapSpider):
@@ -14,11 +17,64 @@
"https://www.toolstation.fr/sitemap/branches.xml",
"https://www.toolstation.nl/sitemap/branches.xml",
]
+ gm_pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
+ params_pattern = re.compile(r"function\(([_$\w,\s]+)\)")
+ values_pattern = re.compile(r"}\((.+)\)\);")
+ stores_pattern = re.compile(r"data:(\[.+\]),fe")
def parse(self, response):
- pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
- store = json.loads(response.xpath('//script[contains(., "var store")]/text()').re(pattern)[0])[0]
- item = DictParser.parse(store)
- item["website"] = response.url
- item["addr_full"] = store["address_text"].split("<br /><br />")[0]
- yield item
+ if js := response.xpath('//script[contains(., "var store")]/text()').get():
+ store = json.loads(re.search(self.gm_pattern, js).group(1))[0]
+ item = DictParser.parse(store)
+ item["website"] = response.url
+ item["addr_full"] = clean_address(store["address_text"].split("<br /><br />")[0])
+ yield item
+ elif js := response.xpath('//script[contains(text(), "__NUXT__")]/text()').get():
+ # stores is actually a JS function, so we have to parse the parameters and values
+ params = re.search(self.params_pattern, js).group(1).split(",")
+ values = chompjs.parse_js_object("[" + re.search(self.values_pattern, js).group(1) + "]")
+ args = {}
+ for i in range(0, len(params)):
+ args[params[i]] = values[i]
+
+ store = chompjs.parse_js_object(re.search(self.stores_pattern, js).group(1))[0]["branch"]
+ self.populate(store, args)
+
+ if store["status"] != 1:
+ return
+
+ item = DictParser.parse(store)
+ item["website"] = response.url
+ item["addr_full"] = store["address_text"]
+
+ item["opening_hours"] = OpeningHours()
+ for rule in store["opening_hours"]:
+ days, times = rule.split(": ", 1)
+ if "-" in days:
+ start_day, end_day = days.split("-")
+ else:
+ start_day = end_day = days
+ start_day = sanitise_day(start_day)
+ end_day = sanitise_day(end_day)
+ if start_day and end_day:
+ start_time, end_time = times.strip().split("-")
+ item["opening_hours"].add_days_range(
+ day_range(start_day, end_day), start_time, end_time, time_format="%H%M"
+ )
+
+ yield item
+
+ @staticmethod
+ def populate(data: dict, args: dict):
+ for key, value in data.items():
+ if isinstance(value, str):
+ if value in args:
+ data[key] = args[value]
+ elif isinstance(value, list):
+ for i, x in enumerate(value):
+ if isinstance(x, dict):
+ ToolstationSpider.populate(x, args)
+ elif x in args:
+ value[i] = args[x]
+ elif isinstance(value, dict):
+ ToolstationSpider.populate(value, args)
| {"golden_diff": "diff --git a/locations/spiders/toolstation.py b/locations/spiders/toolstation.py\n--- a/locations/spiders/toolstation.py\n+++ b/locations/spiders/toolstation.py\n@@ -1,9 +1,12 @@\n import json\n import re\n \n+import chompjs\n import scrapy\n \n from locations.dict_parser import DictParser\n+from locations.hours import OpeningHours, day_range, sanitise_day\n+from locations.spiders.vapestore_gb import clean_address\n \n \n class ToolstationSpider(scrapy.spiders.SitemapSpider):\n@@ -14,11 +17,64 @@\n \"https://www.toolstation.fr/sitemap/branches.xml\",\n \"https://www.toolstation.nl/sitemap/branches.xml\",\n ]\n+ gm_pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n+ params_pattern = re.compile(r\"function\\(([_$\\w,\\s]+)\\)\")\n+ values_pattern = re.compile(r\"}\\((.+)\\)\\);\")\n+ stores_pattern = re.compile(r\"data:(\\[.+\\]),fe\")\n \n def parse(self, response):\n- pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n- store = json.loads(response.xpath('//script[contains(., \"var store\")]/text()').re(pattern)[0])[0]\n- item = DictParser.parse(store)\n- item[\"website\"] = response.url\n- item[\"addr_full\"] = store[\"address_text\"].split(\"<br /><br />\")[0]\n- yield item\n+ if js := response.xpath('//script[contains(., \"var store\")]/text()').get():\n+ store = json.loads(re.search(self.gm_pattern, js).group(1))[0]\n+ item = DictParser.parse(store)\n+ item[\"website\"] = response.url\n+ item[\"addr_full\"] = clean_address(store[\"address_text\"].split(\"<br /><br />\")[0])\n+ yield item\n+ elif js := response.xpath('//script[contains(text(), \"__NUXT__\")]/text()').get():\n+ # stores is actually a JS function, so we have to parse the parameters and values\n+ params = re.search(self.params_pattern, js).group(1).split(\",\")\n+ values = chompjs.parse_js_object(\"[\" + re.search(self.values_pattern, js).group(1) + \"]\")\n+ args = {}\n+ for i in range(0, len(params)):\n+ args[params[i]] = values[i]\n+\n+ store = chompjs.parse_js_object(re.search(self.stores_pattern, js).group(1))[0][\"branch\"]\n+ self.populate(store, args)\n+\n+ if store[\"status\"] != 1:\n+ return\n+\n+ item = DictParser.parse(store)\n+ item[\"website\"] = response.url\n+ item[\"addr_full\"] = store[\"address_text\"]\n+\n+ item[\"opening_hours\"] = OpeningHours()\n+ for rule in store[\"opening_hours\"]:\n+ days, times = rule.split(\": \", 1)\n+ if \"-\" in days:\n+ start_day, end_day = days.split(\"-\")\n+ else:\n+ start_day = end_day = days\n+ start_day = sanitise_day(start_day)\n+ end_day = sanitise_day(end_day)\n+ if start_day and end_day:\n+ start_time, end_time = times.strip().split(\"-\")\n+ item[\"opening_hours\"].add_days_range(\n+ day_range(start_day, end_day), start_time, end_time, time_format=\"%H%M\"\n+ )\n+\n+ yield item\n+\n+ @staticmethod\n+ def populate(data: dict, args: dict):\n+ for key, value in data.items():\n+ if isinstance(value, str):\n+ if value in args:\n+ data[key] = args[value]\n+ elif isinstance(value, list):\n+ for i, x in enumerate(value):\n+ if isinstance(x, dict):\n+ ToolstationSpider.populate(x, args)\n+ elif x in args:\n+ value[i] = args[x]\n+ elif isinstance(value, dict):\n+ ToolstationSpider.populate(value, args)\n", "issue": "toolstation spider now not returning any UK stores\nThe most recent run of the toolstation.py spider from 2023-05-15 has lost about 550 stores compared to the previous run from 2023-04-15. 
This corresponds to all the UK branches (on the toolstation.com website).\r\n\r\nIt looks like toolstation.com has changed its mapping provider (from Google to Woosmap, which ironically uses an OSM basemap) and as a result the machine readable store info formatting has changed. The ATP spider now fails to find the expected JS script fragment, throws an error, and doesn't return the branch. The .fr and .nl branches still use a Google map on their branch pages, so the spider still works for them (at least for now).\r\n\r\nI think the data we need for the UK branches is still there in a structured form, see e.g. a UK branch page at https://www.toolstation.com/branches/aldridge , but it will need some custom code to parse it out.\n", "before_files": [{"content": "import json\nimport re\n\nimport scrapy\n\nfrom locations.dict_parser import DictParser\n\n\nclass ToolstationSpider(scrapy.spiders.SitemapSpider):\n name = \"toolstation\"\n item_attributes = {\"brand\": \"Toolstation\", \"brand_wikidata\": \"Q7824103\"}\n sitemap_urls = [\n \"https://www.toolstation.com/sitemap/branches.xml\",\n \"https://www.toolstation.fr/sitemap/branches.xml\",\n \"https://www.toolstation.nl/sitemap/branches.xml\",\n ]\n\n def parse(self, response):\n pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n store = json.loads(response.xpath('//script[contains(., \"var store\")]/text()').re(pattern)[0])[0]\n item = DictParser.parse(store)\n item[\"website\"] = response.url\n item[\"addr_full\"] = store[\"address_text\"].split(\"<br /><br />\")[0]\n yield item\n", "path": "locations/spiders/toolstation.py"}]} | 1,017 | 914 |
gh_patches_debug_22668 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-161 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make HMCState picklable
Currently, we use `laxtuple` to make HMCState. This, however, makes it impossible to pickle HMCState (unless we transform it to a dict/tuple/namedtuple).
</issue>
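For context, a minimal sketch of the approach the golden diff below takes: replace the `laxtuple` with a plain `namedtuple` (which pickles out of the box) and register it with JAX's pytree machinery so it still works inside `jit`/`lax` code. Field names follow the existing `HMCState`.

```python
from collections import namedtuple

from jax.tree_util import register_pytree_node

HMCState = namedtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps',
                                   'accept_prob', 'step_size', 'inverse_mass_matrix', 'rng'])

# namedtuples are picklable; registering the type as a pytree node lets JAX
# transformations flatten/unflatten the state just like the old laxtuple did.
register_pytree_node(
    HMCState,
    lambda xs: (tuple(xs), None),   # flatten: (children, aux_data)
    lambda _, xs: HMCState(*xs),    # unflatten
)

# Preserve the laxtuple-style `.update()` API used throughout mcmc.py.
HMCState.update = HMCState._replace
```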
<code>
[start of numpyro/mcmc.py]
1 import math
2 import os
3
4 import tqdm
5
6 import jax.numpy as np
7 from jax import jit, partial, random
8 from jax.flatten_util import ravel_pytree
9 from jax.random import PRNGKey
10
11 import numpyro.distributions as dist
12 from numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter
13 from numpyro.util import cond, fori_loop, laxtuple
14
15 HMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',
16 'step_size', 'inverse_mass_matrix', 'rng'])
17
18
19 def _get_num_steps(step_size, trajectory_length):
20 num_steps = np.array(trajectory_length / step_size, dtype=np.int32)
21 return np.where(num_steps < 1, np.array(1, dtype=np.int32), num_steps)
22
23
24 def _sample_momentum(unpack_fn, inverse_mass_matrix, rng):
25 if inverse_mass_matrix.ndim == 1:
26 r = dist.Normal(0., np.sqrt(np.reciprocal(inverse_mass_matrix))).sample(rng)
27 return unpack_fn(r)
28 elif inverse_mass_matrix.ndim == 2:
29 raise NotImplementedError
30
31
32 def _euclidean_ke(inverse_mass_matrix, r):
33 r, _ = ravel_pytree(r)
34
35 if inverse_mass_matrix.ndim == 2:
36 v = np.matmul(inverse_mass_matrix, r)
37 elif inverse_mass_matrix.ndim == 1:
38 v = np.multiply(inverse_mass_matrix, r)
39
40 return 0.5 * np.dot(v, r)
41
42
43 def hmc(potential_fn, kinetic_fn=None, algo='NUTS'):
44 r"""
45 Hamiltonian Monte Carlo inference, using either fixed number of
46 steps or the No U-Turn Sampler (NUTS) with adaptive path length.
47
48 **References:**
49
50 1. *MCMC Using Hamiltonian Dynamics*, Radford M. Neal
51 2. *The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo*,
52 Matthew D. Hoffman, and Andrew Gelman.
53
54 :param potential_fn: Python callable that computes the potential energy
55 given input parameters. The input parameters to `potential_fn` can be
56 any python collection type, provided that `init_samples` argument to
57 `init_kernel` has the same type.
58 :param kinetic_fn: Python callable that returns the kinetic energy given
59 inverse mass matrix and momentum. If not provided, the default is
60 euclidean kinetic energy.
61 :param str algo: Whether to run ``HMC`` with fixed number of steps or ``NUTS``
62 with adaptive path length. Default is ``NUTS``.
63 :return init_kernel, sample_kernel: Returns a tuple of callables, the first
64 one to initialize the sampler, and the second one to generate samples
65 given an existing one.
66
67 **Example**
68
69 .. testsetup::
70
71 import jax
72 from jax import random
73 import jax.numpy as np
74 import numpyro.distributions as dist
75 from numpyro.handlers import sample
76 from numpyro.hmc_util import initialize_model
77 from numpyro.mcmc import hmc
78 from numpyro.util import fori_collect
79
80 .. doctest::
81
82 >>> true_coefs = np.array([1., 2., 3.])
83 >>> data = random.normal(random.PRNGKey(2), (2000, 3))
84 >>> dim = 3
85 >>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample(random.PRNGKey(3))
86 >>>
87 >>> def model(data, labels):
88 ... coefs_mean = np.zeros(dim)
89 ... coefs = sample('beta', dist.Normal(coefs_mean, np.ones(3)))
90 ... return sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
91 >>>
92 >>> init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(0), model, data, labels)
93 >>> init_kernel, sample_kernel = hmc(potential_fn, algo='NUTS')
94 >>> hmc_state = init_kernel(init_params,
95 ... trajectory_length=10,
96 ... num_warmup_steps=300)
97 >>> hmc_states = fori_collect(500, sample_kernel, hmc_state,
98 ... transform=lambda x: constrain_fn(x.z))
99 >>> print(np.mean(hmc_states['beta'], axis=0))
100 [0.9153987 2.0754058 2.9621222]
101 """
102 if kinetic_fn is None:
103 kinetic_fn = _euclidean_ke
104 vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)
105 trajectory_len = None
106 max_treedepth = None
107 momentum_generator = None
108 wa_update = None
109
110 def init_kernel(init_samples,
111 num_warmup,
112 step_size=1.0,
113 adapt_step_size=True,
114 adapt_mass_matrix=True,
115 diag_mass=True,
116 target_accept_prob=0.8,
117 trajectory_length=2*math.pi,
118 max_tree_depth=10,
119 run_warmup=True,
120 progbar=True,
121 rng=PRNGKey(0)):
122 """
123 Initializes the HMC sampler.
124
125 :param init_samples: Initial parameters to begin sampling. The type can
126 must be consistent with the input type to `potential_fn`.
127 :param int num_warmup_steps: Number of warmup steps; samples generated
128 during warmup are discarded.
129 :param float step_size: Determines the size of a single step taken by the
130 verlet integrator while computing the trajectory using Hamiltonian
131 dynamics. If not specified, it will be set to 1.
132 :param bool adapt_step_size: A flag to decide if we want to adapt step_size
133 during warm-up phase using Dual Averaging scheme.
134 :param bool adapt_mass_matrix: A flag to decide if we want to adapt mass
135 matrix during warm-up phase using Welford scheme.
136 :param bool diag_mass: A flag to decide if mass matrix is diagonal (default)
137 or dense (if set to ``False``).
138 :param float target_accept_prob: Target acceptance probability for step size
139 adaptation using Dual Averaging. Increasing this value will lead to a smaller
140 step size, hence the sampling will be slower but more robust. Default to 0.8.
141 :param float trajectory_length: Length of a MCMC trajectory for HMC. Default
142 value is :math:`2\\pi`.
143 :param int max_tree_depth: Max depth of the binary tree created during the doubling
144 scheme of NUTS sampler. Defaults to 10.
145 :param bool run_warmup: Flag to decide whether warmup is run. If ``True``,
146 `init_kernel` returns an initial :data:`HMCState` that can be used to
147 generate samples using MCMC. Else, returns the arguments and callable
148 that does the initial adaptation.
149 :param bool progbar: Whether to enable progress bar updates. Defaults to
150 ``True``.
151 :param bool heuristic_step_size: If ``True``, a coarse grained adjustment of
152 step size is done at the beginning of each adaptation window to achieve
153 `target_acceptance_prob`.
154 :param jax.random.PRNGKey rng: random key to be used as the source of
155 randomness.
156 """
157 step_size = float(step_size)
158 nonlocal momentum_generator, wa_update, trajectory_len, max_treedepth
159 trajectory_len = float(trajectory_length)
160 max_treedepth = max_tree_depth
161 z = init_samples
162 z_flat, unravel_fn = ravel_pytree(z)
163 momentum_generator = partial(_sample_momentum, unravel_fn)
164
165 find_reasonable_ss = partial(find_reasonable_step_size,
166 potential_fn, kinetic_fn,
167 momentum_generator)
168
169 wa_init, wa_update = warmup_adapter(num_warmup,
170 adapt_step_size=adapt_step_size,
171 adapt_mass_matrix=adapt_mass_matrix,
172 diag_mass=diag_mass,
173 target_accept_prob=target_accept_prob,
174 find_reasonable_step_size=find_reasonable_ss)
175
176 rng_hmc, rng_wa = random.split(rng)
177 wa_state = wa_init(z, rng_wa, step_size, mass_matrix_size=np.size(z_flat))
178 r = momentum_generator(wa_state.inverse_mass_matrix, rng)
179 vv_state = vv_init(z, r)
180 hmc_state = HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, 0, 0.,
181 wa_state.step_size, wa_state.inverse_mass_matrix, rng_hmc)
182
183 wa_update = jit(wa_update)
184 if run_warmup:
185 # JIT if progress bar updates not required
186 if not progbar:
187 hmc_state, _ = jit(fori_loop, static_argnums=(2,))(0, num_warmup,
188 warmup_update,
189 (hmc_state, wa_state))
190 else:
191 for i in tqdm.trange(num_warmup):
192 hmc_state, wa_state = warmup_update(i, (hmc_state, wa_state))
193 return hmc_state
194 else:
195 return hmc_state, wa_state, warmup_update
196
197 def warmup_update(t, states):
198 hmc_state, wa_state = states
199 hmc_state = sample_kernel(hmc_state)
200 wa_state = wa_update(t, hmc_state.accept_prob, hmc_state.z, wa_state)
201 hmc_state = hmc_state.update(step_size=wa_state.step_size,
202 inverse_mass_matrix=wa_state.inverse_mass_matrix)
203 return hmc_state, wa_state
204
205 def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):
206 num_steps = _get_num_steps(step_size, trajectory_len)
207 vv_state_new = fori_loop(0, num_steps,
208 lambda i, val: vv_update(step_size, inverse_mass_matrix, val),
209 vv_state)
210 energy_old = vv_state.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state.r)
211 energy_new = vv_state_new.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state_new.r)
212 delta_energy = energy_new - energy_old
213 delta_energy = np.where(np.isnan(delta_energy), np.inf, delta_energy)
214 accept_prob = np.clip(np.exp(-delta_energy), a_max=1.0)
215 transition = random.bernoulli(rng, accept_prob)
216 vv_state = cond(transition,
217 vv_state_new, lambda state: state,
218 vv_state, lambda state: state)
219 return vv_state, num_steps, accept_prob
220
221 def _nuts_next(step_size, inverse_mass_matrix, vv_state, rng):
222 binary_tree = build_tree(vv_update, kinetic_fn, vv_state,
223 inverse_mass_matrix, step_size, rng,
224 max_tree_depth=max_treedepth)
225 accept_prob = binary_tree.sum_accept_probs / binary_tree.num_proposals
226 num_steps = binary_tree.num_proposals
227 vv_state = vv_state.update(z=binary_tree.z_proposal,
228 potential_energy=binary_tree.z_proposal_pe,
229 z_grad=binary_tree.z_proposal_grad)
230 return vv_state, num_steps, accept_prob
231
232 _next = _nuts_next if algo == 'NUTS' else _hmc_next
233
234 @jit
235 def sample_kernel(hmc_state):
236 """
237 Given an existing :data:`HMCState`, run HMC with fixed (possibly adapted)
238 step size and return a new :data:`HMCState`.
239
240 :param hmc_state: Current sample (and associated state).
241 :return: new proposed :data:`HMCState` from simulating
242 Hamiltonian dynamics given existing state.
243 """
244 rng, rng_momentum, rng_transition = random.split(hmc_state.rng, 3)
245 r = momentum_generator(hmc_state.inverse_mass_matrix, rng_momentum)
246 vv_state = IntegratorState(hmc_state.z, r, hmc_state.potential_energy, hmc_state.z_grad)
247 vv_state, num_steps, accept_prob = _next(hmc_state.step_size,
248 hmc_state.inverse_mass_matrix,
249 vv_state, rng_transition)
250 return HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, num_steps,
251 accept_prob, hmc_state.step_size, hmc_state.inverse_mass_matrix, rng)
252
253 # Make `init_kernel` and `sample_kernel` visible from the global scope once
254 # `hmc` is called for sphinx doc generation.
255 if 'SPHINX_BUILD' in os.environ:
256 hmc.init_kernel = init_kernel
257 hmc.sample_kernel = sample_kernel
258
259 return init_kernel, sample_kernel
260
[end of numpyro/mcmc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpyro/mcmc.py b/numpyro/mcmc.py
--- a/numpyro/mcmc.py
+++ b/numpyro/mcmc.py
@@ -1,5 +1,6 @@
import math
import os
+from collections import namedtuple
import tqdm
@@ -7,13 +8,24 @@
from jax import jit, partial, random
from jax.flatten_util import ravel_pytree
from jax.random import PRNGKey
+from jax.tree_util import register_pytree_node
import numpyro.distributions as dist
from numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter
-from numpyro.util import cond, fori_loop, laxtuple
+from numpyro.util import cond, fori_loop
-HMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',
- 'step_size', 'inverse_mass_matrix', 'rng'])
+HMCState = namedtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',
+ 'step_size', 'inverse_mass_matrix', 'rng'])
+
+
+register_pytree_node(
+ HMCState,
+ lambda xs: (tuple(xs), None),
+ lambda _, xs: HMCState(*xs)
+)
+
+
+HMCState.update = HMCState._replace
def _get_num_steps(step_size, trajectory_length):
| {"golden_diff": "diff --git a/numpyro/mcmc.py b/numpyro/mcmc.py\n--- a/numpyro/mcmc.py\n+++ b/numpyro/mcmc.py\n@@ -1,5 +1,6 @@\n import math\n import os\n+from collections import namedtuple\n \n import tqdm\n \n@@ -7,13 +8,24 @@\n from jax import jit, partial, random\n from jax.flatten_util import ravel_pytree\n from jax.random import PRNGKey\n+from jax.tree_util import register_pytree_node\n \n import numpyro.distributions as dist\n from numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter\n-from numpyro.util import cond, fori_loop, laxtuple\n+from numpyro.util import cond, fori_loop\n \n-HMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',\n- 'step_size', 'inverse_mass_matrix', 'rng'])\n+HMCState = namedtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',\n+ 'step_size', 'inverse_mass_matrix', 'rng'])\n+\n+\n+register_pytree_node(\n+ HMCState,\n+ lambda xs: (tuple(xs), None),\n+ lambda _, xs: HMCState(*xs)\n+)\n+\n+\n+HMCState.update = HMCState._replace\n \n \n def _get_num_steps(step_size, trajectory_length):\n", "issue": "Make HMCState picklable\nCurrently, we use `laxtuple` to make HMCState. This way however make it not possible to pickle HMCState (unless we transform it to a dict/tuple/namedtuple).\n", "before_files": [{"content": "import math\nimport os\n\nimport tqdm\n\nimport jax.numpy as np\nfrom jax import jit, partial, random\nfrom jax.flatten_util import ravel_pytree\nfrom jax.random import PRNGKey\n\nimport numpyro.distributions as dist\nfrom numpyro.hmc_util import IntegratorState, build_tree, find_reasonable_step_size, velocity_verlet, warmup_adapter\nfrom numpyro.util import cond, fori_loop, laxtuple\n\nHMCState = laxtuple('HMCState', ['z', 'z_grad', 'potential_energy', 'num_steps', 'accept_prob',\n 'step_size', 'inverse_mass_matrix', 'rng'])\n\n\ndef _get_num_steps(step_size, trajectory_length):\n num_steps = np.array(trajectory_length / step_size, dtype=np.int32)\n return np.where(num_steps < 1, np.array(1, dtype=np.int32), num_steps)\n\n\ndef _sample_momentum(unpack_fn, inverse_mass_matrix, rng):\n if inverse_mass_matrix.ndim == 1:\n r = dist.Normal(0., np.sqrt(np.reciprocal(inverse_mass_matrix))).sample(rng)\n return unpack_fn(r)\n elif inverse_mass_matrix.ndim == 2:\n raise NotImplementedError\n\n\ndef _euclidean_ke(inverse_mass_matrix, r):\n r, _ = ravel_pytree(r)\n\n if inverse_mass_matrix.ndim == 2:\n v = np.matmul(inverse_mass_matrix, r)\n elif inverse_mass_matrix.ndim == 1:\n v = np.multiply(inverse_mass_matrix, r)\n\n return 0.5 * np.dot(v, r)\n\n\ndef hmc(potential_fn, kinetic_fn=None, algo='NUTS'):\n r\"\"\"\n Hamiltonian Monte Carlo inference, using either fixed number of\n steps or the No U-Turn Sampler (NUTS) with adaptive path length.\n\n **References:**\n\n 1. *MCMC Using Hamiltonian Dynamics*, Radford M. Neal\n 2. *The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo*,\n Matthew D. Hoffman, and Andrew Gelman.\n\n :param potential_fn: Python callable that computes the potential energy\n given input parameters. The input parameters to `potential_fn` can be\n any python collection type, provided that `init_samples` argument to\n `init_kernel` has the same type.\n :param kinetic_fn: Python callable that returns the kinetic energy given\n inverse mass matrix and momentum. 
If not provided, the default is\n euclidean kinetic energy.\n :param str algo: Whether to run ``HMC`` with fixed number of steps or ``NUTS``\n with adaptive path length. Default is ``NUTS``.\n :return init_kernel, sample_kernel: Returns a tuple of callables, the first\n one to initialize the sampler, and the second one to generate samples\n given an existing one.\n\n **Example**\n\n .. testsetup::\n\n import jax\n from jax import random\n import jax.numpy as np\n import numpyro.distributions as dist\n from numpyro.handlers import sample\n from numpyro.hmc_util import initialize_model\n from numpyro.mcmc import hmc\n from numpyro.util import fori_collect\n\n .. doctest::\n\n >>> true_coefs = np.array([1., 2., 3.])\n >>> data = random.normal(random.PRNGKey(2), (2000, 3))\n >>> dim = 3\n >>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample(random.PRNGKey(3))\n >>>\n >>> def model(data, labels):\n ... coefs_mean = np.zeros(dim)\n ... coefs = sample('beta', dist.Normal(coefs_mean, np.ones(3)))\n ... return sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)\n >>>\n >>> init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(0), model, data, labels)\n >>> init_kernel, sample_kernel = hmc(potential_fn, algo='NUTS')\n >>> hmc_state = init_kernel(init_params,\n ... trajectory_length=10,\n ... num_warmup_steps=300)\n >>> hmc_states = fori_collect(500, sample_kernel, hmc_state,\n ... transform=lambda x: constrain_fn(x.z))\n >>> print(np.mean(hmc_states['beta'], axis=0))\n [0.9153987 2.0754058 2.9621222]\n \"\"\"\n if kinetic_fn is None:\n kinetic_fn = _euclidean_ke\n vv_init, vv_update = velocity_verlet(potential_fn, kinetic_fn)\n trajectory_len = None\n max_treedepth = None\n momentum_generator = None\n wa_update = None\n\n def init_kernel(init_samples,\n num_warmup,\n step_size=1.0,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n diag_mass=True,\n target_accept_prob=0.8,\n trajectory_length=2*math.pi,\n max_tree_depth=10,\n run_warmup=True,\n progbar=True,\n rng=PRNGKey(0)):\n \"\"\"\n Initializes the HMC sampler.\n\n :param init_samples: Initial parameters to begin sampling. The type can\n must be consistent with the input type to `potential_fn`.\n :param int num_warmup_steps: Number of warmup steps; samples generated\n during warmup are discarded.\n :param float step_size: Determines the size of a single step taken by the\n verlet integrator while computing the trajectory using Hamiltonian\n dynamics. If not specified, it will be set to 1.\n :param bool adapt_step_size: A flag to decide if we want to adapt step_size\n during warm-up phase using Dual Averaging scheme.\n :param bool adapt_mass_matrix: A flag to decide if we want to adapt mass\n matrix during warm-up phase using Welford scheme.\n :param bool diag_mass: A flag to decide if mass matrix is diagonal (default)\n or dense (if set to ``False``).\n :param float target_accept_prob: Target acceptance probability for step size\n adaptation using Dual Averaging. Increasing this value will lead to a smaller\n step size, hence the sampling will be slower but more robust. Default to 0.8.\n :param float trajectory_length: Length of a MCMC trajectory for HMC. Default\n value is :math:`2\\\\pi`.\n :param int max_tree_depth: Max depth of the binary tree created during the doubling\n scheme of NUTS sampler. Defaults to 10.\n :param bool run_warmup: Flag to decide whether warmup is run. 
If ``True``,\n `init_kernel` returns an initial :data:`HMCState` that can be used to\n generate samples using MCMC. Else, returns the arguments and callable\n that does the initial adaptation.\n :param bool progbar: Whether to enable progress bar updates. Defaults to\n ``True``.\n :param bool heuristic_step_size: If ``True``, a coarse grained adjustment of\n step size is done at the beginning of each adaptation window to achieve\n `target_acceptance_prob`.\n :param jax.random.PRNGKey rng: random key to be used as the source of\n randomness.\n \"\"\"\n step_size = float(step_size)\n nonlocal momentum_generator, wa_update, trajectory_len, max_treedepth\n trajectory_len = float(trajectory_length)\n max_treedepth = max_tree_depth\n z = init_samples\n z_flat, unravel_fn = ravel_pytree(z)\n momentum_generator = partial(_sample_momentum, unravel_fn)\n\n find_reasonable_ss = partial(find_reasonable_step_size,\n potential_fn, kinetic_fn,\n momentum_generator)\n\n wa_init, wa_update = warmup_adapter(num_warmup,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n diag_mass=diag_mass,\n target_accept_prob=target_accept_prob,\n find_reasonable_step_size=find_reasonable_ss)\n\n rng_hmc, rng_wa = random.split(rng)\n wa_state = wa_init(z, rng_wa, step_size, mass_matrix_size=np.size(z_flat))\n r = momentum_generator(wa_state.inverse_mass_matrix, rng)\n vv_state = vv_init(z, r)\n hmc_state = HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, 0, 0.,\n wa_state.step_size, wa_state.inverse_mass_matrix, rng_hmc)\n\n wa_update = jit(wa_update)\n if run_warmup:\n # JIT if progress bar updates not required\n if not progbar:\n hmc_state, _ = jit(fori_loop, static_argnums=(2,))(0, num_warmup,\n warmup_update,\n (hmc_state, wa_state))\n else:\n for i in tqdm.trange(num_warmup):\n hmc_state, wa_state = warmup_update(i, (hmc_state, wa_state))\n return hmc_state\n else:\n return hmc_state, wa_state, warmup_update\n\n def warmup_update(t, states):\n hmc_state, wa_state = states\n hmc_state = sample_kernel(hmc_state)\n wa_state = wa_update(t, hmc_state.accept_prob, hmc_state.z, wa_state)\n hmc_state = hmc_state.update(step_size=wa_state.step_size,\n inverse_mass_matrix=wa_state.inverse_mass_matrix)\n return hmc_state, wa_state\n\n def _hmc_next(step_size, inverse_mass_matrix, vv_state, rng):\n num_steps = _get_num_steps(step_size, trajectory_len)\n vv_state_new = fori_loop(0, num_steps,\n lambda i, val: vv_update(step_size, inverse_mass_matrix, val),\n vv_state)\n energy_old = vv_state.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state.r)\n energy_new = vv_state_new.potential_energy + kinetic_fn(inverse_mass_matrix, vv_state_new.r)\n delta_energy = energy_new - energy_old\n delta_energy = np.where(np.isnan(delta_energy), np.inf, delta_energy)\n accept_prob = np.clip(np.exp(-delta_energy), a_max=1.0)\n transition = random.bernoulli(rng, accept_prob)\n vv_state = cond(transition,\n vv_state_new, lambda state: state,\n vv_state, lambda state: state)\n return vv_state, num_steps, accept_prob\n\n def _nuts_next(step_size, inverse_mass_matrix, vv_state, rng):\n binary_tree = build_tree(vv_update, kinetic_fn, vv_state,\n inverse_mass_matrix, step_size, rng,\n max_tree_depth=max_treedepth)\n accept_prob = binary_tree.sum_accept_probs / binary_tree.num_proposals\n num_steps = binary_tree.num_proposals\n vv_state = vv_state.update(z=binary_tree.z_proposal,\n potential_energy=binary_tree.z_proposal_pe,\n z_grad=binary_tree.z_proposal_grad)\n return vv_state, num_steps, accept_prob\n\n 
_next = _nuts_next if algo == 'NUTS' else _hmc_next\n\n @jit\n def sample_kernel(hmc_state):\n \"\"\"\n Given an existing :data:`HMCState`, run HMC with fixed (possibly adapted)\n step size and return a new :data:`HMCState`.\n\n :param hmc_state: Current sample (and associated state).\n :return: new proposed :data:`HMCState` from simulating\n Hamiltonian dynamics given existing state.\n \"\"\"\n rng, rng_momentum, rng_transition = random.split(hmc_state.rng, 3)\n r = momentum_generator(hmc_state.inverse_mass_matrix, rng_momentum)\n vv_state = IntegratorState(hmc_state.z, r, hmc_state.potential_energy, hmc_state.z_grad)\n vv_state, num_steps, accept_prob = _next(hmc_state.step_size,\n hmc_state.inverse_mass_matrix,\n vv_state, rng_transition)\n return HMCState(vv_state.z, vv_state.z_grad, vv_state.potential_energy, num_steps,\n accept_prob, hmc_state.step_size, hmc_state.inverse_mass_matrix, rng)\n\n # Make `init_kernel` and `sample_kernel` visible from the global scope once\n # `hmc` is called for sphinx doc generation.\n if 'SPHINX_BUILD' in os.environ:\n hmc.init_kernel = init_kernel\n hmc.sample_kernel = sample_kernel\n\n return init_kernel, sample_kernel\n", "path": "numpyro/mcmc.py"}]} | 4,074 | 330 |
gh_patches_debug_19786 | rasdani/github-patches | git_diff | mkdocs__mkdocs-271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ReadTheDocs theme doesn't show the TOC on the home page
It is expanded on the left here: http://mkdocs.readthedocs.org/en/latest/user-guide/writing-your-docs/
But not here: http://mkdocs.readthedocs.org/en/latest/
This is probably due to us not including the home page in the navigation and ties into the discussion on #231 that I had with @tomchristie
</issue>
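To make the suspected cause concrete, here is an illustrative excerpt of the `_generate_site_navigation` code included below (existing behaviour, not a proposed change): the homepage `Page` is deliberately kept out of `nav_items`, so a theme that renders its sidebar from the nav never gets a TOC entry for it.

```python
if not child_title:
    # New top level page.
    page = Page(title=title, url=url, path=path, url_context=url_context)
    if not utils.is_homepage(path):   # homepage never reaches nav_items,
        nav_items.append(page)        # so the RTD sidebar has nothing to expand
```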
<code>
[start of mkdocs/nav.py]
1 # coding: utf-8
2
3 """
4 Deals with generating the site-wide navigation.
5
6 This consists of building a set of interlinked page and header objects.
7 """
8
9 from mkdocs import utils
10 import posixpath
11 import os
12
13
14 def filename_to_title(filename):
15 """
16 Automatically generate a default title, given a filename.
17 """
18 if utils.is_homepage(filename):
19 return 'Home'
20
21 title = os.path.splitext(filename)[0]
22 title = title.replace('-', ' ').replace('_', ' ')
23 # Captialize if the filename was all lowercase, otherwise leave it as-is.
24 if title.lower() == title:
25 title = title.capitalize()
26 return title
27
28
29 class SiteNavigation(object):
30 def __init__(self, pages_config, use_directory_urls=True):
31 self.url_context = URLContext()
32 self.file_context = FileContext()
33 self.nav_items, self.pages = \
34 _generate_site_navigation(pages_config, self.url_context, use_directory_urls)
35 self.homepage = self.pages[0] if self.pages else None
36 self.use_directory_urls = use_directory_urls
37
38 def __str__(self):
39 return str(self.homepage) + ''.join([str(item) for item in self])
40
41 def __iter__(self):
42 return iter(self.nav_items)
43
44 def walk_pages(self):
45 """
46 Returns each page in the site in turn.
47
48 Additionally this sets the active status of the pages and headers,
49 in the site navigation, so that the rendered navbar can correctly
50 highlight the currently active page and/or header item.
51 """
52 page = self.homepage
53 page.set_active()
54 self.url_context.set_current_url(page.abs_url)
55 self.file_context.set_current_path(page.input_path)
56 yield page
57 while page.next_page:
58 page.set_active(False)
59 page = page.next_page
60 page.set_active()
61 self.url_context.set_current_url(page.abs_url)
62 self.file_context.set_current_path(page.input_path)
63 yield page
64 page.set_active(False)
65
66 @property
67 def source_files(self):
68 if not hasattr(self, '_source_files'):
69 self._source_files = set([page.input_path for page in self.pages])
70 return self._source_files
71
72
73 class URLContext(object):
74 """
75 The URLContext is used to ensure that we can generate the appropriate
76 relative URLs to other pages from any given page in the site.
77
78 We use relative URLs so that static sites can be deployed to any location
79 without having to specify what the path component on the host will be
80 if the documentation is not hosted at the root path.
81 """
82
83 def __init__(self):
84 self.base_path = '/'
85
86 def set_current_url(self, current_url):
87 self.base_path = posixpath.dirname(current_url)
88
89 def make_relative(self, url):
90 """
91 Given a URL path return it as a relative URL,
92 given the context of the current page.
93 """
94 suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
95 # Workaround for bug on `posixpath.relpath()` in Python 2.6
96 if self.base_path == '/':
97 if url == '/':
98 # Workaround for static assets
99 return '.'
100 return url.lstrip('/')
101 relative_path = posixpath.relpath(url, start=self.base_path) + suffix
102
103 # Under Python 2.6, relative_path adds an extra '/' at the end.
104 return relative_path.rstrip('/')
105
106
107 class FileContext(object):
108 """
109 The FileContext is used to ensure that we can generate the appropriate
110 full path for other pages given their relative path from a particular page.
111
112 This is used when we have relative hyperlinks in the documentation, so that
113 we can ensure that they point to markdown documents that actually exist
114 in the `pages` config.
115 """
116 def __init__(self):
117 self.current_file = None
118 self.base_path = ''
119
120 def set_current_path(self, current_path):
121 self.current_file = current_path
122 self.base_path = os.path.dirname(current_path)
123
124 def make_absolute(self, path):
125 """
126 Given a relative file path return it as a POSIX-style
127 absolute filepath, given the context of the current page.
128 """
129 return posixpath.normpath(posixpath.join(self.base_path, path))
130
131
132 class Page(object):
133 def __init__(self, title, url, path, url_context):
134 self.title = title
135 self.abs_url = url
136 self.active = False
137 self.url_context = url_context
138
139 # Relative paths to the input markdown file and output html file.
140 self.input_path = path
141 self.output_path = utils.get_html_path(path)
142
143 # Links to related pages
144 self.previous_page = None
145 self.next_page = None
146 self.ancestors = []
147
148 @property
149 def url(self):
150 return self.url_context.make_relative(self.abs_url)
151
152 @property
153 def is_homepage(self):
154 return utils.is_homepage(self.input_path)
155
156 def __str__(self):
157 return self._indent_print()
158
159 def _indent_print(self, depth=0):
160 indent = ' ' * depth
161 active_marker = ' [*]' if self.active else ''
162 title = self.title if (self.title is not None) else '[blank]'
163 return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)
164
165 def set_active(self, active=True):
166 self.active = active
167 for ancestor in self.ancestors:
168 ancestor.active = active
169
170
171 class Header(object):
172 def __init__(self, title, children):
173 self.title, self.children = title, children
174 self.active = False
175
176 def __str__(self):
177 return self._indent_print()
178
179 def _indent_print(self, depth=0):
180 indent = ' ' * depth
181 active_marker = ' [*]' if self.active else ''
182 ret = '%s%s%s\n' % (indent, self.title, active_marker)
183 for item in self.children:
184 ret += item._indent_print(depth + 1)
185 return ret
186
187
188 def _generate_site_navigation(pages_config, url_context, use_directory_urls=True):
189 """
190 Returns a list of Page and Header instances that represent the
191 top level site navigation.
192 """
193 nav_items = []
194 pages = []
195 previous = None
196
197 for config_line in pages_config:
198 if isinstance(config_line, str):
199 path = config_line
200 title, child_title = None, None
201 elif len(config_line) in (1, 2, 3):
202 # Pad any items that don't exist with 'None'
203 padded_config = (list(config_line) + [None, None])[:3]
204 path, title, child_title = padded_config
205 else:
206 msg = (
207 "Line in 'page' config contained %d items. "
208 "Expected 1, 2 or 3 strings." % len(config_line)
209 )
210 assert False, msg
211
212 if title is None:
213 filename = path.split('/')[0]
214 title = filename_to_title(filename)
215 if child_title is None and '/' in path:
216 filename = path.split('/')[1]
217 child_title = filename_to_title(filename)
218
219 url = utils.get_url_path(path, use_directory_urls)
220
221 if not child_title:
222 # New top level page.
223 page = Page(title=title, url=url, path=path, url_context=url_context)
224 if not utils.is_homepage(path):
225 nav_items.append(page)
226 elif not nav_items or (nav_items[-1].title != title):
227 # New second level page.
228 page = Page(title=child_title, url=url, path=path, url_context=url_context)
229 header = Header(title=title, children=[page])
230 nav_items.append(header)
231 page.ancestors = [header]
232 else:
233 # Additional second level page.
234 page = Page(title=child_title, url=url, path=path, url_context=url_context)
235 header = nav_items[-1]
236 header.children.append(page)
237 page.ancestors = [header]
238
239 # Add in previous and next information.
240 if previous:
241 page.previous_page = previous
242 previous.next_page = page
243 previous = page
244
245 pages.append(page)
246
247 return (nav_items, pages)
248
[end of mkdocs/nav.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/nav.py b/mkdocs/nav.py
--- a/mkdocs/nav.py
+++ b/mkdocs/nav.py
@@ -36,7 +36,7 @@
self.use_directory_urls = use_directory_urls
def __str__(self):
- return str(self.homepage) + ''.join([str(item) for item in self])
+ return ''.join([str(item) for item in self])
def __iter__(self):
return iter(self.nav_items)
@@ -221,8 +221,7 @@
if not child_title:
# New top level page.
page = Page(title=title, url=url, path=path, url_context=url_context)
- if not utils.is_homepage(path):
- nav_items.append(page)
+ nav_items.append(page)
elif not nav_items or (nav_items[-1].title != title):
# New second level page.
page = Page(title=child_title, url=url, path=path, url_context=url_context)
| {"golden_diff": "diff --git a/mkdocs/nav.py b/mkdocs/nav.py\n--- a/mkdocs/nav.py\n+++ b/mkdocs/nav.py\n@@ -36,7 +36,7 @@\n self.use_directory_urls = use_directory_urls\n \n def __str__(self):\n- return str(self.homepage) + ''.join([str(item) for item in self])\n+ return ''.join([str(item) for item in self])\n \n def __iter__(self):\n return iter(self.nav_items)\n@@ -221,8 +221,7 @@\n if not child_title:\n # New top level page.\n page = Page(title=title, url=url, path=path, url_context=url_context)\n- if not utils.is_homepage(path):\n- nav_items.append(page)\n+ nav_items.append(page)\n elif not nav_items or (nav_items[-1].title != title):\n # New second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n", "issue": "ReadTheDocs theme doesn't show the TOC on the home page\nIt is expanded on the left here: http://mkdocs.readthedocs.org/en/latest/user-guide/writing-your-docs/\nBut not here: http://mkdocs.readthedocs.org/en/latest/\n\nThis is probably due to us not including the home page in the navigation and ties into the discussion on #231 that I had with @tomchristie \n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom mkdocs import utils\nimport posixpath\nimport os\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Captialize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n return title\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = \\\n _generate_site_navigation(pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return str(self.homepage) + ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n 
\"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = posixpath.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `posixpath.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n relative_path = posixpath.relpath(url, start=self.base_path) + suffix\n\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n return relative_path.rstrip('/')\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return posixpath.normpath(posixpath.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.active = active\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n\n def __str__(self):\n return self._indent_print()\n\n def _indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in self.children:\n ret += item._indent_print(depth + 1)\n return ret\n\n\ndef _generate_site_navigation(pages_config, url_context, use_directory_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n previous = None\n\n for config_line in pages_config:\n if isinstance(config_line, str):\n path = config_line\n title, child_title = None, None\n elif len(config_line) in (1, 2, 3):\n # Pad any items that don't exist with 'None'\n padded_config = (list(config_line) + [None, None])[:3]\n path, title, child_title = padded_config\n else:\n msg = (\n \"Line 
in 'page' config contained %d items. \"\n \"Expected 1, 2 or 3 strings.\" % len(config_line)\n )\n assert False, msg\n\n if title is None:\n filename = path.split('/')[0]\n title = filename_to_title(filename)\n if child_title is None and '/' in path:\n filename = path.split('/')[1]\n child_title = filename_to_title(filename)\n\n url = utils.get_url_path(path, use_directory_urls)\n\n if not child_title:\n # New top level page.\n page = Page(title=title, url=url, path=path, url_context=url_context)\n if not utils.is_homepage(path):\n nav_items.append(page)\n elif not nav_items or (nav_items[-1].title != title):\n # New second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = Header(title=title, children=[page])\n nav_items.append(header)\n page.ancestors = [header]\n else:\n # Additional second level page.\n page = Page(title=child_title, url=url, path=path, url_context=url_context)\n header = nav_items[-1]\n header.children.append(page)\n page.ancestors = [header]\n\n # Add in previous and next information.\n if previous:\n page.previous_page = previous\n previous.next_page = page\n previous = page\n\n pages.append(page)\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}]} | 3,062 | 222 |
gh_patches_debug_6240 | rasdani/github-patches | git_diff | hylang__hy-2554 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
An exemplar Hy program
When you have a new programming language, it really helps—for the purposes of advertising the language, of helping people learn how to use it, and of giving the language a broader and more practical test than unit tests—to have some kind of nontrivial program written in the language. I think I speak for many of us when I say that I write a lot of small programs in Hy, as well as a lot of not-so-small programs that are of little general interest (in my case, code for the data analysis of specific scientific studies). What I don't have is a program that is both not small and of general interest.
I propose we consider writing and maintaining an exemplar program. It doesn't have to be kept under the hylang organization and probably shouldn't be considered a responsibility of the Hy core team; it could be one person's passion project, so long as it's free software. It should be something that's useful to end users of some kind, rather than Hy programmers—we want to show something that is done in Hy, not something you can use to write other programs in Hy. It should offer something that doesn't already exist, rather than being a Hy rewrite (or stripped-down equivalent) of an existing program. And it shouldn't be too specialized. A text editor, paint program, or process manager isn't too specialized, whereas a program for managing reservations at a hotel or for designing aircraft is.
One genre of program that fits a lot of these criteria is games. [Rogue TV](https://github.com/kodiologist/rogue-tv) has the potential to be a good exemplar program, although it would need considerable work to get running on the latest Hy, and it's still missing a lot of content elements to be a good game. Also, Rogue TV is arguably too big and complex to be good for this purpose. Ironically, I quit development of it largely to work on Hy itself.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Set both `setup_requires` and `install_requires` with our
4 # dependencies, since we need to compile Hy files during setup. And
5 # put this as the first statement in the file so it's easy to parse
6 # out without executing the file.
7 requires = [
8 "funcparserlib ~= 1.0",
9 'astor>=0.8 ; python_version < "3.9"',
10 ]
11
12 import os
13
14 import fastentrypoints # Monkey-patches setuptools.
15 from get_version import __version__
16 from setuptools import find_packages, setup
17 from setuptools.command.install import install
18
19 os.chdir(os.path.split(os.path.abspath(__file__))[0])
20
21 PKG = "hy"
22
23 long_description = """Hy is a Lisp dialect that's embedded in Python.
24 Since Hy transforms its Lisp code into Python abstract syntax tree (AST)
25 objects, you have the whole beautiful world of Python at your fingertips,
26 in Lisp form."""
27
28
29 class install(install):
30 def run(self):
31 super().run()
32 import py_compile
33
34 import hy # for compile hooks
35
36 for path in set(self.get_outputs()):
37 if path.endswith(".hy"):
38 py_compile.compile(
39 path,
40 invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
41 )
42
43 setup(
44 name=PKG,
45 version=(
46 None
47 if __version__ == "unknown"
48 else __version__
49 ),
50 setup_requires=["wheel"] + requires,
51 install_requires=requires,
52 python_requires=">= 3.8, < 3.13",
53 entry_points={
54 "console_scripts": [
55 "hy = hy.cmdline:hy_main",
56 "hyc = hy.cmdline:hyc_main",
57 "hy2py = hy.cmdline:hy2py_main"
58 ]
59 },
60 packages=find_packages(exclude=["tests*"]),
61 package_data={
62 "": ["*.hy"],
63 },
64 data_files=[("get_version", ["get_version.py"])],
65 author="Paul Tagliamonte",
66 author_email="[email protected]",
67 long_description=long_description,
68 description="A Lisp dialect embedded in Python",
69 license="Expat",
70 url="http://hylang.org/",
71 platforms=["any"],
72 classifiers=[
73 "Development Status :: 4 - Beta",
74 "Intended Audience :: Developers",
75 "License :: DFSG approved",
76 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
77 "Operating System :: OS Independent",
78 "Programming Language :: Lisp",
79 "Programming Language :: Python",
80 "Programming Language :: Python :: 3",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 "Programming Language :: Python :: 3.11",
85 "Programming Language :: Python :: 3.12",
86 "Programming Language :: Python :: Implementation :: PyPy",
87 "Environment :: WebAssembly :: Emscripten",
88 "Topic :: Software Development :: Code Generators",
89 "Topic :: Software Development :: Compilers",
90 "Topic :: Software Development :: Libraries",
91 ],
92 project_urls={
93 "Documentation": "https://docs.hylang.org/",
94 "Source": "https://github.com/hylang/hy",
95 },
96 cmdclass={
97 "install": install,
98 },
99 )
100
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -75,6 +75,7 @@
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
+ "Programming Language :: Hy",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,6 +75,7 @@\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n+ \"Programming Language :: Hy\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n", "issue": "An exemplar Hy program\nWhen you have a new programming language, it really helps\u2014for the purposes of advertising the language, of helping people learn how to use it, and of giving the language a broader and more practical test than unit tests\u2014to have some kind of nontrivial program written in the language. I think I speak for many of us when I say that write a lot of small programs in Hy, as well as a lot of not-so-small programs that are of little general interest (in my case, code for the data analysis of specific scientific studies). What I don't have is a program that is both not small and of general interest.\r\n\r\nI propose we consider writing and maintaining an exemplar program. It doesn't have to be kept under the hylang organization and probably shouldn't be considered a responsibility of the Hy core team; it could be one person's passion project, so long as it's free software. It should be something that's useful to end users of some kind, rather than Hy programmers\u2014we want to show something that is done in Hy, not something you can use to write other programs in Hy. It should offer something that doesn't already exist, rather than being a Hy rewrite (or stripped-down equivalent) of an existing program. And it shouldn't be too specialized. A text editor, paint program, or process manager isn't too specialized, whereas a program for managing reservations at a hotel or for designing aircraft is.\r\n\r\nOne genre of program that fits a lot of these criteria is games. [Rogue TV](https://github.com/kodiologist/rogue-tv) has the potential to be a good exemplar program, although it would need considerable work to get running on the latest Hy, and it's still missing a lot of content elements to be a good game. Also, Rogue TV is arguably too big and complex to be good for this purpose. Ironically, I quit development of it largely to work on Hy itself.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Set both `setup_requires` and `install_requires` with our\n# dependencies, since we need to compile Hy files during setup. 
And\n# put this as the first statement in the file so it's easy to parse\n# out without executing the file.\nrequires = [\n \"funcparserlib ~= 1.0\",\n 'astor>=0.8 ; python_version < \"3.9\"',\n]\n\nimport os\n\nimport fastentrypoints # Monkey-patches setuptools.\nfrom get_version import __version__\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Lisp dialect that's embedded in Python.\nSince Hy transforms its Lisp code into Python abstract syntax tree (AST)\nobjects, you have the whole beautiful world of Python at your fingertips,\nin Lisp form.\"\"\"\n\n\nclass install(install):\n def run(self):\n super().run()\n import py_compile\n\n import hy # for compile hooks\n\n for path in set(self.get_outputs()):\n if path.endswith(\".hy\"):\n py_compile.compile(\n path,\n invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,\n )\n\nsetup(\n name=PKG,\n version=(\n None\n if __version__ == \"unknown\"\n else __version__\n ),\n setup_requires=[\"wheel\"] + requires,\n install_requires=requires,\n python_requires=\">= 3.8, < 3.13\",\n entry_points={\n \"console_scripts\": [\n \"hy = hy.cmdline:hy_main\",\n \"hyc = hy.cmdline:hyc_main\",\n \"hy2py = hy.cmdline:hy2py_main\"\n ]\n },\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\n \"\": [\"*.hy\"],\n },\n data_files=[(\"get_version\", [\"get_version.py\"])],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description=\"A Lisp dialect embedded in Python\",\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Environment :: WebAssembly :: Emscripten\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n },\n cmdclass={\n \"install\": install,\n },\n)\n", "path": "setup.py"}]} | 1,852 | 100 |
gh_patches_debug_15849 | rasdani/github-patches | git_diff | iterative__dvc-2693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pull: wrong warning on pulling import stage
Script to reproduce:
```
mkdir test
dvc import https://github.com/iterative/example-get-started model.pkl
git add .
git commit -am "init"
cd ..
git clone test test1
cd test1
dvc pull
```
outputs only:
```
WARNING: DVC-file 'model.pkl.dvc' is locked. Its dependencies are not going to be checked out.
```
I think there should be no warning, especially considering that it has done the job - pulled the `model.pkl`. Like we discussed in #2667 there should be some statistics instead.
</issue>
<code>
[start of dvc/repo/checkout.py]
1 from __future__ import unicode_literals
2
3 import logging
4
5 from dvc.exceptions import CheckoutErrorSuggestGit, CheckoutError
6 from dvc.progress import Tqdm
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def _cleanup_unused_links(self, all_stages):
13 used = [
14 out.fspath
15 for stage in all_stages
16 for out in stage.outs
17 if out.scheme == "local"
18 ]
19 self.state.remove_unused_links(used)
20
21
22 def get_all_files_numbers(stages):
23 return sum(stage.get_all_files_number() for stage in stages)
24
25
26 def _checkout(
27 self, targets=None, with_deps=False, force=False, recursive=False
28 ):
29 from dvc.stage import StageFileDoesNotExistError, StageFileBadNameError
30
31 stages = set()
32 targets = targets or [None]
33 for target in targets:
34 try:
35 new = self.collect(
36 target, with_deps=with_deps, recursive=recursive
37 )
38 stages.update(new)
39 except (StageFileDoesNotExistError, StageFileBadNameError) as exc:
40 if not target:
41 raise
42 raise CheckoutErrorSuggestGit(target, exc)
43
44 _cleanup_unused_links(self, self.stages)
45 total = get_all_files_numbers(stages)
46 if total == 0:
47 logger.info("Nothing to do")
48 failed = []
49 with Tqdm(
50 total=total, unit="file", desc="Checkout", disable=total == 0
51 ) as pbar:
52 for stage in stages:
53 if stage.locked:
54 logger.warning(
55 "DVC-file '{path}' is locked. Its dependencies are"
56 " not going to be checked out.".format(path=stage.relpath)
57 )
58
59 failed.extend(
60 stage.checkout(force=force, progress_callback=pbar.update_desc)
61 )
62 if failed:
63 raise CheckoutError(failed)
64
[end of dvc/repo/checkout.py]
[start of dvc/repo/pull.py]
1 from __future__ import unicode_literals
2
3 from . import locked
4
5
6 @locked
7 def pull(
8 self,
9 targets=None,
10 jobs=None,
11 remote=None,
12 all_branches=False,
13 with_deps=False,
14 all_tags=False,
15 force=False,
16 recursive=False,
17 ):
18 processed_files_count = self._fetch(
19 targets,
20 jobs,
21 remote=remote,
22 all_branches=all_branches,
23 all_tags=all_tags,
24 with_deps=with_deps,
25 recursive=recursive,
26 )
27 self._checkout(
28 targets=targets, with_deps=with_deps, force=force, recursive=recursive
29 )
30 return processed_files_count
31
[end of dvc/repo/pull.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/checkout.py b/dvc/repo/checkout.py
--- a/dvc/repo/checkout.py
+++ b/dvc/repo/checkout.py
@@ -50,12 +50,6 @@
total=total, unit="file", desc="Checkout", disable=total == 0
) as pbar:
for stage in stages:
- if stage.locked:
- logger.warning(
- "DVC-file '{path}' is locked. Its dependencies are"
- " not going to be checked out.".format(path=stage.relpath)
- )
-
failed.extend(
stage.checkout(force=force, progress_callback=pbar.update_desc)
)
diff --git a/dvc/repo/pull.py b/dvc/repo/pull.py
--- a/dvc/repo/pull.py
+++ b/dvc/repo/pull.py
@@ -1,6 +1,11 @@
from __future__ import unicode_literals
-from . import locked
+import logging
+
+from dvc.repo import locked
+
+
+logger = logging.getLogger(__name__)
@locked
| {"golden_diff": "diff --git a/dvc/repo/checkout.py b/dvc/repo/checkout.py\n--- a/dvc/repo/checkout.py\n+++ b/dvc/repo/checkout.py\n@@ -50,12 +50,6 @@\n total=total, unit=\"file\", desc=\"Checkout\", disable=total == 0\n ) as pbar:\n for stage in stages:\n- if stage.locked:\n- logger.warning(\n- \"DVC-file '{path}' is locked. Its dependencies are\"\n- \" not going to be checked out.\".format(path=stage.relpath)\n- )\n-\n failed.extend(\n stage.checkout(force=force, progress_callback=pbar.update_desc)\n )\ndiff --git a/dvc/repo/pull.py b/dvc/repo/pull.py\n--- a/dvc/repo/pull.py\n+++ b/dvc/repo/pull.py\n@@ -1,6 +1,11 @@\n from __future__ import unicode_literals\n \n-from . import locked\n+import logging\n+\n+from dvc.repo import locked\n+\n+\n+logger = logging.getLogger(__name__)\n \n \n @locked\n", "issue": "pull: wrong warning on pulling import stage\nScript to reproduce:\r\n\r\n```\r\nmkdir test\r\ndvc import https://github.com/iterative/example-get-started model.pkl\r\ngit add .\r\ngit commit -am \"init\"\r\ncd ..\r\ngit clone test test1\r\ncd test1\r\ndvc pull\r\n```\r\n\r\noutputs only:\r\n\r\n```\r\nWARNING: DVC-file 'model.pkl.dvc' is locked. Its dependencies are not going to be checked out.\r\n```\r\n\r\nI think there should be no warning, especially considering that it has done the job - pulled the `model.pkl`. Like we discussed in #2667 there should be some statistics instead.\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.exceptions import CheckoutErrorSuggestGit, CheckoutError\nfrom dvc.progress import Tqdm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _cleanup_unused_links(self, all_stages):\n used = [\n out.fspath\n for stage in all_stages\n for out in stage.outs\n if out.scheme == \"local\"\n ]\n self.state.remove_unused_links(used)\n\n\ndef get_all_files_numbers(stages):\n return sum(stage.get_all_files_number() for stage in stages)\n\n\ndef _checkout(\n self, targets=None, with_deps=False, force=False, recursive=False\n):\n from dvc.stage import StageFileDoesNotExistError, StageFileBadNameError\n\n stages = set()\n targets = targets or [None]\n for target in targets:\n try:\n new = self.collect(\n target, with_deps=with_deps, recursive=recursive\n )\n stages.update(new)\n except (StageFileDoesNotExistError, StageFileBadNameError) as exc:\n if not target:\n raise\n raise CheckoutErrorSuggestGit(target, exc)\n\n _cleanup_unused_links(self, self.stages)\n total = get_all_files_numbers(stages)\n if total == 0:\n logger.info(\"Nothing to do\")\n failed = []\n with Tqdm(\n total=total, unit=\"file\", desc=\"Checkout\", disable=total == 0\n ) as pbar:\n for stage in stages:\n if stage.locked:\n logger.warning(\n \"DVC-file '{path}' is locked. Its dependencies are\"\n \" not going to be checked out.\".format(path=stage.relpath)\n )\n\n failed.extend(\n stage.checkout(force=force, progress_callback=pbar.update_desc)\n )\n if failed:\n raise CheckoutError(failed)\n", "path": "dvc/repo/checkout.py"}, {"content": "from __future__ import unicode_literals\n\nfrom . 
import locked\n\n\n@locked\ndef pull(\n self,\n targets=None,\n jobs=None,\n remote=None,\n all_branches=False,\n with_deps=False,\n all_tags=False,\n force=False,\n recursive=False,\n):\n processed_files_count = self._fetch(\n targets,\n jobs,\n remote=remote,\n all_branches=all_branches,\n all_tags=all_tags,\n with_deps=with_deps,\n recursive=recursive,\n )\n self._checkout(\n targets=targets, with_deps=with_deps, force=force, recursive=recursive\n )\n return processed_files_count\n", "path": "dvc/repo/pull.py"}]} | 1,418 | 246 |
gh_patches_debug_16508 | rasdani/github-patches | git_diff | cocotb__cocotb-878 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix docs for async/await in coroutines doc
The new documentation for async has a couple issues:
- a non-inline link in rst isn't displayed: https://cocotb.readthedocs.io/en/latest/coroutines.html (at the bottom of the page, "More details on this type of generator can be found in `PEP 525`_.")
- `python3` should be used instead of `python` in the code examples to make sure that keywords like `await` get highlighted properly.
- the note block is broken.
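
The second bullet concerns which Pygments lexer the docs use. A minimal sketch of a global fix in `conf.py` — an assumption about the intended change, since the docs could just as well switch individual `code-block` directives instead:

```python
# Sphinx conf.py (sketch): default literal blocks to the Python 3 lexer so
# keywords such as `async`/`await` are highlighted.
highlight_language = "python3"
```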
</issue>
<code>
[start of documentation/source/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # cocotb documentation build configuration file, created by
4 # sphinx-quickstart on Wed Jun 19 14:44:09 2013.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys, os
15 import datetime
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath('../..'))
21
22 # Add in-tree extensions to path
23 sys.path.insert(0, os.path.abspath('../sphinxext'))
24
25 os.environ["SPHINX_BUILD"] = "1"
26
27 # -- General configuration -----------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #needs_sphinx = '1.0'
31
32 # Add any Sphinx extension module names here, as strings. They can be extensions
33 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
34 extensions = [
35 'sphinx.ext.autodoc',
36 'sphinx.ext.doctest',
37 'sphinx.ext.todo',
38 'sphinx.ext.coverage',
39 'sphinx.ext.imgmath',
40 'sphinx.ext.viewcode',
41 'sphinx.ext.napoleon',
42 'sphinx.ext.intersphinx',
43 'cairosvgconverter',
44 'sphinxcontrib_trio',
45 ]
46
47 intersphinx_mapping = {'https://docs.python.org/3': None}
48
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ['_templates']
51
52 # The suffix of source filenames.
53 source_suffix = '.rst'
54
55 # The encoding of source files.
56 #source_encoding = 'utf-8-sig'
57
58 # The master toctree document.
59 master_doc = 'index'
60
61 # General information about the project.
62 project = u'cocotb'
63 copyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)
64
65 # The version info for the project you're documenting, acts as replacement for
66 # |version| and |release|, also used in various other places throughout the
67 # built documents.
68 #
69 # The short X.Y version.
70 version = '1.1'
71 # The full version, including alpha/beta/rc tags.
72 release = '1.1'
73
74 # The language for content autogenerated by Sphinx. Refer to documentation
75 # for a list of supported languages.
76 #language = None
77
78 # There are two options for replacing |today|: either, you set today to some
79 # non-false value, then it is used:
80 #today = ''
81 # Else, today_fmt is used as the format for a strftime call.
82 #today_fmt = '%B %d, %Y'
83
84 # List of patterns, relative to source directory, that match files and
85 # directories to ignore when looking for source files.
86 exclude_patterns = []
87
88 # The reST default role (used for this markup: `text`) to use for all documents.
89 #default_role = None
90
91 # If true, '()' will be appended to :func: etc. cross-reference text.
92 #add_function_parentheses = True
93
94 # If true, the current module name will be prepended to all description
95 # unit titles (such as .. function::).
96 #add_module_names = True
97
98 # If true, sectionauthor and moduleauthor directives will be shown in the
99 # output. They are ignored by default.
100 #show_authors = False
101
102 # The name of the Pygments (syntax highlighting) style to use.
103 pygments_style = 'sphinx'
104
105 # A list of ignored prefixes for module index sorting.
106 #modindex_common_prefix = []
107
108 # If true, keep warnings as "system message" paragraphs in the built documents.
109 #keep_warnings = False
110
111
112 # -- Options for HTML output ---------------------------------------------------
113
114 # The theme to use for HTML and HTML Help pages. See the documentation for
115 # a list of builtin themes.
116 html_theme = 'default'
117
118 # Theme options are theme-specific and customize the look and feel of a theme
119 # further. For a list of options available for each theme, see the
120 # documentation.
121 #html_theme_options = {}
122
123 # Add any paths that contain custom themes here, relative to this directory.
124 #html_theme_path = []
125
126 # The name for this set of Sphinx documents. If None, it defaults to
127 # "<project> v<release> documentation".
128 #html_title = None
129
130 # A shorter title for the navigation bar. Default is the same as html_title.
131 #html_short_title = None
132
133 # The name of an image file (relative to this directory) to place at the top
134 # of the sidebar.
135 #html_logo = None
136
137 # The name of an image file (within the static path) to use as favicon of the
138 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
139 # pixels large.
140 #html_favicon = None
141
142 # Add any paths that contain custom static files (such as style sheets) here,
143 # relative to this directory. They are copied after the builtin static files,
144 # so a file named "default.css" will overwrite the builtin "default.css".
145 #html_static_path = ['_static']
146
147 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
148 # using the given strftime format.
149 #html_last_updated_fmt = '%b %d, %Y'
150
151 # If true, SmartyPants will be used to convert quotes and dashes to
152 # typographically correct entities.
153 #html_use_smartypants = True
154
155 # Custom sidebar templates, maps document names to template names.
156 #html_sidebars = {}
157
158 # Additional templates that should be rendered to pages, maps page names to
159 # template names.
160 #html_additional_pages = {}
161
162 # If false, no module index is generated.
163 #html_domain_indices = True
164
165 # If false, no index is generated.
166 #html_use_index = True
167
168 # If true, the index is split into individual pages for each letter.
169 #html_split_index = False
170
171 # If true, links to the reST sources are added to the pages.
172 #html_show_sourcelink = True
173
174 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
175 #html_show_sphinx = True
176
177 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
178 #html_show_copyright = True
179
180 # If true, an OpenSearch description file will be output, and all pages will
181 # contain a <link> tag referring to it. The value of this option must be the
182 # base URL from which the finished HTML is served.
183 #html_use_opensearch = ''
184
185 # This is the file name suffix for HTML files (e.g. ".xhtml").
186 #html_file_suffix = None
187
188 # Output file base name for HTML help builder.
189 htmlhelp_basename = 'cocotbdoc'
190
191
192 # -- Options for LaTeX output --------------------------------------------------
193
194 latex_elements = {
195 # The paper size ('letterpaper' or 'a4paper').
196 #'papersize': 'letterpaper',
197
198 # The font size ('10pt', '11pt' or '12pt').
199 #'pointsize': '10pt',
200
201 # Additional stuff for the LaTeX preamble.
202 #'preamble': '',
203 }
204
205 # Grouping the document tree into LaTeX files. List of tuples
206 # (source start file, target name, title, author, documentclass [howto/manual]).
207 latex_documents = [
208 ('index', 'cocotb.tex', u'cocotb Documentation',
209 u'PotentialVentures', 'manual'),
210 ]
211
212 # The name of an image file (relative to this directory) to place at the top of
213 # the title page.
214 #latex_logo = None
215
216 # For "manual" documents, if this is true, then toplevel headings are parts,
217 # not chapters.
218 #latex_use_parts = False
219
220 # If true, show page references after internal links.
221 #latex_show_pagerefs = False
222
223 # If true, show URL addresses after external links.
224 #latex_show_urls = False
225
226 # Documents to append as an appendix to all manuals.
227 #latex_appendices = []
228
229 # If false, no module index is generated.
230 #latex_domain_indices = True
231
232
233 # -- Options for manual page output --------------------------------------------
234
235 # One entry per manual page. List of tuples
236 # (source start file, name, description, authors, manual section).
237 man_pages = [
238 ('index', 'cocotb', u'cocotb Documentation',
239 [u'PotentialVentures'], 1)
240 ]
241
242 # If true, show URL addresses after external links.
243 #man_show_urls = False
244
245
246 # -- Options for Texinfo output ------------------------------------------------
247
248 # Grouping the document tree into Texinfo files. List of tuples
249 # (source start file, target name, title, author,
250 # dir menu entry, description, category)
251 texinfo_documents = [
252 ('index', 'cocotb', u'cocotb Documentation',
253 u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \
254 environment for efficient verification of RTL using Python.',
255 'Miscellaneous'),
256 ]
257
258 # Documents to append as an appendix to all manuals.
259 #texinfo_appendices = []
260
261 # If false, no module index is generated.
262 #texinfo_domain_indices = True
263
264 # How to display URL addresses: 'footnote', 'no', or 'inline'.
265 #texinfo_show_urls = 'footnote'
266
267 # If true, do not generate a @detailmenu in the "Top" node's menu.
268 #texinfo_no_detailmenu = False
269
270 # For now show the todoy
271 todo_include_todos = True
272
[end of documentation/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/documentation/source/conf.py b/documentation/source/conf.py
--- a/documentation/source/conf.py
+++ b/documentation/source/conf.py
@@ -113,7 +113,24 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'default'
+
+# The Read the Docs theme is available from
+# https://github.com/snide/sphinx_rtd_theme
+#
+# Install with
+# - pip install sphinx_rtd_theme
+# or
+# - apt-get install python-sphinx-rtd-theme
+
+try:
+ import sphinx_rtd_theme
+ html_theme = 'sphinx_rtd_theme'
+except ImportError:
+ sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was '+
+ 'not found. Make sure you have the theme installed to produce pretty '+
+ 'HTML output. Falling back to the default theme.\n')
+
+ html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
| {"golden_diff": "diff --git a/documentation/source/conf.py b/documentation/source/conf.py\n--- a/documentation/source/conf.py\n+++ b/documentation/source/conf.py\n@@ -113,7 +113,24 @@\n \n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n-html_theme = 'default'\n+\n+# The Read the Docs theme is available from\n+# https://github.com/snide/sphinx_rtd_theme\n+#\n+# Install with\n+# - pip install sphinx_rtd_theme\n+# or\n+# - apt-get install python-sphinx-rtd-theme\n+\n+try:\n+ import sphinx_rtd_theme\n+ html_theme = 'sphinx_rtd_theme'\n+except ImportError:\n+ sys.stderr.write('Warning: The Sphinx \\'sphinx_rtd_theme\\' HTML theme was '+\n+ 'not found. Make sure you have the theme installed to produce pretty '+\n+ 'HTML output. Falling back to the default theme.\\n')\n+\n+ html_theme = 'default'\n \n # Theme options are theme-specific and customize the look and feel of a theme\n # further. For a list of options available for each theme, see the\n", "issue": "Fix docs for async/await in coroutines doc\nThe new documentation for async has a couple issues:\r\n\r\n- a non-inline link in rst isn't displayed: https://cocotb.readthedocs.io/en/latest/coroutines.html (at the bottom of the page, \"More details on this type of generator can be found in `PEP 525`_.\")\r\n- `python3` should be used instead of `python` in the code examples to make sure that keywords like `await` get highlighted properly.\r\n- the note block is broken.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# cocotb documentation build configuration file, created by\n# sphinx-quickstart on Wed Jun 19 14:44:09 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Add in-tree extensions to path\nsys.path.insert(0, os.path.abspath('../sphinxext'))\n\nos.environ[\"SPHINX_BUILD\"] = \"1\"\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc', \n 'sphinx.ext.doctest', \n 'sphinx.ext.todo', \n 'sphinx.ext.coverage', \n 'sphinx.ext.imgmath', \n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'cairosvgconverter',\n 'sphinxcontrib_trio',\n ]\n\nintersphinx_mapping = {'https://docs.python.org/3': None}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'cocotb'\ncopyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'cocotbdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'cocotb.tex', u'cocotb Documentation',\n u'PotentialVentures', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'cocotb', u'cocotb Documentation',\n [u'PotentialVentures'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'cocotb', u'cocotb Documentation',\n u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \\\n environment for efficient verification of RTL using Python.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# For now show the todoy \ntodo_include_todos = True\n", "path": "documentation/source/conf.py"}]} | 3,521 | 257 |
gh_patches_debug_22798 | rasdani/github-patches | git_diff | svthalia__concrexit-3184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow people to delete albums without pushnotifications permissions
### Describe the bug
The paparazcie cannot delete albums because they don't (didn't, I temporarily gave them permissions) have delete permissions on message and scheduledmessage and facedetectionphoto.
### How to reproduce
Steps to reproduce the behaviour:
1. Have delete_album and delete_photo permission but no other delete permissions.
2. Try to delete an album
3. Get error screen telling you you need some more permissions.
### Expected behaviour
Cascade deletes are allowed when deleting an album or photo regardless of permissions on the related items.
### Additional context
The delete permissions are needed only for related models that have a ModelAdmin registered in the admin site. Models without an admin are ignored already by default.
Here the missing permissions are gathered: https://github.com/django/django/blob/7cc138a58f73c17f07cfaf459ef8e7677ac41ac0/django/contrib/admin/utils.py#LL147C8-L149C52.
We can probably drop them in `ModelAdmin.get_deleted_objects`.
With splitting up some models (e.g. BlacklistedThabloidUser, etc.) there may be more admins that need something like this.
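
A minimal sketch of that override on the album admin — essentially what the patch below ends up doing, shown here in isolation:

```python
from django.contrib import admin

from .models import Album


@admin.register(Album)
class AlbumAdmin(admin.ModelAdmin):
    def get_deleted_objects(self, objs, request):
        # Let Django collect the cascade and the missing permissions as usual.
        (
            deleted_objects,
            model_count,
            perms_needed,
            protected,
        ) = super().get_deleted_objects(objs, request)
        # Drop the collected permissions: having delete_album is treated as
        # enough to cascade into related objects (messages, face detection
        # photos, ...), while protected objects still block the delete.
        return deleted_objects, model_count, set(), protected
```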
</issue>
<code>
[start of website/photos/admin.py]
1 from django.contrib import admin, messages
2 from django.db.models import Count
3 from django.dispatch import Signal
4 from django.utils.translation import gettext_lazy as _
5
6 from django_filepond_widget.fields import FilePondFile
7
8 from .forms import AlbumForm
9 from .models import Album, Like, Photo
10 from .services import extract_archive, save_photo
11
12 album_uploaded = Signal()
13
14
15 @admin.register(Album)
16 class AlbumAdmin(admin.ModelAdmin):
17 """Model for Album admin page."""
18
19 list_display = ("title", "date", "num_photos", "hidden", "shareable")
20 fields = (
21 "title",
22 "slug",
23 "date",
24 "event",
25 "hidden",
26 "shareable",
27 "album_archive",
28 "_cover",
29 )
30 search_fields = ("title", "date")
31 list_filter = ("hidden", "shareable")
32 date_hierarchy = "date"
33 prepopulated_fields = {
34 "slug": (
35 "date",
36 "title",
37 )
38 }
39 form = AlbumForm
40
41 def get_queryset(self, request):
42 """Get Albums and add the amount of photos as an annotation."""
43 return Album.objects.annotate(photos_count=Count("photo"))
44
45 def num_photos(self, obj):
46 """Pretty-print the number of photos."""
47 return obj.photos_count
48
49 num_photos.short_description = _("Number of photos")
50 num_photos.admin_order_field = "photos_count"
51
52 def save_model(self, request, obj, form, change):
53 """Save the new Album by extracting the archive."""
54 super().save_model(request, obj, form, change)
55
56 archive = form.cleaned_data.get("album_archive", None)
57 if archive is not None:
58 try:
59 extract_archive(request, obj, archive)
60 album_uploaded.send(sender=None, album=obj)
61 except Exception as e:
62 raise e
63 finally:
64 if isinstance(archive, FilePondFile):
65 archive.remove()
66
67 messages.add_message(
68 request,
69 messages.WARNING,
70 _("Full-sized photos will not be saved on the Thalia-website."),
71 )
72
73
74 class LikeInline(admin.StackedInline):
75 model = Like
76 extra = 0
77
78
79 @admin.register(Photo)
80 class PhotoAdmin(admin.ModelAdmin):
81 """Model for Photo admin page."""
82
83 list_display = (
84 "__str__",
85 "album",
86 "hidden",
87 "num_likes",
88 )
89 search_fields = ("file",)
90 list_filter = ("album", "hidden")
91 exclude = ("_digest",)
92
93 inlines = [
94 LikeInline,
95 ]
96
97 def save_model(self, request, obj, form, change):
98 """Save new Photo."""
99 super().save_model(request, obj, form, change)
100 if change and obj.original_file == obj.file.name:
101 return
102
103 if save_photo(obj, obj.file, obj.file.name):
104 messages.add_message(
105 request,
106 messages.WARNING,
107 _("Full-sized photos will not be saved on the Thalia-website."),
108 )
109 else:
110 messages.add_message(
111 request, messages.ERROR, _("This photo already exists in the album.")
112 )
113
[end of website/photos/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/photos/admin.py b/website/photos/admin.py
--- a/website/photos/admin.py
+++ b/website/photos/admin.py
@@ -70,6 +70,18 @@
_("Full-sized photos will not be saved on the Thalia-website."),
)
+ def get_deleted_objects(self, objs, request):
+ (
+ deleted_objects,
+ model_count,
+ perms_needed,
+ protected,
+ ) = super().get_deleted_objects(objs, request)
+
+ # Drop any missing delete permissions. If the user has `delete_album` permission,
+ # they should automatically be allowed to cascade e.g. related pushnotifications.
+ return deleted_objects, model_count, set(), protected
+
class LikeInline(admin.StackedInline):
model = Like
@@ -94,6 +106,16 @@
LikeInline,
]
+ def get_deleted_objects(self, objs, request):
+ (
+ deleted_objects,
+ model_count,
+ perms_needed,
+ protected,
+ ) = super().get_deleted_objects(objs, request)
+
+ return deleted_objects, model_count, set(), protected
+
def save_model(self, request, obj, form, change):
"""Save new Photo."""
super().save_model(request, obj, form, change)
| {"golden_diff": "diff --git a/website/photos/admin.py b/website/photos/admin.py\n--- a/website/photos/admin.py\n+++ b/website/photos/admin.py\n@@ -70,6 +70,18 @@\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n \n+ def get_deleted_objects(self, objs, request):\n+ (\n+ deleted_objects,\n+ model_count,\n+ perms_needed,\n+ protected,\n+ ) = super().get_deleted_objects(objs, request)\n+\n+ # Drop any missing delete permissions. If the user has `delete_album` permission,\n+ # they should automatically be allowed to cascade e.g. related pushnotifications.\n+ return deleted_objects, model_count, set(), protected\n+\n \n class LikeInline(admin.StackedInline):\n model = Like\n@@ -94,6 +106,16 @@\n LikeInline,\n ]\n \n+ def get_deleted_objects(self, objs, request):\n+ (\n+ deleted_objects,\n+ model_count,\n+ perms_needed,\n+ protected,\n+ ) = super().get_deleted_objects(objs, request)\n+\n+ return deleted_objects, model_count, set(), protected\n+\n def save_model(self, request, obj, form, change):\n \"\"\"Save new Photo.\"\"\"\n super().save_model(request, obj, form, change)\n", "issue": "Allow people to delete albums without pushnotifications permissions\n### Describe the bug\r\nThe paparazcie cannot delete albums because they don't (didn't, I temporarily gave them permissions) have delete permissions on message and scheduledmessage and facedetectionphoto.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Have delete_album and delete_photo permission but no other delete permissions.\r\n2. Try to delete an album\r\n3. Get error screen telling you you need some more permissions.\r\n\r\n### Expected behaviour\r\nCascade deletes are allowed when deleting an album or photo regardless of permissions on the related items.\r\n\r\n### Additional context\r\nThe delete permissions are needed only for related models that have a ModelAdmin registered in the admin site. Models without an admin are ignored already by default.\r\n\r\nHere the missing permissions are gathered: https://github.com/django/django/blob/7cc138a58f73c17f07cfaf459ef8e7677ac41ac0/django/contrib/admin/utils.py#LL147C8-L149C52. \r\n\r\nWe can probably drop them in `ModelAdmin.get_deleted_objects`.\r\n\r\nWith splitting up some models (e.g. BlacklistedThabloidUser, etc.) 
there may be more admins that need something like this.\n", "before_files": [{"content": "from django.contrib import admin, messages\nfrom django.db.models import Count\nfrom django.dispatch import Signal\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_filepond_widget.fields import FilePondFile\n\nfrom .forms import AlbumForm\nfrom .models import Album, Like, Photo\nfrom .services import extract_archive, save_photo\n\nalbum_uploaded = Signal()\n\n\[email protected](Album)\nclass AlbumAdmin(admin.ModelAdmin):\n \"\"\"Model for Album admin page.\"\"\"\n\n list_display = (\"title\", \"date\", \"num_photos\", \"hidden\", \"shareable\")\n fields = (\n \"title\",\n \"slug\",\n \"date\",\n \"event\",\n \"hidden\",\n \"shareable\",\n \"album_archive\",\n \"_cover\",\n )\n search_fields = (\"title\", \"date\")\n list_filter = (\"hidden\", \"shareable\")\n date_hierarchy = \"date\"\n prepopulated_fields = {\n \"slug\": (\n \"date\",\n \"title\",\n )\n }\n form = AlbumForm\n\n def get_queryset(self, request):\n \"\"\"Get Albums and add the amount of photos as an annotation.\"\"\"\n return Album.objects.annotate(photos_count=Count(\"photo\"))\n\n def num_photos(self, obj):\n \"\"\"Pretty-print the number of photos.\"\"\"\n return obj.photos_count\n\n num_photos.short_description = _(\"Number of photos\")\n num_photos.admin_order_field = \"photos_count\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"Save the new Album by extracting the archive.\"\"\"\n super().save_model(request, obj, form, change)\n\n archive = form.cleaned_data.get(\"album_archive\", None)\n if archive is not None:\n try:\n extract_archive(request, obj, archive)\n album_uploaded.send(sender=None, album=obj)\n except Exception as e:\n raise e\n finally:\n if isinstance(archive, FilePondFile):\n archive.remove()\n\n messages.add_message(\n request,\n messages.WARNING,\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n\n\nclass LikeInline(admin.StackedInline):\n model = Like\n extra = 0\n\n\[email protected](Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n \"\"\"Model for Photo admin page.\"\"\"\n\n list_display = (\n \"__str__\",\n \"album\",\n \"hidden\",\n \"num_likes\",\n )\n search_fields = (\"file\",)\n list_filter = (\"album\", \"hidden\")\n exclude = (\"_digest\",)\n\n inlines = [\n LikeInline,\n ]\n\n def save_model(self, request, obj, form, change):\n \"\"\"Save new Photo.\"\"\"\n super().save_model(request, obj, form, change)\n if change and obj.original_file == obj.file.name:\n return\n\n if save_photo(obj, obj.file, obj.file.name):\n messages.add_message(\n request,\n messages.WARNING,\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n else:\n messages.add_message(\n request, messages.ERROR, _(\"This photo already exists in the album.\")\n )\n", "path": "website/photos/admin.py"}]} | 1,700 | 294 |
gh_patches_debug_12313 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mypy 0.940 x Python 3.10 `scalar`overload
```
$ mypy strawberry/scalars.py
strawberry/scalars.py:9: error: No overload variant of "scalar" matches argument types "NewType", "str", "str", "Callable[[Any], Any]", "Callable[[Any], Any]" [call-overload]
strawberry/scalars.py:9: note: Possible overload variants:
strawberry/scalars.py:9: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]
strawberry/scalars.py:9: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T
strawberry/scalars.py:23: error: No overload variant of "scalar" matches argument types "NewType", "str", "str", "Callable[[Any], str]", "Callable[[Any], bytes]" [call-overload]
strawberry/scalars.py:23: note: Possible overload variants:
strawberry/scalars.py:23: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]
strawberry/scalars.py:23: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T
strawberry/scalars.py:31: error: No overload variant of "scalar" matches argument types "NewType", "str", "str", "Callable[[Any], str]", "Callable[[Any], bytes]" [call-overload]
strawberry/scalars.py:31: note: Possible overload variants:
strawberry/scalars.py:31: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]
strawberry/scalars.py:31: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T
strawberry/scalars.py:41: error: No overload variant of "scalar" matches argument types "NewType", "str", "str", "Callable[[Any], str]", "Callable[[Any], bytes]" [call-overload]
strawberry/scalars.py:41: note: Possible overload variants:
strawberry/scalars.py:41: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]
strawberry/scalars.py:41: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T
Found 4 errors in 1 file (checked 1 source file)
```
In 3.10, `NewType` is a [class rather than a function](https://docs.python.org/3/library/typing.html#newtype), I guess that's what is different with <= 3.9.
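
A minimal sketch of the version gate that difference calls for — the same shape the fix later in this entry uses:

```python
import sys
from typing import NewType, TypeVar, Union

if sys.version_info >= (3, 10):
    # On 3.10+ a NewType("X", bytes) instance is a class object, so the
    # TypeVar bound has to admit it explicitly.
    _T = TypeVar("_T", bound=Union[type, NewType])
else:
    _T = TypeVar("_T", bound=type)
```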
</issue>
<code>
[start of strawberry/custom_scalar.py]
1 from dataclasses import dataclass
2 from typing import Any, Callable, Mapping, Optional, Type, TypeVar, Union, overload
3
4 from graphql import GraphQLScalarType
5
6 from strawberry.type import StrawberryType
7
8 from .utils.str_converters import to_camel_case
9
10
11 _T = TypeVar("_T", bound=type)
12
13
14 def identity(x):
15 return x
16
17
18 @dataclass
19 class ScalarDefinition(StrawberryType):
20 name: str
21 description: Optional[str]
22 specified_by_url: Optional[str]
23 serialize: Optional[Callable]
24 parse_value: Optional[Callable]
25 parse_literal: Optional[Callable]
26
27 # Optionally store the GraphQLScalarType instance so that we don't get
28 # duplicates
29 implementation: Optional[GraphQLScalarType] = None
30
31 def copy_with(
32 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
33 ) -> Union[StrawberryType, type]:
34 return super().copy_with(type_var_map)
35
36 @property
37 def is_generic(self) -> bool:
38 return False
39
40
41 class ScalarWrapper:
42 _scalar_definition: ScalarDefinition
43
44 def __init__(self, wrap):
45 self.wrap = wrap
46
47 def __call__(self, *args, **kwargs):
48 return self.wrap(*args, **kwargs)
49
50
51 def _process_scalar(
52 cls: Type[_T],
53 *,
54 name: Optional[str] = None,
55 description: Optional[str] = None,
56 specified_by_url: Optional[str] = None,
57 serialize: Optional[Callable] = None,
58 parse_value: Optional[Callable] = None,
59 parse_literal: Optional[Callable] = None,
60 ):
61 name = name or to_camel_case(cls.__name__)
62
63 wrapper = ScalarWrapper(cls)
64 wrapper._scalar_definition = ScalarDefinition(
65 name=name,
66 description=description,
67 specified_by_url=specified_by_url,
68 serialize=serialize,
69 parse_literal=parse_literal,
70 parse_value=parse_value,
71 )
72
73 return wrapper
74
75
76 @overload
77 def scalar(
78 *,
79 name: Optional[str] = None,
80 description: Optional[str] = None,
81 specified_by_url: Optional[str] = None,
82 serialize: Callable = identity,
83 parse_value: Optional[Callable] = None,
84 parse_literal: Optional[Callable] = None,
85 ) -> Callable[[_T], _T]:
86 ...
87
88
89 @overload
90 def scalar(
91 cls: _T,
92 *,
93 name: Optional[str] = None,
94 description: Optional[str] = None,
95 specified_by_url: Optional[str] = None,
96 serialize: Callable = identity,
97 parse_value: Optional[Callable] = None,
98 parse_literal: Optional[Callable] = None,
99 ) -> _T:
100 ...
101
102
103 # FIXME: We are tricking pyright into thinking that we are returning the given type
104 # here or else it won't let us use any custom scalar to annotate attributes in
105 # dataclasses/types. This should be properly solved when implementing StrawberryScalar
106 def scalar(
107 cls=None,
108 *,
109 name: Optional[str] = None,
110 description: Optional[str] = None,
111 specified_by_url: Optional[str] = None,
112 serialize: Callable = identity,
113 parse_value: Optional[Callable] = None,
114 parse_literal: Optional[Callable] = None,
115 ) -> Any:
116 """Annotates a class or type as a GraphQL custom scalar.
117
118 Example usages:
119
120 >>> strawberry.scalar(
121 >>> datetime.date,
122 >>> serialize=lambda value: value.isoformat(),
123 >>> parse_value=datetime.parse_date
124 >>> )
125
126 >>> Base64Encoded = strawberry.scalar(
127 >>> NewType("Base64Encoded", bytes),
128 >>> serialize=base64.b64encode,
129 >>> parse_value=base64.b64decode
130 >>> )
131
132 >>> @strawberry.scalar(
133 >>> serialize=lambda value: ",".join(value.items),
134 >>> parse_value=lambda value: CustomList(value.split(","))
135 >>> )
136 >>> class CustomList:
137 >>> def __init__(self, items):
138 >>> self.items = items
139
140 """
141
142 if parse_value is None:
143 parse_value = cls
144
145 def wrap(cls):
146 return _process_scalar(
147 cls,
148 name=name,
149 description=description,
150 specified_by_url=specified_by_url,
151 serialize=serialize,
152 parse_value=parse_value,
153 parse_literal=parse_literal,
154 )
155
156 if cls is None:
157 return wrap
158
159 return wrap(cls)
160
[end of strawberry/custom_scalar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/custom_scalar.py b/strawberry/custom_scalar.py
--- a/strawberry/custom_scalar.py
+++ b/strawberry/custom_scalar.py
@@ -1,5 +1,16 @@
+import sys
from dataclasses import dataclass
-from typing import Any, Callable, Mapping, Optional, Type, TypeVar, Union, overload
+from typing import (
+ Any,
+ Callable,
+ Mapping,
+ NewType,
+ Optional,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
from graphql import GraphQLScalarType
@@ -8,7 +19,11 @@
from .utils.str_converters import to_camel_case
-_T = TypeVar("_T", bound=type)
+# in python 3.10+ NewType is a class
+if sys.version_info >= (3, 10):
+ _T = TypeVar("_T", bound=Union[type, NewType])
+else:
+ _T = TypeVar("_T", bound=type)
def identity(x):
| {"golden_diff": "diff --git a/strawberry/custom_scalar.py b/strawberry/custom_scalar.py\n--- a/strawberry/custom_scalar.py\n+++ b/strawberry/custom_scalar.py\n@@ -1,5 +1,16 @@\n+import sys\n from dataclasses import dataclass\n-from typing import Any, Callable, Mapping, Optional, Type, TypeVar, Union, overload\n+from typing import (\n+ Any,\n+ Callable,\n+ Mapping,\n+ NewType,\n+ Optional,\n+ Type,\n+ TypeVar,\n+ Union,\n+ overload,\n+)\n \n from graphql import GraphQLScalarType\n \n@@ -8,7 +19,11 @@\n from .utils.str_converters import to_camel_case\n \n \n-_T = TypeVar(\"_T\", bound=type)\n+# in python 3.10+ NewType is a class\n+if sys.version_info >= (3, 10):\n+ _T = TypeVar(\"_T\", bound=Union[type, NewType])\n+else:\n+ _T = TypeVar(\"_T\", bound=type)\n \n \n def identity(x):\n", "issue": "Mypy 0.940 x Python 3.10 `scalar`overload\n```\r\n$ mypy strawberry/scalars.py\r\nstrawberry/scalars.py:9: error: No overload variant of \"scalar\" matches argument types \"NewType\", \"str\", \"str\", \"Callable[[Any], Any]\", \"Callable[[Any], Any]\" [call-overload]\r\nstrawberry/scalars.py:9: note: Possible overload variants:\r\nstrawberry/scalars.py:9: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]\r\nstrawberry/scalars.py:9: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T\r\nstrawberry/scalars.py:23: error: No overload variant of \"scalar\" matches argument types \"NewType\", \"str\", \"str\", \"Callable[[Any], str]\", \"Callable[[Any], bytes]\" [call-overload]\r\nstrawberry/scalars.py:23: note: Possible overload variants:\r\nstrawberry/scalars.py:23: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]\r\nstrawberry/scalars.py:23: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T\r\nstrawberry/scalars.py:31: error: No overload variant of \"scalar\" matches argument types \"NewType\", \"str\", \"str\", \"Callable[[Any], str]\", \"Callable[[Any], bytes]\" [call-overload]\r\nstrawberry/scalars.py:31: note: Possible overload variants:\r\nstrawberry/scalars.py:31: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]\r\nstrawberry/scalars.py:31: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) 
-> _T\r\nstrawberry/scalars.py:41: error: No overload variant of \"scalar\" matches argument types \"NewType\", \"str\", \"str\", \"Callable[[Any], str]\", \"Callable[[Any], bytes]\" [call-overload]\r\nstrawberry/scalars.py:41: note: Possible overload variants:\r\nstrawberry/scalars.py:41: note: def scalar(*, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> Callable[[_T], _T]\r\nstrawberry/scalars.py:41: note: def [_T <: type] scalar(cls: _T, *, name: Optional[str] = ..., description: Optional[str] = ..., specified_by_url: Optional[str] = ..., serialize: Callable[..., Any] = ..., parse_value: Optional[Callable[..., Any]] = ..., parse_literal: Optional[Callable[..., Any]] = ...) -> _T\r\nFound 4 errors in 1 file (checked 1 source file)\r\n```\r\n\r\n\r\nIn 3.10, `NewType` is a [class rather than a function](https://docs.python.org/3/library/typing.html#newtype), I guess that's what is different with <= 3.9.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Any, Callable, Mapping, Optional, Type, TypeVar, Union, overload\n\nfrom graphql import GraphQLScalarType\n\nfrom strawberry.type import StrawberryType\n\nfrom .utils.str_converters import to_camel_case\n\n\n_T = TypeVar(\"_T\", bound=type)\n\n\ndef identity(x):\n return x\n\n\n@dataclass\nclass ScalarDefinition(StrawberryType):\n name: str\n description: Optional[str]\n specified_by_url: Optional[str]\n serialize: Optional[Callable]\n parse_value: Optional[Callable]\n parse_literal: Optional[Callable]\n\n # Optionally store the GraphQLScalarType instance so that we don't get\n # duplicates\n implementation: Optional[GraphQLScalarType] = None\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map)\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\nclass ScalarWrapper:\n _scalar_definition: ScalarDefinition\n\n def __init__(self, wrap):\n self.wrap = wrap\n\n def __call__(self, *args, **kwargs):\n return self.wrap(*args, **kwargs)\n\n\ndef _process_scalar(\n cls: Type[_T],\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Optional[Callable] = None,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n):\n name = name or to_camel_case(cls.__name__)\n\n wrapper = ScalarWrapper(cls)\n wrapper._scalar_definition = ScalarDefinition(\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_literal=parse_literal,\n parse_value=parse_value,\n )\n\n return wrapper\n\n\n@overload\ndef scalar(\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n) -> Callable[[_T], _T]:\n ...\n\n\n@overload\ndef scalar(\n cls: _T,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n) -> _T:\n ...\n\n\n# FIXME: We are tricking pyright into thinking that we are returning the given type\n# here or else it 
won't let us use any custom scalar to annotate attributes in\n# dataclasses/types. This should be properly solved when implementing StrawberryScalar\ndef scalar(\n cls=None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n specified_by_url: Optional[str] = None,\n serialize: Callable = identity,\n parse_value: Optional[Callable] = None,\n parse_literal: Optional[Callable] = None,\n) -> Any:\n \"\"\"Annotates a class or type as a GraphQL custom scalar.\n\n Example usages:\n\n >>> strawberry.scalar(\n >>> datetime.date,\n >>> serialize=lambda value: value.isoformat(),\n >>> parse_value=datetime.parse_date\n >>> )\n\n >>> Base64Encoded = strawberry.scalar(\n >>> NewType(\"Base64Encoded\", bytes),\n >>> serialize=base64.b64encode,\n >>> parse_value=base64.b64decode\n >>> )\n\n >>> @strawberry.scalar(\n >>> serialize=lambda value: \",\".join(value.items),\n >>> parse_value=lambda value: CustomList(value.split(\",\"))\n >>> )\n >>> class CustomList:\n >>> def __init__(self, items):\n >>> self.items = items\n\n \"\"\"\n\n if parse_value is None:\n parse_value = cls\n\n def wrap(cls):\n return _process_scalar(\n cls,\n name=name,\n description=description,\n specified_by_url=specified_by_url,\n serialize=serialize,\n parse_value=parse_value,\n parse_literal=parse_literal,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n", "path": "strawberry/custom_scalar.py"}]} | 2,910 | 240 |
gh_patches_debug_32981 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make Configuration object return boolean values
Many times, configuration values are just boolean values. The `Configuration` object currently only returns the literal configuration value. It would be useful to have this object return `True` instead of `"True"` to keep code a bit shorter.
We could have this:
```python
if Configuration().SOME_CONFIGURATION:
...
```
Instead of this:
```python
if Configuration().SOME_CONFIGURATION == "True":
...
```
</issue>
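For illustration, the change that was eventually adopted (see the golden diff further down in this record) coerces the raw environment-variable strings while the `Configuration` singleton is being populated. A minimal, self-contained sketch of that kind of coercion (the helper name `_coerce` is invented for this example and is not part of the project) could look like:

```python
def _coerce(value):
    # "True"/"False" become real booleans; numeric strings become int/float;
    # anything else is returned unchanged.
    if value == "True":
        return True
    if value == "False":
        return False
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value


assert _coerce("True") is True
assert _coerce("42") == 42
assert _coerce("my_meter_provider") == "my_meter_provider"
```

With values coerced this way, `if Configuration().SOME_CONFIGURATION:` behaves as the issue requests.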
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # FIXME find a better way to avoid all those "Expression has type "Any"" errors
16 # type: ignore
17
18 """
19 Simple configuration manager
20
21 This is a configuration manager for OpenTelemetry. It reads configuration
22 values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
23 characters are only alphanumeric characters and unserscores, except for the
24 first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
25
26 For example, these environment variables will be read:
27
28 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
29 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
30 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
31 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
32 5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
33
34 These won't:
35
36 1. ``OPENTELEMETRY_PYTH_SOMETHING``
37 2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
38 3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns always the same object.
43
44 For example, if the environment variable
45 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
56 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
77 ``"default_meter_provider"`` (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80
81 This object can be used by any OpenTelemetry component, native or external.
82 For that reason, the ``Configuration`` object is designed to be immutable.
83 If a component would change the value of one of the ``Configuration`` object
84 attributes then another component that relied on that value may break, leading
85 to bugs that are very hard to debug. To avoid this situation, the preferred
86 approach for components that need a different value than the one provided by
87 the ``Configuration`` object is to implement a mechanism that allows the user
88 to override this value instead of changing it.
89 """
90
91 from os import environ
92 from re import fullmatch
93
94
95 class Configuration:
96 _instance = None
97
98 __slots__ = []
99
100 def __new__(cls) -> "Configuration":
101 if Configuration._instance is None:
102
103 for key, value in environ.items():
104
105 match = fullmatch(
106 r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
107 )
108
109 if match is not None:
110
111 key = match.group(1)
112
113 setattr(Configuration, "_{}".format(key), value)
114 setattr(
115 Configuration,
116 key,
117 property(
118 fget=lambda cls, key=key: getattr(
119 cls, "_{}".format(key)
120 )
121 ),
122 )
123
124 Configuration.__slots__.append(key)
125
126 Configuration.__slots__ = tuple(Configuration.__slots__)
127
128 Configuration._instance = object.__new__(cls)
129
130 return cls._instance
131
132 def __getattr__(self, name):
133 return None
134
135 @classmethod
136 def _reset(cls):
137 """
138 This method "resets" the global configuration attributes
139
140 It is not intended to be used by production code but by testing code
141 only.
142 """
143
144 for slot in cls.__slots__:
145 if slot in cls.__dict__.keys():
146 delattr(cls, slot)
147 delattr(cls, "_{}".format(slot))
148
149 cls.__slots__ = []
150 cls._instance = None
151
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
[start of ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from logging import getLogger
16
17 from django.conf import settings
18
19 from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor
20 from opentelemetry.configuration import Configuration
21 from opentelemetry.ext.django.middleware import _DjangoMiddleware
22
23 _logger = getLogger(__name__)
24
25
26 class DjangoInstrumentor(BaseInstrumentor):
27 """An instrumentor for Django
28
29 See `BaseInstrumentor`
30 """
31
32 _opentelemetry_middleware = ".".join(
33 [_DjangoMiddleware.__module__, _DjangoMiddleware.__qualname__]
34 )
35
36 def _instrument(self, **kwargs):
37
38 # FIXME this is probably a pattern that will show up in the rest of the
39 # ext. Find a better way of implementing this.
40 # FIXME Probably the evaluation of strings into boolean values can be
41 # built inside the Configuration class itself with the magic method
42 # __bool__
43
44 if Configuration().DJANGO_INSTRUMENT != "True":
45 return
46
47 # This can not be solved, but is an inherent problem of this approach:
48 # the order of middleware entries matters, and here you have no control
49 # on that:
50 # https://docs.djangoproject.com/en/3.0/topics/http/middleware/#activating-middleware
51 # https://docs.djangoproject.com/en/3.0/ref/middleware/#middleware-ordering
52
53 settings_middleware = getattr(settings, "MIDDLEWARE", [])
54 settings_middleware.append(self._opentelemetry_middleware)
55
56 setattr(settings, "MIDDLEWARE", settings_middleware)
57
58 def _uninstrument(self, **kwargs):
59 settings_middleware = getattr(settings, "MIDDLEWARE", None)
60
61 # FIXME This is starting to smell like trouble. We have 2 mechanisms
62 # that may make this condition be True, one implemented in
63 # BaseInstrumentor and another one implemented in _instrument. Both
64 # stop _instrument from running and thus, settings_middleware not being
65 # set.
66 if settings_middleware is None or (
67 self._opentelemetry_middleware not in settings_middleware
68 ):
69 return
70
71 settings_middleware.remove(self._opentelemetry_middleware)
72 setattr(settings, "MIDDLEWARE", settings_middleware)
73
[end of ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py b/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py
--- a/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py
+++ b/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py
@@ -41,7 +41,7 @@
# built inside the Configuration class itself with the magic method
# __bool__
- if Configuration().DJANGO_INSTRUMENT != "True":
+ if not Configuration().DJANGO_INSTRUMENT:
return
# This can not be solved, but is an inherent problem of this approach:
diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -78,6 +78,11 @@
OpenTelemetry API provided providers are the default ones used if no
configuration is found in the environment variables).
+Configuration values that are exactly ``"True"`` or ``"False"`` will be
+converted to its boolean values of ``True`` and ``False`` respectively.
+
+Configuration values that can be casted to integers or floats will be casted.
+
This object can be used by any OpenTelemetry component, native or external.
For that reason, the ``Configuration`` object is designed to be immutable.
If a component would change the value of one of the ``Configuration`` object
@@ -110,6 +115,20 @@
key = match.group(1)
+ if value == "True":
+ value = True
+ elif value == "False":
+ value = False
+ else:
+ try:
+ value = int(value)
+ except ValueError:
+ pass
+ try:
+ value = float(value)
+ except ValueError:
+ pass
+
setattr(Configuration, "_{}".format(key), value)
setattr(
Configuration,
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py b/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py\n--- a/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py\n+++ b/ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py\n@@ -41,7 +41,7 @@\n # built inside the Configuration class itself with the magic method\n # __bool__\n \n- if Configuration().DJANGO_INSTRUMENT != \"True\":\n+ if not Configuration().DJANGO_INSTRUMENT:\n return\n \n # This can not be solved, but is an inherent problem of this approach:\ndiff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -78,6 +78,11 @@\n OpenTelemetry API provided providers are the default ones used if no\n configuration is found in the environment variables).\n \n+Configuration values that are exactly ``\"True\"`` or ``\"False\"`` will be\n+converted to its boolean values of ``True`` and ``False`` respectively.\n+\n+Configuration values that can be casted to integers or floats will be casted.\n+\n This object can be used by any OpenTelemetry component, native or external.\n For that reason, the ``Configuration`` object is designed to be immutable.\n If a component would change the value of one of the ``Configuration`` object\n@@ -110,6 +115,20 @@\n \n key = match.group(1)\n \n+ if value == \"True\":\n+ value = True\n+ elif value == \"False\":\n+ value = False\n+ else:\n+ try:\n+ value = int(value)\n+ except ValueError:\n+ pass\n+ try:\n+ value = float(value)\n+ except ValueError:\n+ pass\n+\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n", "issue": "Make Configuration object return boolean values\nMany times, configuration values are just boolean values. The `Configuration` object now only returns the literal configuration value. It would be useful to have this object return `True` instead of `\"True\"` to keep code a bit shorter\r\nWe could have this:\r\n```python\r\nif Configuration().SOME_CONFIGURATION:\r\n ...\r\n```\r\nInstead of this:\r\n```python\r\nif Configuration().SOME_CONFIGURATION == \"True\":\r\n ...\r\n```\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. 
``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n``\"default_meter_provider\"`` (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\nThis object can be used by any OpenTelemetry component, native or external.\nFor that reason, the ``Configuration`` object is designed to be immutable.\nIf a component would change the value of one of the ``Configuration`` object\nattributes then another component that relied on that value may break, leading\nto bugs that are very hard to debug. 
To avoid this situation, the preferred\napproach for components that need a different value than the one provided by\nthe ``Configuration`` object is to implement a mechanism that allows the user\nto override this value instead of changing it.\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n\n @classmethod\n def _reset(cls):\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n for slot in cls.__slots__:\n if slot in cls.__dict__.keys():\n delattr(cls, slot)\n delattr(cls, \"_{}\".format(slot))\n\n cls.__slots__ = []\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\n\nfrom django.conf import settings\n\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.configuration import Configuration\nfrom opentelemetry.ext.django.middleware import _DjangoMiddleware\n\n_logger = getLogger(__name__)\n\n\nclass DjangoInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for Django\n\n See `BaseInstrumentor`\n \"\"\"\n\n _opentelemetry_middleware = \".\".join(\n [_DjangoMiddleware.__module__, _DjangoMiddleware.__qualname__]\n )\n\n def _instrument(self, **kwargs):\n\n # FIXME this is probably a pattern that will show up in the rest of the\n # ext. 
Find a better way of implementing this.\n # FIXME Probably the evaluation of strings into boolean values can be\n # built inside the Configuration class itself with the magic method\n # __bool__\n\n if Configuration().DJANGO_INSTRUMENT != \"True\":\n return\n\n # This can not be solved, but is an inherent problem of this approach:\n # the order of middleware entries matters, and here you have no control\n # on that:\n # https://docs.djangoproject.com/en/3.0/topics/http/middleware/#activating-middleware\n # https://docs.djangoproject.com/en/3.0/ref/middleware/#middleware-ordering\n\n settings_middleware = getattr(settings, \"MIDDLEWARE\", [])\n settings_middleware.append(self._opentelemetry_middleware)\n\n setattr(settings, \"MIDDLEWARE\", settings_middleware)\n\n def _uninstrument(self, **kwargs):\n settings_middleware = getattr(settings, \"MIDDLEWARE\", None)\n\n # FIXME This is starting to smell like trouble. We have 2 mechanisms\n # that may make this condition be True, one implemented in\n # BaseInstrumentor and another one implemented in _instrument. Both\n # stop _instrument from running and thus, settings_middleware not being\n # set.\n if settings_middleware is None or (\n self._opentelemetry_middleware not in settings_middleware\n ):\n return\n\n settings_middleware.remove(self._opentelemetry_middleware)\n setattr(settings, \"MIDDLEWARE\", settings_middleware)\n", "path": "ext/opentelemetry-ext-django/src/opentelemetry/ext/django/__init__.py"}]} | 2,970 | 478 |
gh_patches_debug_26921 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-1494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PGI doesn't accept "-pthread" switch
I'm trying to compile OpenMPI 2.1.2 with PGI 18.4 and I stumbled upon a PGI error saying that it doesn't accept the -pthread switch.
After a bit of research I found this thread: https://www.pgroup.com/userforum/viewtopic.php?t=5253&sid=93356f57a47eee097b569b79bc744676
Basically, PGI accepts -lpthread instead,
and one solution to make PGI accept -pthread would be to modify the siterc file in $PGI_INSTALL/bin/siterc.
</issue>
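For context, the fix that was eventually merged (the golden diff further down in this record) takes exactly that route: it appends a switch-replacement rule to the PGI `siterc` file at install time. A small sketch of the relevant constant, mirroring the diff with the surrounding easyblock machinery omitted, is:

```python
# Rule appended to $PGI_INSTALL/bin/siterc so that the PGI drivers rewrite the
# unknown -pthread switch into -lpthread at link time.
SITERC_PTHREAD_SWITCH = """
# replace unknown switch -pthread with -lpthread
switch -pthread is replace(-lpthread) positional(linker);
"""

# In the easyblock this text is simply appended to the existing siterc file,
# e.g. write_file(siterc_path, SITERC_PTHREAD_SWITCH, append=True)
```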
<code>
[start of easybuild/easyblocks/p/pgi.py]
1 ##
2 # Copyright 2015-2018 Bart Oldeman
3 # Copyright 2016-2018 Forschungszentrum Juelich
4 #
5 # This file is triple-licensed under GPLv2 (see below), MIT, and
6 # BSD three-clause licenses.
7 #
8 # This file is part of EasyBuild,
9 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
10 # with support of Ghent University (http://ugent.be/hpc),
11 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
12 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
13 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
14 #
15 # https://github.com/easybuilders/easybuild
16 #
17 # EasyBuild is free software: you can redistribute it and/or modify
18 # it under the terms of the GNU General Public License as published by
19 # the Free Software Foundation v2.
20 #
21 # EasyBuild is distributed in the hope that it will be useful,
22 # but WITHOUT ANY WARRANTY; without even the implied warranty of
23 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 # GNU General Public License for more details.
25 #
26 # You should have received a copy of the GNU General Public License
27 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
28 ##
29 """
30 EasyBuild support for installing PGI compilers, implemented as an easyblock
31
32 @author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)
33 @author: Damian Alvarez (Forschungszentrum Juelich)
34 """
35 import os
36 import fileinput
37 import re
38 import sys
39
40 import easybuild.tools.environment as env
41 from easybuild.easyblocks.generic.packedbinary import PackedBinary
42 from easybuild.framework.easyconfig import CUSTOM
43 from easybuild.framework.easyconfig.types import ensure_iterable_license_specs
44 from easybuild.tools.filetools import find_flexlm_license, write_file
45 from easybuild.tools.run import run_cmd
46 from easybuild.tools.modules import get_software_root
47
48
49 # contents for siterc file to make PGI pick up $LIBRARY_PATH
50 # cfr. https://www.pgroup.com/support/link.htm#lib_path_ldflags
51 SITERC_LIBRARY_PATH = """
52 # get the value of the environment variable LIBRARY_PATH
53 variable LIBRARY_PATH is environment(LIBRARY_PATH);
54
55 # split this value at colons, separate by -L, prepend 1st one by -L
56 variable library_path is
57 default($if($LIBRARY_PATH,-L$replace($LIBRARY_PATH,":", -L)));
58
59 # add the -L arguments to the link line
60 append LDLIBARGS=$library_path;
61
62 # also include the location where libm & co live on Debian-based systems
63 # cfr. https://github.com/easybuilders/easybuild-easyblocks/pull/919
64 append LDLIBARGS=-L/usr/lib/x86_64-linux-gnu;
65 """
66
67
68 class EB_PGI(PackedBinary):
69 """
70 Support for installing the PGI compilers
71 """
72
73 @staticmethod
74 def extra_options():
75 extra_vars = {
76 'install_amd': [True, "Install AMD software components", CUSTOM],
77 'install_java': [True, "Install Java JRE for graphical debugger", CUSTOM],
78 'install_managed': [True, "Install OpenACC Unified Memory Evaluation package", CUSTOM],
79 'install_nvidia': [True, "Install CUDA Toolkit Components", CUSTOM],
80 }
81 return PackedBinary.extra_options(extra_vars)
82
83 def __init__(self, *args, **kwargs):
84 """Easyblock constructor, define custom class variables specific to PGI."""
85 super(EB_PGI, self).__init__(*args, **kwargs)
86
87 self.license_file = 'UNKNOWN'
88 self.license_env_var = 'UNKNOWN' # Probably not really necessary for PGI
89
90 self.pgi_install_subdir = os.path.join('linux86-64', self.version)
91
92 def configure_step(self):
93 """
94 Handle license file.
95 """
96 default_lic_env_var = 'PGROUPD_LICENSE_FILE'
97 license_specs = ensure_iterable_license_specs(self.cfg['license_file'])
98 lic_specs, self.license_env_var = find_flexlm_license(custom_env_vars=[default_lic_env_var],
99 lic_specs=license_specs)
100
101 if lic_specs:
102 if self.license_env_var is None:
103 self.log.info("Using PGI license specifications from 'license_file': %s", lic_specs)
104 self.license_env_var = default_lic_env_var
105 else:
106 self.log.info("Using PGI license specifications from %s: %s", self.license_env_var, lic_specs)
107
108 self.license_file = os.pathsep.join(lic_specs)
109 env.setvar(self.license_env_var, self.license_file)
110
111 else:
112 self.log.info("No viable license specifications found, assuming PGI Community Edition...")
113
114 def install_step(self):
115 """Install by running install command."""
116
117 pgi_env_vars = {
118 'PGI_ACCEPT_EULA': 'accept',
119 'PGI_INSTALL_AMD': str(self.cfg['install_amd']).lower(),
120 'PGI_INSTALL_DIR': self.installdir,
121 'PGI_INSTALL_JAVA': str(self.cfg['install_java']).lower(),
122 'PGI_INSTALL_MANAGED': str(self.cfg['install_managed']).lower(),
123 'PGI_INSTALL_NVIDIA': str(self.cfg['install_nvidia']).lower(),
124 'PGI_SILENT': 'true',
125 }
126 cmd = "%s ./install" % ' '.join(['%s=%s' % x for x in sorted(pgi_env_vars.items())])
127 run_cmd(cmd, log_all=True, simple=True)
128
129 # make sure localrc uses GCC in PATH, not always the system GCC, and does not use a system g77 but gfortran
130 install_abs_subdir = os.path.join(self.installdir, self.pgi_install_subdir)
131 filename = os.path.join(install_abs_subdir, "bin", "makelocalrc")
132 for line in fileinput.input(filename, inplace='1', backup='.orig'):
133 line = re.sub(r"^PATH=/", r"#PATH=/", line)
134 sys.stdout.write(line)
135
136 cmd = "%s -x %s -g77 /" % (filename, install_abs_subdir)
137 run_cmd(cmd, log_all=True, simple=True)
138
139 # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so
140 # If we use the EB libnuma, delete those symbolic links to ensure they are not used
141 if get_software_root("numactl"):
142 for filename in ["libnuma.so", "libnuma.so.1"]:
143 path = os.path.join(install_abs_subdir, "lib", filename)
144 if os.path.islink(path):
145 os.remove(path)
146
147 # install (or update) siterc file to make PGI consider $LIBRARY_PATH
148 siterc_path = os.path.join(self.installdir, self.pgi_install_subdir, 'bin', 'siterc')
149 write_file(siterc_path, SITERC_LIBRARY_PATH, append=True)
150 self.log.info("Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s",
151 siterc_path, SITERC_LIBRARY_PATH)
152
153 def sanity_check_step(self):
154 """Custom sanity check for PGI"""
155 prefix = self.pgi_install_subdir
156 custom_paths = {
157 'files': [os.path.join(prefix, 'bin', x) for x in ['pgcc', 'pgc++', 'pgf77', 'pgfortran', 'siterc']],
158 'dirs': [os.path.join(prefix, 'bin'), os.path.join(prefix, 'lib'),
159 os.path.join(prefix, 'include'), os.path.join(prefix, 'man')]
160 }
161 super(EB_PGI, self).sanity_check_step(custom_paths=custom_paths)
162
163 def make_module_req_guess(self):
164 """Prefix subdirectories in PGI install dir considered for environment variables defined in module file."""
165 dirs = super(EB_PGI, self).make_module_req_guess()
166 for key in dirs:
167 dirs[key] = [os.path.join(self.pgi_install_subdir, d) for d in dirs[key]]
168
169 # $CPATH should not be defined in module for PGI, it causes problems
170 # cfr. https://github.com/easybuilders/easybuild-easyblocks/issues/830
171 if 'CPATH' in dirs:
172 self.log.info("Removing $CPATH entry: %s", dirs['CPATH'])
173 del dirs['CPATH']
174
175 return dirs
176
177 def make_module_extra(self):
178 """Add environment variables LM_LICENSE_FILE and PGI for license file and PGI location"""
179 txt = super(EB_PGI, self).make_module_extra()
180 if self.license_env_var:
181 txt += self.module_generator.prepend_paths(self.license_env_var, [self.license_file],
182 allow_abs=True, expand_relpaths=False)
183 txt += self.module_generator.set_environment('PGI', self.installdir)
184 return txt
185
[end of easybuild/easyblocks/p/pgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/easyblocks/p/pgi.py b/easybuild/easyblocks/p/pgi.py
--- a/easybuild/easyblocks/p/pgi.py
+++ b/easybuild/easyblocks/p/pgi.py
@@ -64,6 +64,12 @@
append LDLIBARGS=-L/usr/lib/x86_64-linux-gnu;
"""
+# contents for siterc file to make PGI accept the -pthread switch
+SITERC_PTHREAD_SWITCH = """
+# replace unknown switch -pthread with -lpthread
+switch -pthread is replace(-lpthread) positional(linker);
+"""
+
class EB_PGI(PackedBinary):
"""
@@ -144,11 +150,14 @@
if os.path.islink(path):
os.remove(path)
- # install (or update) siterc file to make PGI consider $LIBRARY_PATH
+ # install (or update) siterc file to make PGI consider $LIBRARY_PATH and accept -pthread
siterc_path = os.path.join(self.installdir, self.pgi_install_subdir, 'bin', 'siterc')
write_file(siterc_path, SITERC_LIBRARY_PATH, append=True)
self.log.info("Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s",
siterc_path, SITERC_LIBRARY_PATH)
+ write_file(siterc_path, SITERC_PTHREAD_SWITCH, append=True)
+ self.log.info("Append instructions to replace -pthread with -lpthread to siterc file at %s: %s",
+ siterc_path, SITERC_PTHREAD_SWITCH)
def sanity_check_step(self):
"""Custom sanity check for PGI"""
| {"golden_diff": "diff --git a/easybuild/easyblocks/p/pgi.py b/easybuild/easyblocks/p/pgi.py\n--- a/easybuild/easyblocks/p/pgi.py\n+++ b/easybuild/easyblocks/p/pgi.py\n@@ -64,6 +64,12 @@\n append LDLIBARGS=-L/usr/lib/x86_64-linux-gnu;\n \"\"\"\n \n+# contents for siterc file to make PGI accept the -pthread switch\n+SITERC_PTHREAD_SWITCH = \"\"\"\n+# replace unknown switch -pthread with -lpthread\n+switch -pthread is replace(-lpthread) positional(linker);\n+\"\"\"\n+\n \n class EB_PGI(PackedBinary):\n \"\"\"\n@@ -144,11 +150,14 @@\n if os.path.islink(path):\n os.remove(path)\n \n- # install (or update) siterc file to make PGI consider $LIBRARY_PATH\n+ # install (or update) siterc file to make PGI consider $LIBRARY_PATH and accept -pthread\n siterc_path = os.path.join(self.installdir, self.pgi_install_subdir, 'bin', 'siterc')\n write_file(siterc_path, SITERC_LIBRARY_PATH, append=True)\n self.log.info(\"Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s\",\n siterc_path, SITERC_LIBRARY_PATH)\n+ write_file(siterc_path, SITERC_PTHREAD_SWITCH, append=True)\n+ self.log.info(\"Append instructions to replace -pthread with -lpthread to siterc file at %s: %s\",\n+ siterc_path, SITERC_PTHREAD_SWITCH)\n \n def sanity_check_step(self):\n \"\"\"Custom sanity check for PGI\"\"\"\n", "issue": "PGI doesn't accept \"-pthread\" switch\nI'm trying to compile OpenMPI 2.1.2 with PGI 18.4 and I stumbled upon a pgi error saying that it doen't accept -pthread switch\r\n\r\nAfter few research I saw this thread: https://www.pgroup.com/userforum/viewtopic.php?t=5253&sid=93356f57a47eee097b569b79bc744676\r\nbasicaly PGI accept -lpthread instead\r\nand one solution for PGI to accept -pthread would be to modify the siterc in $PGI_INSTALL/bin/siterc\n", "before_files": [{"content": "##\n# Copyright 2015-2018 Bart Oldeman\n# Copyright 2016-2018 Forschungszentrum Juelich\n#\n# This file is triple-licensed under GPLv2 (see below), MIT, and\n# BSD three-clause licenses.\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing PGI compilers, implemented as an easyblock\n\n@author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)\n@author: Damian Alvarez (Forschungszentrum Juelich)\n\"\"\"\nimport os\nimport fileinput\nimport re\nimport sys\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.packedbinary import PackedBinary\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.easyconfig.types import ensure_iterable_license_specs\nfrom easybuild.tools.filetools import find_flexlm_license, write_file\nfrom easybuild.tools.run import run_cmd\nfrom easybuild.tools.modules import get_software_root\n\n\n# contents for siterc file to make PGI pick up $LIBRARY_PATH\n# cfr. https://www.pgroup.com/support/link.htm#lib_path_ldflags\nSITERC_LIBRARY_PATH = \"\"\"\n# get the value of the environment variable LIBRARY_PATH\nvariable LIBRARY_PATH is environment(LIBRARY_PATH);\n\n# split this value at colons, separate by -L, prepend 1st one by -L\nvariable library_path is\ndefault($if($LIBRARY_PATH,-L$replace($LIBRARY_PATH,\":\", -L)));\n\n# add the -L arguments to the link line\nappend LDLIBARGS=$library_path;\n\n# also include the location where libm & co live on Debian-based systems\n# cfr. https://github.com/easybuilders/easybuild-easyblocks/pull/919\nappend LDLIBARGS=-L/usr/lib/x86_64-linux-gnu;\n\"\"\"\n\n\nclass EB_PGI(PackedBinary):\n \"\"\"\n Support for installing the PGI compilers\n \"\"\"\n\n @staticmethod\n def extra_options():\n extra_vars = {\n 'install_amd': [True, \"Install AMD software components\", CUSTOM],\n 'install_java': [True, \"Install Java JRE for graphical debugger\", CUSTOM],\n 'install_managed': [True, \"Install OpenACC Unified Memory Evaluation package\", CUSTOM],\n 'install_nvidia': [True, \"Install CUDA Toolkit Components\", CUSTOM],\n }\n return PackedBinary.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Easyblock constructor, define custom class variables specific to PGI.\"\"\"\n super(EB_PGI, self).__init__(*args, **kwargs)\n\n self.license_file = 'UNKNOWN'\n self.license_env_var = 'UNKNOWN' # Probably not really necessary for PGI\n\n self.pgi_install_subdir = os.path.join('linux86-64', self.version)\n\n def configure_step(self):\n \"\"\"\n Handle license file.\n \"\"\"\n default_lic_env_var = 'PGROUPD_LICENSE_FILE'\n license_specs = ensure_iterable_license_specs(self.cfg['license_file'])\n lic_specs, self.license_env_var = find_flexlm_license(custom_env_vars=[default_lic_env_var],\n lic_specs=license_specs)\n\n if lic_specs:\n if self.license_env_var is None:\n self.log.info(\"Using PGI license specifications from 'license_file': %s\", lic_specs)\n self.license_env_var = default_lic_env_var\n else:\n self.log.info(\"Using PGI license specifications from %s: %s\", self.license_env_var, lic_specs)\n\n self.license_file = os.pathsep.join(lic_specs)\n env.setvar(self.license_env_var, self.license_file)\n\n else:\n self.log.info(\"No viable license specifications found, assuming PGI Community Edition...\")\n\n def install_step(self):\n \"\"\"Install by running install command.\"\"\"\n\n pgi_env_vars = {\n 'PGI_ACCEPT_EULA': 'accept',\n 'PGI_INSTALL_AMD': str(self.cfg['install_amd']).lower(),\n 'PGI_INSTALL_DIR': self.installdir,\n 'PGI_INSTALL_JAVA': str(self.cfg['install_java']).lower(),\n 'PGI_INSTALL_MANAGED': str(self.cfg['install_managed']).lower(),\n 'PGI_INSTALL_NVIDIA': 
str(self.cfg['install_nvidia']).lower(),\n 'PGI_SILENT': 'true',\n }\n cmd = \"%s ./install\" % ' '.join(['%s=%s' % x for x in sorted(pgi_env_vars.items())])\n run_cmd(cmd, log_all=True, simple=True)\n\n # make sure localrc uses GCC in PATH, not always the system GCC, and does not use a system g77 but gfortran\n install_abs_subdir = os.path.join(self.installdir, self.pgi_install_subdir)\n filename = os.path.join(install_abs_subdir, \"bin\", \"makelocalrc\")\n for line in fileinput.input(filename, inplace='1', backup='.orig'):\n line = re.sub(r\"^PATH=/\", r\"#PATH=/\", line)\n sys.stdout.write(line)\n\n cmd = \"%s -x %s -g77 /\" % (filename, install_abs_subdir)\n run_cmd(cmd, log_all=True, simple=True)\n\n # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so\n # If we use the EB libnuma, delete those symbolic links to ensure they are not used\n if get_software_root(\"numactl\"):\n for filename in [\"libnuma.so\", \"libnuma.so.1\"]:\n path = os.path.join(install_abs_subdir, \"lib\", filename)\n if os.path.islink(path):\n os.remove(path)\n\n # install (or update) siterc file to make PGI consider $LIBRARY_PATH\n siterc_path = os.path.join(self.installdir, self.pgi_install_subdir, 'bin', 'siterc')\n write_file(siterc_path, SITERC_LIBRARY_PATH, append=True)\n self.log.info(\"Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s\",\n siterc_path, SITERC_LIBRARY_PATH)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for PGI\"\"\"\n prefix = self.pgi_install_subdir\n custom_paths = {\n 'files': [os.path.join(prefix, 'bin', x) for x in ['pgcc', 'pgc++', 'pgf77', 'pgfortran', 'siterc']],\n 'dirs': [os.path.join(prefix, 'bin'), os.path.join(prefix, 'lib'),\n os.path.join(prefix, 'include'), os.path.join(prefix, 'man')]\n }\n super(EB_PGI, self).sanity_check_step(custom_paths=custom_paths)\n\n def make_module_req_guess(self):\n \"\"\"Prefix subdirectories in PGI install dir considered for environment variables defined in module file.\"\"\"\n dirs = super(EB_PGI, self).make_module_req_guess()\n for key in dirs:\n dirs[key] = [os.path.join(self.pgi_install_subdir, d) for d in dirs[key]]\n\n # $CPATH should not be defined in module for PGI, it causes problems\n # cfr. https://github.com/easybuilders/easybuild-easyblocks/issues/830\n if 'CPATH' in dirs:\n self.log.info(\"Removing $CPATH entry: %s\", dirs['CPATH'])\n del dirs['CPATH']\n\n return dirs\n\n def make_module_extra(self):\n \"\"\"Add environment variables LM_LICENSE_FILE and PGI for license file and PGI location\"\"\"\n txt = super(EB_PGI, self).make_module_extra()\n if self.license_env_var:\n txt += self.module_generator.prepend_paths(self.license_env_var, [self.license_file],\n allow_abs=True, expand_relpaths=False)\n txt += self.module_generator.set_environment('PGI', self.installdir)\n return txt\n", "path": "easybuild/easyblocks/p/pgi.py"}]} | 3,158 | 390 |
gh_patches_debug_23986 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-4724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FollowPath] position_update log frequency
position_update log entries for FollowPath appear too often, like every 1.5 seconds, with distance changes from 1 meter to 5 meters depending on the random speed. It kind of unnecessarily spams the terminal.

An interval of 5 seconds or 10 meters would be more appropriate.
</issue>
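The fix that was merged (see the golden diff below) throttles `emit_event` by remembering when the last entry was written. A stand-alone sketch of that time-based throttle follows; the class and method names here are illustrative and are not the bot's actual API:

```python
import time


class LogThrottle:
    """Suppress log entries that arrive less than `min_interval` seconds apart."""

    def __init__(self, min_interval=5.0):
        self.min_interval = min_interval
        self.last_log_time = 0.0

    def should_log(self):
        now = time.time()
        if now - self.last_log_time > self.min_interval:
            self.last_log_time = now
            return True
        return False
```

In the actual patch the interval comes from the task configuration (`log_interval`), defaulting to 0 so existing behaviour is preserved.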
<code>
[start of pokemongo_bot/base_task.py]
1 import logging
2
3
4 class BaseTask(object):
5 TASK_API_VERSION = 1
6
7 def __init__(self, bot, config):
8 """
9
10 :param bot:
11 :type bot: pokemongo_bot.PokemonGoBot
12 :param config:
13 :return:
14 """
15 self.bot = bot
16 self.config = config
17 self._validate_work_exists()
18 self.logger = logging.getLogger(type(self).__name__)
19 self.enabled = config.get('enabled', True)
20 self.initialize()
21
22 def _validate_work_exists(self):
23 method = getattr(self, 'work', None)
24 if not method or not callable(method):
25 raise NotImplementedError('Missing "work" method')
26
27 def emit_event(self, event, sender=None, level='info', formatted='', data={}):
28 if not sender:
29 sender=self
30 self.bot.event_manager.emit(
31 event,
32 sender=sender,
33 level=level,
34 formatted=formatted,
35 data=data
36 )
37
38 def initialize(self):
39 pass
40
[end of pokemongo_bot/base_task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/base_task.py b/pokemongo_bot/base_task.py
--- a/pokemongo_bot/base_task.py
+++ b/pokemongo_bot/base_task.py
@@ -1,5 +1,7 @@
import logging
+import time
+
class BaseTask(object):
TASK_API_VERSION = 1
@@ -17,6 +19,7 @@
self._validate_work_exists()
self.logger = logging.getLogger(type(self).__name__)
self.enabled = config.get('enabled', True)
+ self.last_log_time = time.time()
self.initialize()
def _validate_work_exists(self):
@@ -27,13 +30,17 @@
def emit_event(self, event, sender=None, level='info', formatted='', data={}):
if not sender:
sender=self
- self.bot.event_manager.emit(
- event,
- sender=sender,
- level=level,
- formatted=formatted,
- data=data
- )
+
+ # Print log only if X seconds are passed from last log
+ if (time.time() - self.last_log_time) > self.config.get('log_interval', 0):
+ self.last_log_time = time.time()
+ self.bot.event_manager.emit(
+ event,
+ sender=sender,
+ level=level,
+ formatted=formatted,
+ data=data
+ )
def initialize(self):
pass
| {"golden_diff": "diff --git a/pokemongo_bot/base_task.py b/pokemongo_bot/base_task.py\n--- a/pokemongo_bot/base_task.py\n+++ b/pokemongo_bot/base_task.py\n@@ -1,5 +1,7 @@\n import logging\n \n+import time\n+\n \n class BaseTask(object):\n TASK_API_VERSION = 1\n@@ -17,6 +19,7 @@\n self._validate_work_exists()\n self.logger = logging.getLogger(type(self).__name__)\n self.enabled = config.get('enabled', True)\n+ self.last_log_time = time.time()\n self.initialize()\n \n def _validate_work_exists(self):\n@@ -27,13 +30,17 @@\n def emit_event(self, event, sender=None, level='info', formatted='', data={}):\n if not sender:\n sender=self\n- self.bot.event_manager.emit(\n- event,\n- sender=sender,\n- level=level,\n- formatted=formatted,\n- data=data\n- )\n+\n+ # Print log only if X seconds are passed from last log\n+ if (time.time() - self.last_log_time) > self.config.get('log_interval', 0):\n+ self.last_log_time = time.time()\n+ self.bot.event_manager.emit(\n+ event,\n+ sender=sender,\n+ level=level,\n+ formatted=formatted,\n+ data=data\n+ )\n \n def initialize(self):\n pass\n", "issue": "[FollowPath] position_update log frequency\nposition_update log entries for FollowPath appear too often, like every 1.5 seconds, with distance changes from 1 meter to 5 meter depend on the random speed. It's kind of unnecessarily spam the terminal.\n\nAn interval of 5 seconds or 10 meter should be more relevant.\n\n", "before_files": [{"content": "import logging\n\n\nclass BaseTask(object):\n TASK_API_VERSION = 1\n\n def __init__(self, bot, config):\n \"\"\"\n\n :param bot:\n :type bot: pokemongo_bot.PokemonGoBot\n :param config:\n :return:\n \"\"\"\n self.bot = bot\n self.config = config\n self._validate_work_exists()\n self.logger = logging.getLogger(type(self).__name__)\n self.enabled = config.get('enabled', True)\n self.initialize()\n\n def _validate_work_exists(self):\n method = getattr(self, 'work', None)\n if not method or not callable(method):\n raise NotImplementedError('Missing \"work\" method')\n\n def emit_event(self, event, sender=None, level='info', formatted='', data={}):\n if not sender:\n sender=self\n self.bot.event_manager.emit(\n event,\n sender=sender,\n level=level,\n formatted=formatted,\n data=data\n )\n\n def initialize(self):\n pass\n", "path": "pokemongo_bot/base_task.py"}]} | 897 | 318 |
gh_patches_debug_3090 | rasdani/github-patches | git_diff | docker__docker-py-1671 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue with port option in 2.4.0 version
Hi,
I updated to 2.4 today and I got an issue with docker-compose when I tried to add the following line to my configuration file (docker-compose.yml):
`ports:
- "127.0.0.1:9292:9090"`
I got the following error:
```
ERROR: for ContainerName expected string or buffer
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 118, in perform_command
handler(command, command_options)
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 926, in up
scale_override=parse_scale_args(options['--scale']),
File "/usr/local/lib/python2.7/dist-packages/compose/project.py", line 424, in up
get_deps
File "/usr/local/lib/python2.7/dist-packages/compose/parallel.py", line 69, in parallel_execute
raise error_to_reraise
TypeError: expected string or buffer
```
I have no issue when I downgrade back to the 2.3 version of the package.
To reproduce the issue, I use the following configuration (it doesn't seem to depend on the image):
```
version: '2'
services :
ContainerName:
image: bae2d441e03a
ports:
- "127.0.0.1:9292:9090"
```
I run on Ubuntu 14.04.5 LTS with the following packages:
```
docker==2.4.0
docker-compose==1.14.0
docker-pycreds==0.2.1
dockerpty==0.4.1
Python 2.7.6
Client:
Version: 17.05.0-ce
API version: 1.29
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Server:
Version: 17.05.0-ce
API version: 1.29 (minimum version 1.12)
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Experimental: false
```
</issue>
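The golden diff below works around this by duck-typing the object Compose 1.14 now passes in and converting it back to the classic port string before parsing. A tiny, runnable illustration of that check follows; `FakeServicePort` is a stand-in invented here, since Compose's real class is not part of this record:

```python
class FakeServicePort:
    """Stand-in for the port object docker-compose 1.14 hands to docker-py."""

    def legacy_repr(self):
        return "127.0.0.1:9292:9090"


def normalize_port(port):
    # Same shape as the hack in the golden diff: prefer the legacy string form.
    if hasattr(port, "legacy_repr"):
        port = port.legacy_repr()
    return str(port)


print(normalize_port(FakeServicePort()))  # 127.0.0.1:9292:9090
print(normalize_port("8080:80"))          # 8080:80
```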
<code>
[start of docker/utils/ports.py]
1 import re
2
3 PORT_SPEC = re.compile(
4 "^" # Match full string
5 "(" # External part
6 "((?P<host>[a-fA-F\d.:]+):)?" # Address
7 "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
8 ")?"
9 "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
10 "(?P<proto>/(udp|tcp))?" # Protocol
11 "$" # Match full string
12 )
13
14
15 def add_port_mapping(port_bindings, internal_port, external):
16 if internal_port in port_bindings:
17 port_bindings[internal_port].append(external)
18 else:
19 port_bindings[internal_port] = [external]
20
21
22 def add_port(port_bindings, internal_port_range, external_range):
23 if external_range is None:
24 for internal_port in internal_port_range:
25 add_port_mapping(port_bindings, internal_port, None)
26 else:
27 ports = zip(internal_port_range, external_range)
28 for internal_port, external_port in ports:
29 add_port_mapping(port_bindings, internal_port, external_port)
30
31
32 def build_port_bindings(ports):
33 port_bindings = {}
34 for port in ports:
35 internal_port_range, external_range = split_port(port)
36 add_port(port_bindings, internal_port_range, external_range)
37 return port_bindings
38
39
40 def _raise_invalid_port(port):
41 raise ValueError('Invalid port "%s", should be '
42 '[[remote_ip:]remote_port[-remote_port]:]'
43 'port[/protocol]' % port)
44
45
46 def port_range(start, end, proto, randomly_available_port=False):
47 if not start:
48 return start
49 if not end:
50 return [start + proto]
51 if randomly_available_port:
52 return ['{}-{}'.format(start, end) + proto]
53 return [str(port) + proto for port in range(int(start), int(end) + 1)]
54
55
56 def split_port(port):
57 port = str(port)
58 match = PORT_SPEC.match(port)
59 if match is None:
60 _raise_invalid_port(port)
61 parts = match.groupdict()
62
63 host = parts['host']
64 proto = parts['proto'] or ''
65 internal = port_range(parts['int'], parts['int_end'], proto)
66 external = port_range(
67 parts['ext'], parts['ext_end'], '', len(internal) == 1)
68
69 if host is None:
70 if external is not None and len(internal) != len(external):
71 raise ValueError('Port ranges don\'t match in length')
72 return internal, external
73 else:
74 if not external:
75 external = [None] * len(internal)
76 elif len(internal) != len(external):
77 raise ValueError('Port ranges don\'t match in length')
78 return internal, [(host, ext_port) for ext_port in external]
79
[end of docker/utils/ports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/ports.py b/docker/utils/ports.py
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -54,6 +54,11 @@
def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
| {"golden_diff": "diff --git a/docker/utils/ports.py b/docker/utils/ports.py\n--- a/docker/utils/ports.py\n+++ b/docker/utils/ports.py\n@@ -54,6 +54,11 @@\n \n \n def split_port(port):\n+ if hasattr(port, 'legacy_repr'):\n+ # This is the worst hack, but it prevents a bug in Compose 1.14.0\n+ # https://github.com/docker/docker-py/issues/1668\n+ # TODO: remove once fixed in Compose stable\n+ port = port.legacy_repr()\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n", "issue": "Issue with port option in 2.4.0 version\nHi,\r\nI update to the 2.4 today and i got issue with docker-compose when i try to add the following line to my configuration file (docker-compose.yml) : \r\n`ports:\r\n - \"127.0.0.1:9292:9090\"`\r\n\r\nI got the following error:\r\n\r\n`\r\nERROR: for ContainerName expected string or buffer\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 68, in main\r\n command()\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 118, in perform_command\r\n handler(command, command_options)\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 926, in up\r\n scale_override=parse_scale_args(options['--scale']),\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/project.py\", line 424, in up\r\n get_deps\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/parallel.py\", line 69, in parallel_execute\r\n raise error_to_reraise\r\nTypeError: expected string or buffer\r\n`\r\n\r\nI have no issue when i downgrade again to the 2.3 version of the package\r\n\r\nTo reproduce the issue, i use the following configuration ( it doesn't seem to depend on the image):\r\n```\r\nversion: '2'\r\n\r\nservices :\r\n ContainerName:\r\n image: bae2d441e03a\r\n ports:\r\n - \"127.0.0.1:9292:9090\"\r\n```\r\n\r\nI run on Ubuntu 14.04.5 LTS with the following package:\r\n```\r\ndocker==2.4.0\r\ndocker-compose==1.14.0\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nPython 2.7.6\r\nClient:\r\n Version: 17.05.0-ce\r\n API version: 1.29\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.05.0-ce\r\n API version: 1.29 (minimum version 1.12)\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n 
internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n", "path": "docker/utils/ports.py"}]} | 1,956 | 146 |
gh_patches_debug_20319 | rasdani/github-patches | git_diff | pantsbuild__pants-12885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`DigestEntries` returns a `Directory` instead of an empty vector for non-matching digest
</issue>
<code>
[start of src/python/pants/jvm/util_rules.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7
8 from pants.engine.fs import Digest, DigestEntries, DigestSubset, FileDigest, FileEntry, PathGlobs
9 from pants.engine.rules import Get, collect_rules, rule
10
11
12 @dataclass(frozen=True)
13 class ExtractFileDigest:
14 digest: Digest
15 file_path: str
16
17
18 @rule
19 async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:
20 digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))
21 files_or_directories = await Get(DigestEntries, Digest, digest)
22 digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]
23
24 if len(digest_entries) == 0:
25 raise Exception(f"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.")
26 elif len(digest_entries) > 1:
27 raise Exception(
28 f"ExtractFileDigest: Unexpected error: '{request.file_path}' found multiple times in {request.digest}"
29 )
30
31 file_info = digest_entries[0]
32 return file_info.file_digest
33
34
35 def rules():
36 return [*collect_rules()]
37
[end of src/python/pants/jvm/util_rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/jvm/util_rules.py b/src/python/pants/jvm/util_rules.py
--- a/src/python/pants/jvm/util_rules.py
+++ b/src/python/pants/jvm/util_rules.py
@@ -18,8 +18,7 @@
@rule
async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:
digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))
- files_or_directories = await Get(DigestEntries, Digest, digest)
- digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]
+ digest_entries = await Get(DigestEntries, Digest, digest)
if len(digest_entries) == 0:
raise Exception(f"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.")
@@ -29,6 +28,12 @@
)
file_info = digest_entries[0]
+
+ if not isinstance(file_info, FileEntry):
+ raise AssertionError(
+ f"Unexpected error: '{request.file_path}' refers to a directory, not a file."
+ )
+
return file_info.file_digest
| {"golden_diff": "diff --git a/src/python/pants/jvm/util_rules.py b/src/python/pants/jvm/util_rules.py\n--- a/src/python/pants/jvm/util_rules.py\n+++ b/src/python/pants/jvm/util_rules.py\n@@ -18,8 +18,7 @@\n @rule\n async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:\n digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))\n- files_or_directories = await Get(DigestEntries, Digest, digest)\n- digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]\n+ digest_entries = await Get(DigestEntries, Digest, digest)\n \n if len(digest_entries) == 0:\n raise Exception(f\"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.\")\n@@ -29,6 +28,12 @@\n )\n \n file_info = digest_entries[0]\n+\n+ if not isinstance(file_info, FileEntry):\n+ raise AssertionError(\n+ f\"Unexpected error: '{request.file_path}' refers to a directory, not a file.\"\n+ )\n+\n return file_info.file_digest\n", "issue": "`DigestEntries` returns a `Directory` instead of an empty vector for non-matching digest\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.engine.fs import Digest, DigestEntries, DigestSubset, FileDigest, FileEntry, PathGlobs\nfrom pants.engine.rules import Get, collect_rules, rule\n\n\n@dataclass(frozen=True)\nclass ExtractFileDigest:\n digest: Digest\n file_path: str\n\n\n@rule\nasync def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:\n digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))\n files_or_directories = await Get(DigestEntries, Digest, digest)\n digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]\n\n if len(digest_entries) == 0:\n raise Exception(f\"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.\")\n elif len(digest_entries) > 1:\n raise Exception(\n f\"ExtractFileDigest: Unexpected error: '{request.file_path}' found multiple times in {request.digest}\"\n )\n\n file_info = digest_entries[0]\n return file_info.file_digest\n\n\ndef rules():\n return [*collect_rules()]\n", "path": "src/python/pants/jvm/util_rules.py"}]} | 916 | 261 |
gh_patches_debug_8401 | rasdani/github-patches | git_diff | pretalx__pretalx-219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding a speaker question does not pre-select "speaker question"
In the question list view, one can click "Add new question" either for speaker questions or for submission questions. In the following form, the question type is not preselected, which is unexpected.
</issue>
<code>
[start of src/pretalx/orga/views/cfp.py]
1 from csp.decorators import csp_update
2 from django.contrib import messages
3 from django.db import transaction
4 from django.db.models.deletion import ProtectedError
5 from django.forms.models import inlineformset_factory
6 from django.shortcuts import redirect
7 from django.utils.decorators import method_decorator
8 from django.utils.functional import cached_property
9 from django.utils.translation import ugettext_lazy as _
10 from django.views.generic import ListView, TemplateView, UpdateView, View
11
12 from pretalx.common.forms import I18nFormSet
13 from pretalx.common.views import ActionFromUrl, CreateOrUpdateView
14 from pretalx.orga.forms import CfPForm, QuestionForm, SubmissionTypeForm
15 from pretalx.orga.forms.cfp import AnswerOptionForm, CfPSettingsForm
16 from pretalx.submission.models import (
17 AnswerOption, CfP, Question, SubmissionType,
18 )
19
20
21 class CfPTextDetail(ActionFromUrl, UpdateView):
22 form_class = CfPForm
23 model = CfP
24 template_name = 'orga/cfp/text.html'
25
26 def get_context_data(self, *args, **kwargs):
27 ctx = super().get_context_data(*args, **kwargs)
28 ctx['sform'] = self.sform
29 return ctx
30
31 @cached_property
32 def sform(self):
33 return CfPSettingsForm(
34 read_only=(self._action == 'view'),
35 locales=self.request.event.locales,
36 obj=self.request.event,
37 attribute_name='settings',
38 data=self.request.POST if self.request.method == "POST" else None,
39 prefix='settings'
40 )
41
42 def get_object(self):
43 return self.request.event.cfp
44
45 def get_success_url(self) -> str:
46 return self.get_object().urls.text
47
48 def form_valid(self, form):
49 if not self.sform.is_valid():
50 return self.form_invalid(form)
51 messages.success(self.request, 'The CfP update has been saved.')
52 form.instance.event = self.request.event
53 ret = super().form_valid(form)
54 if form.has_changed():
55 form.instance.log_action('pretalx.cfp.update', person=self.request.user, orga=True)
56 self.sform.save()
57 return ret
58
59
60 class CfPQuestionList(TemplateView):
61 template_name = 'orga/cfp/question_view.html'
62
63 def get_context_data(self, *args, **kwargs):
64 ctx = super().get_context_data(*args, **kwargs)
65 ctx['speaker_questions'] = Question.all_objects.filter(event=self.request.event, target='speaker')
66 ctx['submission_questions'] = Question.all_objects.filter(event=self.request.event, target='submission')
67 return ctx
68
69
70 @method_decorator(csp_update(SCRIPT_SRC="'self' 'unsafe-inline'"), name='dispatch')
71 class CfPQuestionDetail(ActionFromUrl, CreateOrUpdateView):
72 model = Question
73 form_class = QuestionForm
74 template_name = 'orga/cfp/question_form.html'
75
76 def get_object(self) -> Question:
77 return self.request.event.questions.filter(pk=self.kwargs.get('pk')).first()
78
79 @cached_property
80 def formset(self):
81 formset_class = inlineformset_factory(
82 Question, AnswerOption, form=AnswerOptionForm, formset=I18nFormSet,
83 can_delete=True, extra=0,
84 )
85 return formset_class(
86 self.request.POST if self.request.method == 'POST' else None,
87 queryset=AnswerOption.objects.filter(question=self.get_object()) if self.get_object() else AnswerOption.objects.none(),
88 event=self.request.event
89 )
90
91 def save_formset(self, obj):
92 if self.formset.is_valid():
93 for form in self.formset.initial_forms:
94 if form in self.formset.deleted_forms:
95 if not form.instance.pk:
96 continue
97 obj.log_action(
98 'pretalx.event.question.option.deleted', person=self.request.user, orga=True, data={
99 'id': form.instance.pk
100 }
101 )
102 form.instance.delete()
103 form.instance.pk = None
104 elif form.has_changed():
105 form.instance.question = obj
106 form.save()
107 change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
108 change_data['id'] = form.instance.pk
109 obj.log_action(
110 'pretalx.event.question.option.changed',
111 person=self.request.user, orga=True, data=change_data
112 )
113
114 for form in self.formset.extra_forms:
115 if not form.has_changed():
116 continue
117 if self.formset._should_delete_form(form):
118 continue
119 form.instance.question = obj
120 form.save()
121 change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
122 change_data['id'] = form.instance.pk
123 obj.log_action(
124 'pretalx.event.question.option.added',
125 person=self.request.user, orga=True, data=change_data
126 )
127
128 return True
129 return False
130
131 def get_context_data(self, *args, **kwargs):
132 ctx = super().get_context_data(*args, **kwargs)
133 ctx['formset'] = self.formset
134 return ctx
135
136 def get_success_url(self) -> str:
137 obj = self.get_object() or self.instance
138 return obj.urls.base
139
140 @transaction.atomic
141 def form_valid(self, form):
142 form.instance.event = self.request.event
143 self.instance = form.instance
144 ret = super().form_valid(form)
145 if form.cleaned_data.get('variant') in ('choices', 'multiple_choice'):
146 result = self.save_formset(self.instance)
147 if not result:
148 return self.get(self.request, *self.args, **self.kwargs)
149 if form.has_changed():
150 action = 'pretalx.question.' + ('update' if self.object else 'create')
151 form.instance.log_action(action, person=self.request.user, orga=True)
152 messages.success(self.request, 'The question has been saved.')
153 return ret
154
155
156 class CfPQuestionDelete(View):
157
158 def dispatch(self, request, *args, **kwargs):
159 super().dispatch(request, *args, **kwargs)
160 question = self.request.event.questions.get(pk=self.kwargs.get('pk'))
161
162 try:
163 question.delete()
164 question.log_action('pretalx.question.delete', person=self.request.user, orga=True)
165 messages.success(request, _('The question has been deleted.'))
166 except ProtectedError:
167 question.active = False
168 question.save()
169 messages.error(request, _('You cannot delete a question that has already been answered. We have deactivated the question instead.'))
170 return redirect(self.request.event.cfp.urls.questions)
171
172
173 class SubmissionTypeList(ListView):
174 template_name = 'orga/cfp/submission_type_view.html'
175 context_object_name = 'types'
176
177 def get_queryset(self):
178 return self.request.event.submission_types.all()
179
180
181 class SubmissionTypeDetail(ActionFromUrl, CreateOrUpdateView):
182 model = SubmissionType
183 form_class = SubmissionTypeForm
184 template_name = 'orga/cfp/submission_type_form.html'
185
186 def get_success_url(self) -> str:
187 return self.request.event.cfp.urls.types
188
189 def get_object(self):
190 return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))
191
192 def form_valid(self, form):
193 messages.success(self.request, 'The Submission Type has been saved.')
194 form.instance.event = self.request.event
195 ret = super().form_valid(form)
196 if form.has_changed():
197 action = 'pretalx.submission_type.' + ('update' if self.object else 'create')
198 form.instance.log_action(action, person=self.request.user, orga=True)
199 return ret
200
201
202 class SubmissionTypeDefault(View):
203
204 def dispatch(self, request, *args, **kwargs):
205 super().dispatch(request, *args, **kwargs)
206
207 submission_type = self.request.event.submission_types.get(pk=self.kwargs.get('pk'))
208 self.request.event.cfp.default_type = submission_type
209 self.request.event.cfp.save(update_fields=['default_type'])
210 submission_type.log_action('pretalx.submission_type.make_default', person=self.request.user, orga=True)
211 messages.success(request, _('The Submission Type has been made default.'))
212 return redirect(self.request.event.cfp.urls.types)
213
214
215 class SubmissionTypeDelete(View):
216
217 def dispatch(self, request, *args, **kwargs):
218 super().dispatch(request, *args, **kwargs)
219 submission_type = self.request.event.submission_types.get(pk=self.kwargs.get('pk'))
220
221 if request.event.submission_types.count() == 1:
222 messages.error(request, _('You cannot delete the only submission type. Try creating another one first!'))
223 elif request.event.cfp.default_type == submission_type:
224 messages.error(request, _('You cannot delete the default submission type. Make another type default first!'))
225 else:
226 try:
227 submission_type.delete()
228 request.event.log_action('pretalx.submission_type.delete', person=self.request.user, orga=True)
229 messages.success(request, _('The Submission Type has been deleted.'))
230 except ProtectedError: # TODO: show which/how many submissions are concerned
231 messages.error(request, _('This Submission Type is in use in a submission and cannot be deleted.'))
232 return redirect(self.request.event.cfp.urls.types)
233
[end of src/pretalx/orga/views/cfp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretalx/orga/views/cfp.py b/src/pretalx/orga/views/cfp.py
--- a/src/pretalx/orga/views/cfp.py
+++ b/src/pretalx/orga/views/cfp.py
@@ -133,6 +133,14 @@
ctx['formset'] = self.formset
return ctx
+ def get_form_kwargs(self, *args, **kwargs):
+ kwargs = super().get_form_kwargs(*args, **kwargs)
+ if not self.get_object():
+ initial = kwargs['initial'] or dict()
+ initial['target'] = self.request.GET.get('type')
+ kwargs['initial'] = initial
+ return kwargs
+
def get_success_url(self) -> str:
obj = self.get_object() or self.instance
return obj.urls.base
| {"golden_diff": "diff --git a/src/pretalx/orga/views/cfp.py b/src/pretalx/orga/views/cfp.py\n--- a/src/pretalx/orga/views/cfp.py\n+++ b/src/pretalx/orga/views/cfp.py\n@@ -133,6 +133,14 @@\n ctx['formset'] = self.formset\n return ctx\n \n+ def get_form_kwargs(self, *args, **kwargs):\n+ kwargs = super().get_form_kwargs(*args, **kwargs)\n+ if not self.get_object():\n+ initial = kwargs['initial'] or dict()\n+ initial['target'] = self.request.GET.get('type')\n+ kwargs['initial'] = initial\n+ return kwargs\n+\n def get_success_url(self) -> str:\n obj = self.get_object() or self.instance\n return obj.urls.base\n", "issue": "Adding a speaker question does not pre-select \"speaker question\"\nIn the question list view, one can click \"Add new question\" either for speaker questions or for submission questions. In the following form, the question type is not preselected, which is unexpected.\n", "before_files": [{"content": "from csp.decorators import csp_update\nfrom django.contrib import messages\nfrom django.db import transaction\nfrom django.db.models.deletion import ProtectedError\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import ListView, TemplateView, UpdateView, View\n\nfrom pretalx.common.forms import I18nFormSet\nfrom pretalx.common.views import ActionFromUrl, CreateOrUpdateView\nfrom pretalx.orga.forms import CfPForm, QuestionForm, SubmissionTypeForm\nfrom pretalx.orga.forms.cfp import AnswerOptionForm, CfPSettingsForm\nfrom pretalx.submission.models import (\n AnswerOption, CfP, Question, SubmissionType,\n)\n\n\nclass CfPTextDetail(ActionFromUrl, UpdateView):\n form_class = CfPForm\n model = CfP\n template_name = 'orga/cfp/text.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['sform'] = self.sform\n return ctx\n\n @cached_property\n def sform(self):\n return CfPSettingsForm(\n read_only=(self._action == 'view'),\n locales=self.request.event.locales,\n obj=self.request.event,\n attribute_name='settings',\n data=self.request.POST if self.request.method == \"POST\" else None,\n prefix='settings'\n )\n\n def get_object(self):\n return self.request.event.cfp\n\n def get_success_url(self) -> str:\n return self.get_object().urls.text\n\n def form_valid(self, form):\n if not self.sform.is_valid():\n return self.form_invalid(form)\n messages.success(self.request, 'The CfP update has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n form.instance.log_action('pretalx.cfp.update', person=self.request.user, orga=True)\n self.sform.save()\n return ret\n\n\nclass CfPQuestionList(TemplateView):\n template_name = 'orga/cfp/question_view.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['speaker_questions'] = Question.all_objects.filter(event=self.request.event, target='speaker')\n ctx['submission_questions'] = Question.all_objects.filter(event=self.request.event, target='submission')\n return ctx\n\n\n@method_decorator(csp_update(SCRIPT_SRC=\"'self' 'unsafe-inline'\"), name='dispatch')\nclass CfPQuestionDetail(ActionFromUrl, CreateOrUpdateView):\n model = Question\n form_class = QuestionForm\n template_name = 'orga/cfp/question_form.html'\n\n def get_object(self) -> Question:\n 
return self.request.event.questions.filter(pk=self.kwargs.get('pk')).first()\n\n @cached_property\n def formset(self):\n formset_class = inlineformset_factory(\n Question, AnswerOption, form=AnswerOptionForm, formset=I18nFormSet,\n can_delete=True, extra=0,\n )\n return formset_class(\n self.request.POST if self.request.method == 'POST' else None,\n queryset=AnswerOption.objects.filter(question=self.get_object()) if self.get_object() else AnswerOption.objects.none(),\n event=self.request.event\n )\n\n def save_formset(self, obj):\n if self.formset.is_valid():\n for form in self.formset.initial_forms:\n if form in self.formset.deleted_forms:\n if not form.instance.pk:\n continue\n obj.log_action(\n 'pretalx.event.question.option.deleted', person=self.request.user, orga=True, data={\n 'id': form.instance.pk\n }\n )\n form.instance.delete()\n form.instance.pk = None\n elif form.has_changed():\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.event.question.option.changed',\n person=self.request.user, orga=True, data=change_data\n )\n\n for form in self.formset.extra_forms:\n if not form.has_changed():\n continue\n if self.formset._should_delete_form(form):\n continue\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.event.question.option.added',\n person=self.request.user, orga=True, data=change_data\n )\n\n return True\n return False\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['formset'] = self.formset\n return ctx\n\n def get_success_url(self) -> str:\n obj = self.get_object() or self.instance\n return obj.urls.base\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n self.instance = form.instance\n ret = super().form_valid(form)\n if form.cleaned_data.get('variant') in ('choices', 'multiple_choice'):\n result = self.save_formset(self.instance)\n if not result:\n return self.get(self.request, *self.args, **self.kwargs)\n if form.has_changed():\n action = 'pretalx.question.' + ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n messages.success(self.request, 'The question has been saved.')\n return ret\n\n\nclass CfPQuestionDelete(View):\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n question = self.request.event.questions.get(pk=self.kwargs.get('pk'))\n\n try:\n question.delete()\n question.log_action('pretalx.question.delete', person=self.request.user, orga=True)\n messages.success(request, _('The question has been deleted.'))\n except ProtectedError:\n question.active = False\n question.save()\n messages.error(request, _('You cannot delete a question that has already been answered. 
We have deactivated the question instead.'))\n return redirect(self.request.event.cfp.urls.questions)\n\n\nclass SubmissionTypeList(ListView):\n template_name = 'orga/cfp/submission_type_view.html'\n context_object_name = 'types'\n\n def get_queryset(self):\n return self.request.event.submission_types.all()\n\n\nclass SubmissionTypeDetail(ActionFromUrl, CreateOrUpdateView):\n model = SubmissionType\n form_class = SubmissionTypeForm\n template_name = 'orga/cfp/submission_type_form.html'\n\n def get_success_url(self) -> str:\n return self.request.event.cfp.urls.types\n\n def get_object(self):\n return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n def form_valid(self, form):\n messages.success(self.request, 'The Submission Type has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n action = 'pretalx.submission_type.' + ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n return ret\n\n\nclass SubmissionTypeDefault(View):\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n\n submission_type = self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n self.request.event.cfp.default_type = submission_type\n self.request.event.cfp.save(update_fields=['default_type'])\n submission_type.log_action('pretalx.submission_type.make_default', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been made default.'))\n return redirect(self.request.event.cfp.urls.types)\n\n\nclass SubmissionTypeDelete(View):\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n submission_type = self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n if request.event.submission_types.count() == 1:\n messages.error(request, _('You cannot delete the only submission type. Try creating another one first!'))\n elif request.event.cfp.default_type == submission_type:\n messages.error(request, _('You cannot delete the default submission type. Make another type default first!'))\n else:\n try:\n submission_type.delete()\n request.event.log_action('pretalx.submission_type.delete', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been deleted.'))\n except ProtectedError: # TODO: show which/how many submissions are concerned\n messages.error(request, _('This Submission Type is in use in a submission and cannot be deleted.'))\n return redirect(self.request.event.cfp.urls.types)\n", "path": "src/pretalx/orga/views/cfp.py"}]} | 3,170 | 190 |
gh_patches_debug_30118 | rasdani/github-patches | git_diff | kivy__kivy-8540 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`PIL` text provider: Critical issues if `font_size` is `< 1`
**Software Versions**
* Python: `3.8.9`
* OS: Windows
* Kivy: `2.3.0rc2`
* Kivy installation method: `python -m pip install --pre "kivy[base]"` / `pip install Kivy==2.3.0rc2`
**Describe the bug**
After upgrading from Pillow 9.x.x to Pillow 10.x.x, some errors are occurring, some with no clear cause. Some critical errors already existed in previous versions of kivy/Pillow.
🔴 Note: This same code works correctly (without errors) with the `"sdl2"` text provider
# Issue 1:
### Works in `9.x.x`, breaks in `10.x.x`:
```python
import os
os.environ["KIVY_TEXT"] = "pil"
from kivy.app import App
from kivy.uix.label import Label
class LabelTest(App):
def build(self):
return Label(text='Kivy rulz', font_size=0)
LabelTest().run()
```
### Error:
```
File "lib\site-packages\kivy\uix\label.py", line 432, in texture_update
self._label.refresh()
File "lib\site-packages\kivy\core\text\__init__.py", line 836, in refresh
sz = self.render()
File "lib\site-packages\kivy\core\text\__init__.py", line 772, in render
options['space_width'] = self.get_extents(' ')[0]
File "lib\site-packages\kivy\core\text\text_pil.py", line 39, in get_extents
font = self._select_font()
File "lib\site-packages\kivy\core\text\text_pil.py", line 33, in _select_font
font = ImageFont.truetype(fontname, fontsize)
File "lib\site-packages\PIL\ImageFont.py", line 797, in truetype
return freetype(font)
File "lib\site-packages\PIL\ImageFont.py", line 794, in freetype
return FreeTypeFont(font, size, index, encoding, layout_engine)
File "lib\site-packages\PIL\ImageFont.py", line 226, in __init__
self.font = core.getfont(
OSError: invalid ppem value
```
# Issue 2:
### Breaks in `9.x.x`, and in `10.x.x`, and in previous versions of kivy:
```python
import os
os.environ["KIVY_TEXT"] = "pil"
from kivy.app import App
from kivy.uix.label import Label
class LabelTest(App):
def build(self):
return Label(text='Kivy rulz', font_size=-10)
LabelTest().run()
```
### Error:
```
File "lib\site-packages\kivy\uix\label.py", line 432, in texture_update
self._label.refresh()
File "lib\site-packages\kivy\core\text\__init__.py", line 836, in refresh
sz = self.render()
File "lib\site-packages\kivy\core\text\__init__.py", line 812, in render
w, h, clipped = layout_text(text, lines, (0, 0), (uw, uh), options,
File "kivy\\core\\text\\text_layout.pyx", line 408, in kivy.core.text.text_layout.layout_text
File "kivy\\core\\text\\text_layout.pyx", line 235, in kivy.core.text.text_layout.layout_text_unrestricted
File "lib\site-packages\kivy\core\text\text_pil.py", line 37, in get_extents
left, top, right, bottom = font.getbbox(text)
File "lib\site-packages\PIL\ImageFont.py", line 408, in getbbox
size, offset = self.font.getsize(
OSError: invalid argument
```
# Issue 3:
### Works in `9.x.x`, breaks in `10.x.x`:
```
There is still no minimum reproducible code
```
### Error:
```
File "lib\site-packages\kivy\core\text\__init__.py", line 828, in _texture_fill
self.render(real=True)
File "lib\site-packages\kivy\core\text\markup.py", line 143, in render
ret = self._render_real()
File "lib\site-packages\kivy\core\text\__init__.py", line 752, in _render_real
self.render_lines(lines, options, self._render_text, y, size)
File "lib\site-packages\kivy\core\text\markup.py", line 500, in render_lines
render_text(word.text, x, y + script_pos)
File "lib\site-packages\kivy\core\text\text_pil.py", line 66, in _render_text
self._pil_draw.text((int(x), int(y)),
File "lib\site-packages\PIL\ImageDraw.py", line 565, in text
draw_text(ink)
File "lib\site-packages\PIL\ImageDraw.py", line 508, in draw_text
mask, offset = font.getmask2(
File "lib\site-packages\PIL\ImageFont.py", line 580, in getmask2
offset = self.font.render(
OSError: unsupported bitmap pixel mode
```
**Expected behavior**
Behave like the `sdl2` text provider
### Proposed solutions
- Issues 1 and 2: limit the font size within the `PIL` text provider to `> 0`
- Issue 3: if the root cause is not identified, handle the error raised at line [66](https://github.com/kivy/kivy/blob/master/kivy/core/text/text_pil.py#L66):
```python
self._pil_draw.text((int(x), int(y)),
text, font=self._select_font(), fill=color)
```
</issue>
<code>
[start of kivy/core/text/text_pil.py]
1 '''
2 Text PIL: Draw text with PIL
3 '''
4
5 __all__ = ('LabelPIL', )
6
7 from PIL import Image, ImageFont, ImageDraw
8
9
10 from kivy.compat import text_type
11 from kivy.core.text import LabelBase
12 from kivy.core.image import ImageData
13
14 # used for fetching extents before creating the image surface
15 default_font = ImageFont.load_default()
16
17
18 class LabelPIL(LabelBase):
19 _cache = {}
20
21 def _select_font(self):
22 fontsize = int(self.options['font_size'])
23 fontname = self.options['font_name_r']
24 try:
25 id = '%s.%s' % (text_type(fontname), text_type(fontsize))
26 except UnicodeDecodeError:
27 id = '%s.%s' % (fontname, fontsize)
28
29 if id not in self._cache:
30 font = ImageFont.truetype(fontname, fontsize)
31 self._cache[id] = font
32
33 return self._cache[id]
34
35 def get_extents(self, text):
36 font = self._select_font()
37 left, top, right, bottom = font.getbbox(text)
38 ascent, descent = font.getmetrics()
39
40 if self.options['limit_render_to_text_bbox']:
41 h = bottom - top
42 else:
43 h = ascent + descent
44 w = right - left
45
46 return w, h
47
48 def get_cached_extents(self):
49 return self.get_extents
50
51 def _render_begin(self):
52 # create a surface, context, font...
53 self._pil_im = Image.new('RGBA', self._size, color=(255, 255, 255, 0))
54 self._pil_draw = ImageDraw.Draw(self._pil_im)
55
56 def _render_text(self, text, x, y):
57 color = tuple([int(c * 255) for c in self.options['color']])
58
59 # Adjust x and y position to avoid text cutoff
60 if self.options['limit_render_to_text_bbox']:
61 font = self._select_font()
62 bbox = font.getbbox(text)
63 x -= bbox[0]
64 y -= bbox[1]
65
66 self._pil_draw.text((int(x), int(y)),
67 text, font=self._select_font(), fill=color)
68
69 def _render_end(self):
70 data = ImageData(self._size[0], self._size[1],
71 self._pil_im.mode.lower(), self._pil_im.tobytes())
72
73 del self._pil_im
74 del self._pil_draw
75
76 return data
77
[end of kivy/core/text/text_pil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/text/text_pil.py b/kivy/core/text/text_pil.py
--- a/kivy/core/text/text_pil.py
+++ b/kivy/core/text/text_pil.py
@@ -19,6 +19,9 @@
_cache = {}
def _select_font(self):
+ if self.options['font_size'] < 1:
+ return None
+
fontsize = int(self.options['font_size'])
fontname = self.options['font_name_r']
try:
@@ -34,6 +37,9 @@
def get_extents(self, text):
font = self._select_font()
+ if not font:
+ return 0, 0
+
left, top, right, bottom = font.getbbox(text)
ascent, descent = font.getmetrics()
@@ -54,17 +60,19 @@
self._pil_draw = ImageDraw.Draw(self._pil_im)
def _render_text(self, text, x, y):
+ font = self._select_font()
+ if not font:
+ return
+
color = tuple([int(c * 255) for c in self.options['color']])
# Adjust x and y position to avoid text cutoff
if self.options['limit_render_to_text_bbox']:
- font = self._select_font()
bbox = font.getbbox(text)
x -= bbox[0]
y -= bbox[1]
- self._pil_draw.text((int(x), int(y)),
- text, font=self._select_font(), fill=color)
+ self._pil_draw.text((int(x), int(y)), text, font=font, fill=color)
def _render_end(self):
data = ImageData(self._size[0], self._size[1],
| {"golden_diff": "diff --git a/kivy/core/text/text_pil.py b/kivy/core/text/text_pil.py\n--- a/kivy/core/text/text_pil.py\n+++ b/kivy/core/text/text_pil.py\n@@ -19,6 +19,9 @@\n _cache = {}\n \n def _select_font(self):\n+ if self.options['font_size'] < 1:\n+ return None\n+\n fontsize = int(self.options['font_size'])\n fontname = self.options['font_name_r']\n try:\n@@ -34,6 +37,9 @@\n \n def get_extents(self, text):\n font = self._select_font()\n+ if not font:\n+ return 0, 0\n+\n left, top, right, bottom = font.getbbox(text)\n ascent, descent = font.getmetrics()\n \n@@ -54,17 +60,19 @@\n self._pil_draw = ImageDraw.Draw(self._pil_im)\n \n def _render_text(self, text, x, y):\n+ font = self._select_font()\n+ if not font:\n+ return\n+\n color = tuple([int(c * 255) for c in self.options['color']])\n \n # Adjust x and y position to avoid text cutoff\n if self.options['limit_render_to_text_bbox']:\n- font = self._select_font()\n bbox = font.getbbox(text)\n x -= bbox[0]\n y -= bbox[1]\n \n- self._pil_draw.text((int(x), int(y)),\n- text, font=self._select_font(), fill=color)\n+ self._pil_draw.text((int(x), int(y)), text, font=font, fill=color)\n \n def _render_end(self):\n data = ImageData(self._size[0], self._size[1],\n", "issue": "`PIL` text provider: Critical issues if `font_size` is `< 1`\n**Software Versions**\r\n* Python: `3.8.9`\r\n* OS: Windows\r\n* Kivy: `2.3.0rc2`\r\n* Kivy installation method: `python -m pip install --pre \"kivy[base]\"` / `pip install Kivy==2.3.0rc2`\r\n\r\n\r\n**Describe the bug**\r\nAfter upgrading from Pillow 9.x.x to Pillow 10.x.x, some errors are occurring, some with no clear cause. Some critical errors already existed in previous versions of kivy/Pillow.\r\n\r\n\ud83d\udd34 Note: This same code works correctly (without errors) with the `\"sdl2\"` text provider\r\n\r\n\r\n# Issue 1:\r\n### Works in `9.x.x`, breaks in `10.x.x`:\r\n```python\r\nimport os\r\nos.environ[\"KIVY_TEXT\"] = \"pil\"\r\n\r\nfrom kivy.app import App\r\nfrom kivy.uix.label import Label\r\n\r\n\r\nclass LabelTest(App):\r\n def build(self):\r\n return Label(text='Kivy rulz', font_size=0)\r\n\r\nLabelTest().run()\r\n```\r\n\r\n### Error:\r\n```\r\n File \"lib\\site-packages\\kivy\\uix\\label.py\", line 432, in texture_update\r\n self._label.refresh()\r\n File \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 836, in refresh\r\n sz = self.render()\r\n File \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 772, in render\r\n options['space_width'] = self.get_extents(' ')[0]\r\n File \"lib\\site-packages\\kivy\\core\\text\\text_pil.py\", line 39, in get_extents\r\n font = self._select_font()\r\n File \"lib\\site-packages\\kivy\\core\\text\\text_pil.py\", line 33, in _select_font\r\n font = ImageFont.truetype(fontname, fontsize)\r\n File \"lib\\site-packages\\PIL\\ImageFont.py\", line 797, in truetype\r\n return freetype(font)\r\n File \"lib\\site-packages\\PIL\\ImageFont.py\", line 794, in freetype\r\n return FreeTypeFont(font, size, index, encoding, layout_engine)\r\n File \"lib\\site-packages\\PIL\\ImageFont.py\", line 226, in __init__\r\n self.font = core.getfont(\r\n OSError: invalid ppem value\r\n```\r\n\r\n# Issue 2:\r\n\r\n### Breaks in `9.x.x`, and in `10.x.x`, and in previous versions of kivy:\r\n\r\n```python\r\nimport os\r\nos.environ[\"KIVY_TEXT\"] = \"pil\"\r\n\r\nfrom kivy.app import App\r\nfrom kivy.uix.label import Label\r\n\r\n\r\nclass LabelTest(App):\r\n def build(self):\r\n return Label(text='Kivy rulz', font_size=-10)\r\n\r\nLabelTest().run()\r\n```\r\n\r\n### 
Error:\r\n```\r\n File \"lib\\site-packages\\kivy\\uix\\label.py\", line 432, in texture_update\r\n self._label.refresh()\r\n File \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 836, in refresh\r\n sz = self.render()\r\n File \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 812, in render\r\n w, h, clipped = layout_text(text, lines, (0, 0), (uw, uh), options,\r\n File \"kivy\\\\core\\\\text\\\\text_layout.pyx\", line 408, in kivy.core.text.text_layout.layout_text\r\n File \"kivy\\\\core\\\\text\\\\text_layout.pyx\", line 235, in kivy.core.text.text_layout.layout_text_unrestricted\r\n File \"lib\\site-packages\\kivy\\core\\text\\text_pil.py\", line 37, in get_extents\r\n left, top, right, bottom = font.getbbox(text)\r\n File \"lib\\site-packages\\PIL\\ImageFont.py\", line 408, in getbbox\r\n size, offset = self.font.getsize(\r\n OSError: invalid argument\r\n```\r\n\r\n# Issue 3:\r\n### Works in `9.x.x`, breaks in `10.x.x`:\r\n```\r\nThere is still no minimum reproducible code\r\n```\r\n\r\n### Error:\r\n```\r\nFile \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 828, in _texture_fill\r\n self.render(real=True)\r\n File \"lib\\site-packages\\kivy\\core\\text\\markup.py\", line 143, in render\r\n ret = self._render_real()\r\n File \"lib\\site-packages\\kivy\\core\\text\\__init__.py\", line 752, in _render_real\r\n self.render_lines(lines, options, self._render_text, y, size)\r\n File \"lib\\site-packages\\kivy\\core\\text\\markup.py\", line 500, in render_lines\r\n render_text(word.text, x, y + script_pos)\r\n File \"lib\\site-packages\\kivy\\core\\text\\text_pil.py\", line 66, in _render_text\r\n self._pil_draw.text((int(x), int(y)),\r\n File \"lib\\site-packages\\PIL\\ImageDraw.py\", line 565, in text\r\n draw_text(ink)\r\n File \"lib\\site-packages\\PIL\\ImageDraw.py\", line 508, in draw_text\r\n mask, offset = font.getmask2(\r\n File \"lib\\site-packages\\PIL\\ImageFont.py\", line 580, in getmask2\r\n offset = self.font.render(\r\nOSError: unsupported bitmap pixel mode\r\n```\r\n\r\n**Expected behavior**\r\nBehave like the `sdl2` text provider\r\n\r\n### Proposed solutions\r\n- issues 1 and 2 - Limit within the `PIL` text provider the font size to `> 0`\r\n- issue 3 - If the root cause of the issue is not identified, treat the error on the line [66](https://github.com/kivy/kivy/blob/master/kivy/core/text/text_pil.py#L66):\r\n```python\r\nself._pil_draw.text((int(x), int(y)),\r\n text, font=self._select_font(), fill=color)\r\n```\r\n\n", "before_files": [{"content": "'''\nText PIL: Draw text with PIL\n'''\n\n__all__ = ('LabelPIL', )\n\nfrom PIL import Image, ImageFont, ImageDraw\n\n\nfrom kivy.compat import text_type\nfrom kivy.core.text import LabelBase\nfrom kivy.core.image import ImageData\n\n# used for fetching extends before creature image surface\ndefault_font = ImageFont.load_default()\n\n\nclass LabelPIL(LabelBase):\n _cache = {}\n\n def _select_font(self):\n fontsize = int(self.options['font_size'])\n fontname = self.options['font_name_r']\n try:\n id = '%s.%s' % (text_type(fontname), text_type(fontsize))\n except UnicodeDecodeError:\n id = '%s.%s' % (fontname, fontsize)\n\n if id not in self._cache:\n font = ImageFont.truetype(fontname, fontsize)\n self._cache[id] = font\n\n return self._cache[id]\n\n def get_extents(self, text):\n font = self._select_font()\n left, top, right, bottom = font.getbbox(text)\n ascent, descent = font.getmetrics()\n\n if self.options['limit_render_to_text_bbox']:\n h = bottom - top\n else:\n h = ascent + descent\n w 
= right - left\n\n return w, h\n\n def get_cached_extents(self):\n return self.get_extents\n\n def _render_begin(self):\n # create a surface, context, font...\n self._pil_im = Image.new('RGBA', self._size, color=(255, 255, 255, 0))\n self._pil_draw = ImageDraw.Draw(self._pil_im)\n\n def _render_text(self, text, x, y):\n color = tuple([int(c * 255) for c in self.options['color']])\n\n # Adjust x and y position to avoid text cutoff\n if self.options['limit_render_to_text_bbox']:\n font = self._select_font()\n bbox = font.getbbox(text)\n x -= bbox[0]\n y -= bbox[1]\n\n self._pil_draw.text((int(x), int(y)),\n text, font=self._select_font(), fill=color)\n\n def _render_end(self):\n data = ImageData(self._size[0], self._size[1],\n self._pil_im.mode.lower(), self._pil_im.tobytes())\n\n del self._pil_im\n del self._pil_draw\n\n return data\n", "path": "kivy/core/text/text_pil.py"}]} | 2,629 | 400 |
gh_patches_debug_23505 | rasdani/github-patches | git_diff | weecology__retriever-573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
urllib.request.urlretrieve fails to download Gentry data
[Gentry site data](https://github.com/weecology/retriever/blob/master/scripts/gentry.py#L28) fails to download, and the cause is `urllib.request.urlretrieve` in [Engine](https://github.com/weecology/retriever/blob/master/lib/engine.py)'s `def download_file(self, url, filename):`
I have attached a small script.
```
url = 'http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt'
urllib.request.urlretrieve(url, "fileout")
```
`AttributeError: '_fileobject' object has no attribute 'readinto'`
ref: #502
</issue>
<code>
[start of scripts/gentry.py]
1 #retriever
2 """Retriever script for Alwyn H. Gentry Forest Transect Dataset
3
4 """
5 from __future__ import print_function
6 from builtins import str
7 from builtins import range
8
9 import os
10 import sys
11 import zipfile
12 import xlrd
13 from retriever.lib.templates import Script
14 from retriever.lib.models import Table
15 from retriever.lib.excel import Excel
16
17 VERSION = '1.0.1'
18
19 TAX_GROUPS = 9756 #9819
20
21
22 class main(Script):
23 def __init__(self, **kwargs):
24 Script.__init__(self, **kwargs)
25 self.name = "Alwyn H. Gentry Forest Transect Dataset"
26 self.shortname = "Gentry"
27 self.urls = {"stems": "http://www.mobot.org/mobot/gentry/123/all_Excel.zip",
28 "sites": "http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt",
29 "species": "",
30 "counts": ""}
31 self.tags = ["Taxon > Plants", "Spatial Scale > Global",
32 "Data Type > Observational"]
33 self.ref = "http://www.mobot.org/mobot/research/gentry/welcome.shtml"
34 self.addendum = """Researchers who make use of the data in publications are requested to acknowledge Alwyn H. Gentry, the Missouri Botanical Garden, and collectors who assisted Gentry or contributed data for specific sites. It is also requested that a reprint of any publication making use of the Gentry Forest Transect Data be sent to:
35
36 Bruce E. Ponman
37 Missouri Botanical Garden
38 P.O. Box 299
39 St. Louis, MO 63166-0299
40 U.S.A. """
41
42 def download(self, engine=None, debug=False):
43 Script.download(self, engine, debug)
44
45 self.engine.auto_create_table(Table("sites"), url=self.urls["sites"])
46 self.engine.insert_data_from_url(self.urls["sites"])
47
48 self.engine.download_file(self.urls["stems"], "all_Excel.zip")
49 local_zip = zipfile.ZipFile(self.engine.format_filename("all_Excel.zip"))
50 filelist = local_zip.namelist()
51 local_zip.close()
52 self.engine.download_files_from_archive(self.urls["stems"], filelist)
53
54 filelist = [os.path.basename(filename) for filename in filelist]
55
56 # Currently all_Excel.zip is missing CURUYUQU.xls
57 # Download it separately and add it to the file list
58 if not self.engine.find_file('CURUYUQU.xls'):
59 self.engine.download_file("http://www.mobot.org/mobot/gentry/123/samerica/CURUYUQU.xls", "CURUYUQU.xls")
60 filelist.append('CURUYUQU.xls')
61
62 lines = []
63 tax = []
64 for filename in filelist:
65 print("Extracting data from " + filename + "...")
66 book = xlrd.open_workbook(self.engine.format_filename(filename))
67 sh = book.sheet_by_index(0)
68 rows = sh.nrows
69 cn = {'stems': []}
70 n = 0
71 for colnum, c in enumerate(sh.row(0)):
72 if not Excel.empty_cell(c):
73 cid = Excel.cell_value(c).lower()
74 # line number column is sometimes named differently
75 if cid in ["sub", "number"]:
76 cid = "line"
77 # the "number of individuals" column is named in various
78 # different ways; they always at least contain "nd"
79 if "nd" in cid:
80 cid = "count"
81 # in QUIAPACA.xls the "number of individuals" column is
82 # misnamed "STEMDBH" just like the stems columns, so weep
83 # for the state of scientific data and then fix manually
84 if filename == "QUIAPACA.xls" and colnum == 13:
85 cid = "count"
86
87 # if column is a stem, add it to the list of stems;
88 # otherwise, make note of the column name/number
89 if "stem" in cid or "dbh" in cid:
90 cn["stems"].append(n)
91 else:
92 cn[cid] = n
93 n += 1
94 # sometimes, a data file does not contain a liana or count column
95 if not "liana" in list(cn.keys()):
96 cn["liana"] = -1
97 if not "count" in list(cn.keys()):
98 cn["count"] = -1
99 for i in range(1, rows):
100 row = sh.row(i)
101 cellcount = len(row)
102 # make sure the row is real, not just empty cells
103 if not all(Excel.empty_cell(cell) for cell in row):
104 try:
105 this_line = {}
106
107 def format_value(s):
108 s = Excel.cell_value(s)
109 return str(s).title().replace("\\", "/").replace('"', '')
110
111 # get the following information from the appropriate columns
112 for i in ["line", "family", "genus", "species",
113 "liana", "count"]:
114 if cn[i] > -1:
115 this_line[i] = format_value(row[cn[i]])
116 if this_line[i] == '`':
117 this_line[i] = 1
118
119 this_line["stems"] = [Excel.cell_value(row[c])
120 for c in cn["stems"]
121 if not Excel.empty_cell(row[c])]
122 this_line["site"] = filename[0:-4]
123
124 lines.append(this_line)
125
126 # Check how far the species is identified
127 full_id = 0
128 if len(this_line["species"]) < 3:
129 if len(this_line["genus"]) < 3:
130 id_level = "family"
131 else:
132 id_level = "genus"
133 else:
134 id_level = "species"
135 full_id = 1
136 tax.append((this_line["family"],
137 this_line["genus"],
138 this_line["species"].lower().replace('\\', '').replace('"', ''),
139 id_level,
140 str(full_id)))
141 except:
142 raise
143 pass
144
145 tax = sorted(tax, key=lambda group: group[0] + " " + group[1] + " " + group[2])
146 unique_tax = []
147 tax_dict = dict()
148 tax_count = 0
149
150 # Get all unique families/genera/species
151 for group in tax:
152 if not (group in unique_tax):
153 unique_tax.append(group)
154 tax_count += 1
155 tax_dict[group[0:3]] = tax_count
156 if tax_count % 10 == 0:
157 msg = "Generating taxonomic groups: " + str(tax_count) + " / " + str(TAX_GROUPS)
158 sys.stdout.write(msg + "\b" * len(msg))
159 print("Generating taxonomic groups: " + str(TAX_GROUPS) + " / " + str(TAX_GROUPS))
160
161
162 # Create species table
163 table = Table("species", delimiter=",")
164 table.columns=[("species_id" , ("pk-int",) ),
165 ("family" , ("char", ) ),
166 ("genus" , ("char", ) ),
167 ("species" , ("char", ) ),
168 ("id_level" , ("char", 10) ),
169 ("full_id" , ("bool",) )]
170
171 data = [','.join([str(tax_dict[group[:3]])] + ['"%s"' % g for g in group])
172 for group in unique_tax]
173 table.pk = 'species_id'
174 table.contains_pk = True
175
176 self.engine.table = table
177 self.engine.create_table()
178 self.engine.add_to_table(data)
179
180
181 # Create stems table
182 table = Table("stems", delimiter=",", contains_pk=False)
183 table.columns=[("stem_id" , ("pk-auto",) ),
184 ("line" , ("int",) ),
185 ("species_id" , ("int",) ),
186 ("site_code" , ("char", 12) ),
187 ("liana" , ("char", 10) ),
188 ("stem" , ("double",) )]
189 stems = []
190 counts = []
191 for line in lines:
192 try:
193 liana = line["liana"]
194 except KeyError:
195 liana = ""
196 species_info = [line["line"],
197 tax_dict[(line["family"],
198 line["genus"],
199 line["species"].lower())],
200 line["site"],
201 liana
202 ]
203 try:
204 counts.append([str(value) for value in species_info + [line["count"]]])
205 except KeyError:
206 pass
207
208 for i in line["stems"]:
209 stem = species_info + [i]
210 stems.append([str(value) for value in stem])
211
212 data = [','.join(stem) for stem in stems]
213 self.engine.table = table
214 self.engine.create_table()
215 self.engine.add_to_table(data)
216
217
218 # Create counts table
219 table = Table("counts", delimiter=",", contains_pk=False)
220 table.columns=[("count_id" , ("pk-auto",) ),
221 ("line" , ("int",) ),
222 ("species_id" , ("int",) ),
223 ("site_code" , ("char", 12) ),
224 ("liana" , ("char", 10) ),
225 ("count" , ("double",) )]
226 data = [','.join(count) for count in counts]
227 self.engine.table = table
228 self.engine.create_table()
229 self.engine.add_to_table(data)
230
231 return self.engine
232
233
234 SCRIPT = main()
235
[end of scripts/gentry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/gentry.py b/scripts/gentry.py
--- a/scripts/gentry.py
+++ b/scripts/gentry.py
@@ -25,7 +25,7 @@
self.name = "Alwyn H. Gentry Forest Transect Dataset"
self.shortname = "Gentry"
self.urls = {"stems": "http://www.mobot.org/mobot/gentry/123/all_Excel.zip",
- "sites": "http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt",
+ "sites": "https://ndownloader.figshare.com/files/5515373",
"species": "",
"counts": ""}
self.tags = ["Taxon > Plants", "Spatial Scale > Global",
@@ -42,7 +42,7 @@
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
- self.engine.auto_create_table(Table("sites"), url=self.urls["sites"])
+ self.engine.auto_create_table(Table("sites"), url=self.urls["sites"], filename='gentry_sites.csv')
self.engine.insert_data_from_url(self.urls["sites"])
self.engine.download_file(self.urls["stems"], "all_Excel.zip")
| {"golden_diff": "diff --git a/scripts/gentry.py b/scripts/gentry.py\n--- a/scripts/gentry.py\n+++ b/scripts/gentry.py\n@@ -25,7 +25,7 @@\n self.name = \"Alwyn H. Gentry Forest Transect Dataset\"\n self.shortname = \"Gentry\"\n self.urls = {\"stems\": \"http://www.mobot.org/mobot/gentry/123/all_Excel.zip\",\n- \"sites\": \"http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt\",\n+ \"sites\": \"https://ndownloader.figshare.com/files/5515373\",\n \"species\": \"\",\n \"counts\": \"\"}\n self.tags = [\"Taxon > Plants\", \"Spatial Scale > Global\",\n@@ -42,7 +42,7 @@\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n \n- self.engine.auto_create_table(Table(\"sites\"), url=self.urls[\"sites\"])\n+ self.engine.auto_create_table(Table(\"sites\"), url=self.urls[\"sites\"], filename='gentry_sites.csv')\n self.engine.insert_data_from_url(self.urls[\"sites\"])\n \n self.engine.download_file(self.urls[\"stems\"], \"all_Excel.zip\")\n", "issue": "urllib.request.urlretrieve fails to download Gentry data\n[Gentry site data](https://github.com/weecology/retriever/blob/master/scripts/gentry.py#L28) is failing and it is because of `urllib.request.urlretrieve` in [Engine](https://github.com/weecology/retriever/blob/master/lib/engine.py) `def download_file(self, url, filename):` \n\nI have attached a small script.\n\n```\nurl = 'http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt'\n\nurllib.request.urlretrieve(url, \"fileout\")\n```\n\n`AttributeError: '_fileobject' object has no attribute 'readinto'`\n\nref: #502\n\n", "before_files": [{"content": "#retriever\n\"\"\"Retriever script for Alwyn H. Gentry Forest Transect Dataset\n\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import str\nfrom builtins import range\n\nimport os\nimport sys\nimport zipfile\nimport xlrd\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\nfrom retriever.lib.excel import Excel\n\nVERSION = '1.0.1'\n\nTAX_GROUPS = 9756 #9819\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"Alwyn H. Gentry Forest Transect Dataset\"\n self.shortname = \"Gentry\"\n self.urls = {\"stems\": \"http://www.mobot.org/mobot/gentry/123/all_Excel.zip\",\n \"sites\": \"http://www.ecologicaldata.org/sites/default/files/gentry_sites_data.txt\",\n \"species\": \"\",\n \"counts\": \"\"}\n self.tags = [\"Taxon > Plants\", \"Spatial Scale > Global\",\n \"Data Type > Observational\"]\n self.ref = \"http://www.mobot.org/mobot/research/gentry/welcome.shtml\"\n self.addendum = \"\"\"Researchers who make use of the data in publications are requested to acknowledge Alwyn H. Gentry, the Missouri Botanical Garden, and collectors who assisted Gentry or contributed data for specific sites. It is also requested that a reprint of any publication making use of the Gentry Forest Transect Data be sent to:\n\nBruce E. Ponman\nMissouri Botanical Garden\nP.O. Box 299\nSt. Louis, MO 63166-0299\nU.S.A. 
\"\"\"\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n\n self.engine.auto_create_table(Table(\"sites\"), url=self.urls[\"sites\"])\n self.engine.insert_data_from_url(self.urls[\"sites\"])\n\n self.engine.download_file(self.urls[\"stems\"], \"all_Excel.zip\")\n local_zip = zipfile.ZipFile(self.engine.format_filename(\"all_Excel.zip\"))\n filelist = local_zip.namelist()\n local_zip.close()\n self.engine.download_files_from_archive(self.urls[\"stems\"], filelist)\n\n filelist = [os.path.basename(filename) for filename in filelist]\n\n # Currently all_Excel.zip is missing CURUYUQU.xls\n # Download it separately and add it to the file list\n if not self.engine.find_file('CURUYUQU.xls'):\n self.engine.download_file(\"http://www.mobot.org/mobot/gentry/123/samerica/CURUYUQU.xls\", \"CURUYUQU.xls\")\n filelist.append('CURUYUQU.xls')\n\n lines = []\n tax = []\n for filename in filelist:\n print(\"Extracting data from \" + filename + \"...\")\n book = xlrd.open_workbook(self.engine.format_filename(filename))\n sh = book.sheet_by_index(0)\n rows = sh.nrows\n cn = {'stems': []}\n n = 0\n for colnum, c in enumerate(sh.row(0)):\n if not Excel.empty_cell(c):\n cid = Excel.cell_value(c).lower()\n # line number column is sometimes named differently\n if cid in [\"sub\", \"number\"]:\n cid = \"line\"\n # the \"number of individuals\" column is named in various\n # different ways; they always at least contain \"nd\"\n if \"nd\" in cid:\n cid = \"count\"\n # in QUIAPACA.xls the \"number of individuals\" column is\n # misnamed \"STEMDBH\" just like the stems columns, so weep\n # for the state of scientific data and then fix manually\n if filename == \"QUIAPACA.xls\" and colnum == 13:\n cid = \"count\"\n\n # if column is a stem, add it to the list of stems;\n # otherwise, make note of the column name/number\n if \"stem\" in cid or \"dbh\" in cid:\n cn[\"stems\"].append(n)\n else:\n cn[cid] = n\n n += 1\n # sometimes, a data file does not contain a liana or count column\n if not \"liana\" in list(cn.keys()):\n cn[\"liana\"] = -1\n if not \"count\" in list(cn.keys()):\n cn[\"count\"] = -1\n for i in range(1, rows):\n row = sh.row(i)\n cellcount = len(row)\n # make sure the row is real, not just empty cells\n if not all(Excel.empty_cell(cell) for cell in row):\n try:\n this_line = {}\n\n def format_value(s):\n s = Excel.cell_value(s)\n return str(s).title().replace(\"\\\\\", \"/\").replace('\"', '')\n\n # get the following information from the appropriate columns\n for i in [\"line\", \"family\", \"genus\", \"species\",\n \"liana\", \"count\"]:\n if cn[i] > -1:\n this_line[i] = format_value(row[cn[i]])\n if this_line[i] == '`':\n this_line[i] = 1\n\n this_line[\"stems\"] = [Excel.cell_value(row[c])\n for c in cn[\"stems\"]\n if not Excel.empty_cell(row[c])]\n this_line[\"site\"] = filename[0:-4]\n\n lines.append(this_line)\n\n # Check how far the species is identified\n full_id = 0\n if len(this_line[\"species\"]) < 3:\n if len(this_line[\"genus\"]) < 3:\n id_level = \"family\"\n else:\n id_level = \"genus\"\n else:\n id_level = \"species\"\n full_id = 1\n tax.append((this_line[\"family\"],\n this_line[\"genus\"],\n this_line[\"species\"].lower().replace('\\\\', '').replace('\"', ''),\n id_level,\n str(full_id)))\n except:\n raise\n pass\n\n tax = sorted(tax, key=lambda group: group[0] + \" \" + group[1] + \" \" + group[2])\n unique_tax = []\n tax_dict = dict()\n tax_count = 0\n\n # Get all unique families/genera/species\n for group in tax:\n if not (group in 
unique_tax):\n unique_tax.append(group)\n tax_count += 1\n tax_dict[group[0:3]] = tax_count\n if tax_count % 10 == 0:\n msg = \"Generating taxonomic groups: \" + str(tax_count) + \" / \" + str(TAX_GROUPS)\n sys.stdout.write(msg + \"\\b\" * len(msg))\n print(\"Generating taxonomic groups: \" + str(TAX_GROUPS) + \" / \" + str(TAX_GROUPS))\n\n\n # Create species table\n table = Table(\"species\", delimiter=\",\")\n table.columns=[(\"species_id\" , (\"pk-int\",) ),\n (\"family\" , (\"char\", ) ),\n (\"genus\" , (\"char\", ) ),\n (\"species\" , (\"char\", ) ),\n (\"id_level\" , (\"char\", 10) ),\n (\"full_id\" , (\"bool\",) )]\n\n data = [','.join([str(tax_dict[group[:3]])] + ['\"%s\"' % g for g in group])\n for group in unique_tax]\n table.pk = 'species_id'\n table.contains_pk = True\n\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n\n\n # Create stems table\n table = Table(\"stems\", delimiter=\",\", contains_pk=False)\n table.columns=[(\"stem_id\" , (\"pk-auto\",) ),\n (\"line\" , (\"int\",) ),\n (\"species_id\" , (\"int\",) ),\n (\"site_code\" , (\"char\", 12) ),\n (\"liana\" , (\"char\", 10) ),\n (\"stem\" , (\"double\",) )]\n stems = []\n counts = []\n for line in lines:\n try:\n liana = line[\"liana\"]\n except KeyError:\n liana = \"\"\n species_info = [line[\"line\"],\n tax_dict[(line[\"family\"],\n line[\"genus\"],\n line[\"species\"].lower())],\n line[\"site\"],\n liana\n ]\n try:\n counts.append([str(value) for value in species_info + [line[\"count\"]]])\n except KeyError:\n pass\n\n for i in line[\"stems\"]:\n stem = species_info + [i]\n stems.append([str(value) for value in stem])\n\n data = [','.join(stem) for stem in stems]\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n\n\n # Create counts table\n table = Table(\"counts\", delimiter=\",\", contains_pk=False)\n table.columns=[(\"count_id\" , (\"pk-auto\",) ),\n (\"line\" , (\"int\",) ),\n (\"species_id\" , (\"int\",) ),\n (\"site_code\" , (\"char\", 12) ),\n (\"liana\" , (\"char\", 10) ),\n (\"count\" , (\"double\",) )]\n data = [','.join(count) for count in counts]\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n\n return self.engine\n\n\nSCRIPT = main()\n", "path": "scripts/gentry.py"}]} | 3,411 | 269 |
gh_patches_debug_16731 | rasdani/github-patches | git_diff | Mailu__Mailu-2468 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SSL fails for all domains if a single domain fails LetsEncrypt challenge
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html).
- [X] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple.
## Environment & Versions
### Environment
- [X] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
```
1.9
```
## Description
I used [mailu setup utility](https://setup.mailu.io/1.9/) to create a docker-compose.yml with multiple email domains. Turns out not all domains were already pointing to the server IP, so some challenges failed. This leads to nginx closing port 443. So even the main domain is not reachable via SSL.
After removing the non-working domains the cert is created successfully and SSL is working.
## Replication Steps
Create a new mailu setup, add multiple domains of which some are not pointing to the server.
## Expected behaviour
There should be a certificate for the domains that are reachable and nginx should make those accessible with SSL on port 443.
## Logs
```
2022-10-05T19:47:24.203180336Z Domain: email.example.com
2022-10-05T19:47:24.203182530Z Type: dns
2022-10-05T19:47:24.203184754Z Detail: no valid A records found for email.example.com; no valid AAAA records found for email.example.com
2022-10-05T19:47:24.203187149Z
2022-10-05T19:47:24.203189393Z Hint: The Certificate Authority couldn't exterally verify that the standalone plugin completed the required http-01 challenges. Ensure the plugin is configured correctly and that the changes it makes are accessible from the internet.
2022-10-05T19:47:24.203192008Z
2022-10-05T19:47:24.702017069Z 2022/10/05 21:47:24 [notice] 1#1: signal 1 (SIGHUP) received from 22, reconfiguring
2022-10-05T19:47:24.702118810Z 2022/10/05 21:47:24 [notice] 1#1: reconfiguring
2022-10-05T19:47:24.705542967Z 2022/10/05 21:47:24 [warn] 1#1: conflicting server name "" on 0.0.0.0:80, ignored
2022-10-05T19:47:24.705911789Z 2022/10/05 21:47:24 [notice] 1#1: using the "epoll" event method
2022-10-05T19:47:24.706081756Z 2022/10/05 21:47:24 [notice] 1#1: start worker processes
2022-10-05T19:47:24.706331032Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 23
2022-10-05T19:47:24.706639951Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 24
2022-10-05T19:47:24.706852248Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 25
2022-10-05T19:47:24.730032307Z Hook 'post-hook' ran with output:
2022-10-05T19:47:24.730052144Z Missing cert or key file, disabling TLS
2022-10-05T19:47:24.730291842Z Hook 'post-hook' ran with error output:
2022-10-05T19:47:24.730302613Z nginx: [warn] conflicting server name "" on 0.0.0.0:80, ignored
2022-10-05T19:47:24.732101009Z Some challenges have failed.
2022-10-05T19:47:24.732342892Z Ask for help or search for solutions at https://community.letsencrypt.org. See the logfile /var/log/letsencrypt/letsencrypt.log or re-run Certbot with -v for more details.
```
</issue>
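A note on the failure mode described above: certbot treats the certificate request as a unit, so when validation fails for one hostname the whole run fails, no certificate is written, and Mailu's nginx then disables TLS entirely (the "Missing cert or key file, disabling TLS" line in the log). Certbot's `--allow-subset-of-names` option instead issues a certificate for the hostnames that do validate. The snippet below is only an illustrative sketch of that flag on a command list similar to the one in `letsencrypt.py` shown next; the hostnames are made up and this is not presented as the project's actual fix.

```python
# Illustrative sketch, not Mailu's actual code change. It relies on certbot's
# --allow-subset-of-names flag, which issues a certificate for the hostnames
# that pass validation instead of aborting the whole run when one domain fails.
command = [
    "certbot", "certonly", "--standalone",
    "-n", "--agree-tos",
    "-d", "mail.example.com,email.example.com",  # hypothetical hostnames
    "--allow-subset-of-names",                   # skip names that fail validation
    "--preferred-challenges", "http", "--http-01-port", "8008",
    "--config-dir", "/certs/letsencrypt",
]
```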
<code>
[start of core/nginx/letsencrypt.py]
1 #!/usr/bin/python3
2
3 import os
4 import time
5 import subprocess
6
7 hostnames = ','.join(set(host.strip() for host in os.environ['HOSTNAMES'].split(',')))
8
9 command = [
10 "certbot",
11 "-n", "--agree-tos", # non-interactive
12 "-d", hostnames, "--expand", "--allow-subset-of-names",
13 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
14 "certonly", "--standalone",
15 "--cert-name", "mailu",
16 "--preferred-challenges", "http", "--http-01-port", "8008",
17 "--keep-until-expiring",
18 "--renew-with-new-domains",
19 "--config-dir", "/certs/letsencrypt",
20 "--post-hook", "/config.py"
21 ]
22 command2 = [
23 "certbot",
24 "-n", "--agree-tos", # non-interactive
25 "-d", hostnames, "--expand", "--allow-subset-of-names",
26 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
27 "certonly", "--standalone",
28 "--cert-name", "mailu-ecdsa",
29 "--preferred-challenges", "http", "--http-01-port", "8008",
30 "--keep-until-expiring",
31 "--key-type", "ecdsa",
32 "--renew-with-new-domains",
33 "--config-dir", "/certs/letsencrypt",
34 "--post-hook", "/config.py"
35 ]
36
37 # Wait for nginx to start
38 time.sleep(5)
39
40 # Run certbot every day
41 while True:
42 subprocess.call(command)
43 subprocess.call(command2)
44 time.sleep(86400)
45
[end of core/nginx/letsencrypt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -15,6 +15,7 @@
"--cert-name", "mailu",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
+ "--allow-subset-of-names",
"--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
"--post-hook", "/config.py"
@@ -28,6 +29,7 @@
"--cert-name", "mailu-ecdsa",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
+ "--allow-subset-of-names",
"--key-type", "ecdsa",
"--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
| {"golden_diff": "diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py\n--- a/core/nginx/letsencrypt.py\n+++ b/core/nginx/letsencrypt.py\n@@ -15,6 +15,7 @@\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n+ \"--allow-subset-of-names\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n@@ -28,6 +29,7 @@\n \"--cert-name\", \"mailu-ecdsa\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n+ \"--allow-subset-of-names\",\n \"--key-type\", \"ecdsa\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n", "issue": "SSL fails for all domains if a single domain fails LetsEncrypt challenge\n## Before you open your issue\r\n- [X] Check if no issue or pull-request for this already exists.\r\n- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). \r\n- [X] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [X] The title of the issue should be short and simple.\r\n- \r\n## Environment & Versions\r\n### Environment\r\n - [X] docker compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n```\r\n1.9\r\n```\r\n\r\n## Description\r\nI used [mailu setup utility](https://setup.mailu.io/1.9/) to create a docker-compose.yml with multiple email domains. Turns out not all domains were already pointing to the server IP, so some challenges failed. This leads to nginx closing port 443. So even the main domain is not reachable via SSL.\r\n\r\nAfter removing the non-working domains the cert is created successfully and SSL is working.\r\n\r\n## Replication Steps\r\nCreate a new mailu setup, add multiple domains of which some are not pointing to the server.\r\n\r\n## Expected behaviour\r\nThere should be a certificate for the domains that are reachable and nginx should make those accessible with SSL on port 443.\r\n\r\n## Logs\r\n```\r\n2022-10-05T19:47:24.203180336Z Domain: email.example.com\r\n2022-10-05T19:47:24.203182530Z Type: dns\r\n2022-10-05T19:47:24.203184754Z Detail: no valid A records found for email.example.com; no valid AAAA records found for email.example.com\r\n2022-10-05T19:47:24.203187149Z\r\n2022-10-05T19:47:24.203189393Z Hint: The Certificate Authority couldn't exterally verify that the standalone plugin completed the required http-01 challenges. 
Ensure the plugin is configured correctly and that the changes it makes are accessible from the internet.\r\n2022-10-05T19:47:24.203192008Z\r\n2022-10-05T19:47:24.702017069Z 2022/10/05 21:47:24 [notice] 1#1: signal 1 (SIGHUP) received from 22, reconfiguring\r\n2022-10-05T19:47:24.702118810Z 2022/10/05 21:47:24 [notice] 1#1: reconfiguring\r\n2022-10-05T19:47:24.705542967Z 2022/10/05 21:47:24 [warn] 1#1: conflicting server name \"\" on 0.0.0.0:80, ignored\r\n2022-10-05T19:47:24.705911789Z 2022/10/05 21:47:24 [notice] 1#1: using the \"epoll\" event method\r\n2022-10-05T19:47:24.706081756Z 2022/10/05 21:47:24 [notice] 1#1: start worker processes\r\n2022-10-05T19:47:24.706331032Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 23\r\n2022-10-05T19:47:24.706639951Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 24\r\n2022-10-05T19:47:24.706852248Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 25\r\n2022-10-05T19:47:24.730032307Z Hook 'post-hook' ran with output:\r\n2022-10-05T19:47:24.730052144Z Missing cert or key file, disabling TLS\r\n2022-10-05T19:47:24.730291842Z Hook 'post-hook' ran with error output:\r\n2022-10-05T19:47:24.730302613Z nginx: [warn] conflicting server name \"\" on 0.0.0.0:80, ignored\r\n2022-10-05T19:47:24.732101009Z Some challenges have failed.\r\n2022-10-05T19:47:24.732342892Z Ask for help or search for solutions at https://community.letsencrypt.org. See the logfile /var/log/letsencrypt/letsencrypt.log or re-run Certbot with -v for more details.\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\nhostnames = ','.join(set(host.strip() for host in os.environ['HOSTNAMES'].split(',')))\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", hostnames, \"--expand\", \"--allow-subset-of-names\",\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\ncommand2 = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", hostnames, \"--expand\", \"--allow-subset-of-names\",\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu-ecdsa\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every day\nwhile True:\n subprocess.call(command)\n subprocess.call(command2)\n time.sleep(86400)\n", "path": "core/nginx/letsencrypt.py"}]} | 2,377 | 219 |
gh_patches_debug_8126 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The status of the worker pod should be Error when any error happens.
The status of the worker pod is still Running and will never stop even if errors occur in the `_handle_task` function, for example:
```bash
Traceback (most recent call last):
File "/elasticdl/python/elasticdl/worker/worker.py", line 236, in run
self._handle_task(task)
File "/elasticdl/python/elasticdl/worker/worker.py", line 190, in _handle_task
task, record_buf, min_model_version
File "/elasticdl/python/elasticdl/worker/worker.py", line 211, in _process_minibatch
features, labels
File "/elasticdl/python/elasticdl/worker/worker.py", line 168, in _run_training_task
accepted, min_model_version = self.report_gradient(grads)
File "/elasticdl/python/elasticdl/worker/worker.py", line 115, in report_gradient
res = self._stub.ReportGradient(req)
File "/usr/local/lib/python3.6/dist-packages/grpc/_channel.py", line 565, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/usr/local/lib/python3.6/dist-packages/grpc/_channel.py", line 467, in _end_unary_response_blocking
    raise _Rendezvous(state, None, None, deadline)
```
</issue>
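For context on why the pod never shows Error: broadly speaking, Kubernetes derives the pod status from the container's exit code, so as long as the worker's Python process keeps running (or exits with 0), the pod stays Running no matter how many exceptions were caught internally. The sketch below is illustrative only, based on the worker loop shown in the code that follows; it is not the actual fix, and simply shows the difference between swallowing an exception and letting it terminate the process.

```python
# Minimal sketch (not the real ElasticDL change): report the failure to the
# master, then re-raise so the worker process exits non-zero and Kubernetes
# marks the pod as Error instead of leaving it Running.
def run(worker):
    while True:
        task = worker.get_task()
        if not task.shard_file_name:
            break
        err_msg = ""
        try:
            worker._handle_task(task)
        except Exception as ex:
            err_msg = str(ex)
            worker.report_task_result(task.task_id, err_msg)
            raise  # let the process die so the pod status becomes Error
        worker.report_task_result(task.task_id, err_msg)
```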
<code>
[start of elasticdl/python/elasticdl/worker/worker.py]
1 import logging
2 import traceback
3
4 import tensorflow as tf
5
6 assert tf.executing_eagerly() # noqa
7
8 import recordio
9
10 from contextlib import closing
11 from elasticdl.proto import elasticdl_pb2_grpc
12 from elasticdl.proto import elasticdl_pb2
13 from elasticdl.python.elasticdl.common.ndarray import (
14 ndarray_to_tensor,
15 tensor_to_ndarray,
16 )
17 from elasticdl.python.elasticdl.common.model_helper import load_module
18
19 # The default maximum number of a minibatch retry as its results
20 # (e.g. gradients) are not accepted by master.
21 DEFAULT_MAX_MINIBATCH_RETRY_NUM = 64
22
23
24 class Worker(object):
25 """ElasticDL worker"""
26
27 def __init__(
28 self,
29 worker_id,
30 model_file,
31 channel=None,
32 max_minibatch_retry_num=DEFAULT_MAX_MINIBATCH_RETRY_NUM,
33 codec_file=None,
34 ):
35 """
36 Arguments:
37 model_file: A module to define the model
38 channel: grpc channel
39 max_minibatch_retry_num: The maximum number of a minibatch retry
40 as its results (e.g. gradients) are not accepted by master.
41 """
42 self._logger = logging.getLogger(__name__)
43 self._worker_id = worker_id
44 model_module = load_module(model_file)
45 self._model = model_module.model
46 self._feature_columns = model_module.feature_columns()
47 self._var_created = self._model.built
48 self._input_fn = model_module.input_fn
49 self._opt_fn = model_module.optimizer
50 self._loss = model_module.loss
51 self._eval_metrics_fn = model_module.eval_metrics_fn
52 all_columns = self._feature_columns + model_module.label_columns()
53
54 # Initilize codec
55 codec_module = load_module(codec_file)
56 codec_module.codec.init(all_columns)
57 self._codec = codec_module.codec
58
59 if channel is None:
60 self._stub = None
61 else:
62 self._stub = elasticdl_pb2_grpc.MasterStub(channel)
63 self._max_minibatch_retry_num = max_minibatch_retry_num
64 self._model_version = -1
65
66 def get_task(self):
67 """
68 get task from master
69 """
70 req = elasticdl_pb2.GetTaskRequest()
71 req.worker_id = self._worker_id
72
73 return self._stub.GetTask(req)
74
75 def get_model(self, version, method):
76 """
77 get model from master, and update model_version
78 """
79 req = elasticdl_pb2.GetModelRequest()
80 req.version = version
81 req.method = method
82 model = self._stub.GetModel(req)
83
84 for var in self._model.trainable_variables:
85 # Assumes all trainable variables exist in model.param.
86 var.assign(tensor_to_ndarray(model.param[var.name]))
87 self._model_version = model.version
88
89 def report_task_result(self, task_id, err_msg):
90 """
91 report task result to master
92 """
93 report = elasticdl_pb2.ReportTaskResultRequest()
94 report.task_id = task_id
95 report.err_message = err_msg
96 return self._stub.ReportTaskResult(report)
97
98 def report_variable(self):
99 """
100 report variable to ps.
101 """
102 req = elasticdl_pb2.ReportVariableRequest()
103 for v in self._model.trainable_variables:
104 req.variable[v.name].CopyFrom(ndarray_to_tensor(v.numpy()))
105 self._stub.ReportVariable(req)
106
107 def report_gradient(self, grads):
108 """
109 report gradient to ps, return (accepted, model_version) from rpc call.
110 """
111 req = elasticdl_pb2.ReportGradientRequest()
112 for g, v in zip(grads, self._model.trainable_variables):
113 req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))
114 req.model_version = self._model_version
115 res = self._stub.ReportGradient(req)
116 return res.accepted, res.model_version
117
118 def report_evaluation_metrics(self, evaluation_metrics):
119 """
120 report evaluation metrics to ps, return (accepted, model_version)
121 from rpc call.
122 """
123 req = elasticdl_pb2.ReportEvaluationMetricsRequest()
124 for k, v in evaluation_metrics.items():
125 v_np = v.numpy()
126 # If scalar, convert to numpy 1D array with size 1
127 if not v_np.shape:
128 v_np = v_np.reshape(1)
129 req.evaluation_metrics[k].CopyFrom(ndarray_to_tensor(v_np))
130 req.model_version = self._model_version
131 res = self._stub.ReportEvaluationMetrics(req)
132 return res.accepted, res.model_version
133
134 @staticmethod
135 def _get_batch(reader, batch_size, decode):
136 res = []
137 for i in range(batch_size):
138 record = reader.record()
139 if record is None:
140 break
141 res.append(decode(record))
142 return res
143
144 def _get_features_and_labels(self, record_buf):
145 batch_input_data, batch_labels = self._input_fn(record_buf)
146 features = [
147 batch_input_data[f_col.key] for f_col in self._feature_columns
148 ]
149 if len(features) == 1:
150 features = features[0]
151 return features, batch_labels
152
153 def _create_variable_and_report(self, features):
154 # Use model.call to create variables, then report to ps
155 _ = self._model.call(features)
156 self.report_variable()
157 self._var_created = True
158
159 def _run_training_task(self, features, labels):
160 with tf.GradientTape() as tape:
161 outputs = self._model.call(features, training=True)
162 loss = self._loss(outputs, labels)
163
164 # TODO: Add regularization loss if any,
165 # which should be divided by the
166 # number of contributing workers.
167 grads = tape.gradient(loss, self._model.trainable_variables)
168 accepted, min_model_version = self.report_gradient(grads)
169 return accepted, min_model_version, loss
170
171 def _run_evaluation_task(self, features, labels):
172 outputs = self._model.call(features, training=False)
173 evaluation_metrics = self._eval_metrics_fn(outputs, labels)
174 return self.report_evaluation_metrics(evaluation_metrics)
175
176 def _handle_task(self, task):
177 min_model_version = task.model_version
178 with closing(
179 recordio.Scanner(
180 task.shard_file_name, task.start, task.end - task.start
181 )
182 ) as reader:
183 while True:
184 record_buf = self._get_batch(
185 reader, task.minibatch_size, self._codec.decode
186 )
187 if not record_buf:
188 break
189 min_model_version = self._process_minibatch(
190 task, record_buf, min_model_version
191 )
192
193 def _process_minibatch(self, task, record_buf, min_model_version):
194 features, labels = self._get_features_and_labels(record_buf)
195 if not self._var_created:
196 self._create_variable_and_report(features)
197 for _ in range(self._max_minibatch_retry_num):
198 if task.type == elasticdl_pb2.EVALUATION:
199 self.get_model(min_model_version, elasticdl_pb2.FIXED)
200 accepted, _ = self._run_evaluation_task(features, labels)
201 if accepted:
202 break
203 elif task.type == elasticdl_pb2.TRAINING:
204 # TODO: optimize the logic to avoid unnecessary
205 # get_model call.
206 self.get_model(
207 max(self._model_version, min_model_version),
208 elasticdl_pb2.MINIMUM,
209 )
210 accepted, min_model_version, loss = self._run_training_task(
211 features, labels
212 )
213 if accepted:
214 self._logger.info("Loss is %f" % loss.numpy())
215 break
216 else:
217 raise RuntimeError("Unrecognized task type, %s" % task.type)
218 else:
219 # Worker got stuck, fail the task.
220 # TODO: stop the worker if it fails to make any
221 # progress for some time.
222 raise RuntimeError("Worker got stuck")
223 return min_model_version
224
225 def run(self):
226 """
227 Fetches task from master and performs training or evaluation.
228 """
229 while True:
230 task = self.get_task()
231 if not task.shard_file_name:
232 # No more task
233 break
234 err_msg = ""
235 try:
236 self._handle_task(task)
237 except Exception as ex:
238 err_msg = str(ex)
239 traceback.print_exc()
240 self.report_task_result(task.task_id, err_msg)
241
[end of elasticdl/python/elasticdl/worker/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/elasticdl/worker/worker.py b/elasticdl/python/elasticdl/worker/worker.py
--- a/elasticdl/python/elasticdl/worker/worker.py
+++ b/elasticdl/python/elasticdl/worker/worker.py
@@ -234,7 +234,11 @@
err_msg = ""
try:
self._handle_task(task)
+ except RuntimeError as err:
+ err_msg = str(err)
+ traceback.print_exc()
except Exception as ex:
err_msg = str(ex)
traceback.print_exc()
+ raise ex
self.report_task_result(task.task_id, err_msg)
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/worker/worker.py b/elasticdl/python/elasticdl/worker/worker.py\n--- a/elasticdl/python/elasticdl/worker/worker.py\n+++ b/elasticdl/python/elasticdl/worker/worker.py\n@@ -234,7 +234,11 @@\n err_msg = \"\"\n try:\n self._handle_task(task)\n+ except RuntimeError as err:\n+ err_msg = str(err)\n+ traceback.print_exc()\n except Exception as ex:\n err_msg = str(ex)\n traceback.print_exc()\n+ raise ex\n self.report_task_result(task.task_id, err_msg)\n", "issue": "The status of the worker pod should be Error when there is any error happened.\nThe status of the worker pod is still Running and will never stop even if there are some errors in function `handle_task` like:\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/elasticdl/python/elasticdl/worker/worker.py\", line 236, in run\r\n self._handle_task(task)\r\n File \"/elasticdl/python/elasticdl/worker/worker.py\", line 190, in _handle_task\r\n task, record_buf, min_model_version\r\n File \"/elasticdl/python/elasticdl/worker/worker.py\", line 211, in _process_minibatch\r\n features, labels\r\n File \"/elasticdl/python/elasticdl/worker/worker.py\", line 168, in _run_training_task\r\n accepted, min_model_version = self.report_gradient(grads)\r\n File \"/elasticdl/python/elasticdl/worker/worker.py\", line 115, in report_gradient\r\n res = self._stub.ReportGradient(req)\r\n File \"/usr/local/lib/python3.6/dist-packages/grpc/_channel.py\", line 565, in __call__\r\n return _end_unary_response_blocking(state, call, False, None)\r\n File \"/usr/local/lib/python3.6/dist-packages/grpc/_channel.py\", line 467, in _end_unary_response_blocking\r\n raise _Rendezvous(state, None, None, deadline)\n", "before_files": [{"content": "import logging\nimport traceback\n\nimport tensorflow as tf\n\nassert tf.executing_eagerly() # noqa\n\nimport recordio\n\nfrom contextlib import closing\nfrom elasticdl.proto import elasticdl_pb2_grpc\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.elasticdl.common.ndarray import (\n ndarray_to_tensor,\n tensor_to_ndarray,\n)\nfrom elasticdl.python.elasticdl.common.model_helper import load_module\n\n# The default maximum number of a minibatch retry as its results\n# (e.g. gradients) are not accepted by master.\nDEFAULT_MAX_MINIBATCH_RETRY_NUM = 64\n\n\nclass Worker(object):\n \"\"\"ElasticDL worker\"\"\"\n\n def __init__(\n self,\n worker_id,\n model_file,\n channel=None,\n max_minibatch_retry_num=DEFAULT_MAX_MINIBATCH_RETRY_NUM,\n codec_file=None,\n ):\n \"\"\"\n Arguments:\n model_file: A module to define the model\n channel: grpc channel\n max_minibatch_retry_num: The maximum number of a minibatch retry\n as its results (e.g. 
gradients) are not accepted by master.\n \"\"\"\n self._logger = logging.getLogger(__name__)\n self._worker_id = worker_id\n model_module = load_module(model_file)\n self._model = model_module.model\n self._feature_columns = model_module.feature_columns()\n self._var_created = self._model.built\n self._input_fn = model_module.input_fn\n self._opt_fn = model_module.optimizer\n self._loss = model_module.loss\n self._eval_metrics_fn = model_module.eval_metrics_fn\n all_columns = self._feature_columns + model_module.label_columns()\n\n # Initilize codec\n codec_module = load_module(codec_file)\n codec_module.codec.init(all_columns)\n self._codec = codec_module.codec\n\n if channel is None:\n self._stub = None\n else:\n self._stub = elasticdl_pb2_grpc.MasterStub(channel)\n self._max_minibatch_retry_num = max_minibatch_retry_num\n self._model_version = -1\n\n def get_task(self):\n \"\"\"\n get task from master\n \"\"\"\n req = elasticdl_pb2.GetTaskRequest()\n req.worker_id = self._worker_id\n\n return self._stub.GetTask(req)\n\n def get_model(self, version, method):\n \"\"\"\n get model from master, and update model_version\n \"\"\"\n req = elasticdl_pb2.GetModelRequest()\n req.version = version\n req.method = method\n model = self._stub.GetModel(req)\n\n for var in self._model.trainable_variables:\n # Assumes all trainable variables exist in model.param.\n var.assign(tensor_to_ndarray(model.param[var.name]))\n self._model_version = model.version\n\n def report_task_result(self, task_id, err_msg):\n \"\"\"\n report task result to master\n \"\"\"\n report = elasticdl_pb2.ReportTaskResultRequest()\n report.task_id = task_id\n report.err_message = err_msg\n return self._stub.ReportTaskResult(report)\n\n def report_variable(self):\n \"\"\"\n report variable to ps.\n \"\"\"\n req = elasticdl_pb2.ReportVariableRequest()\n for v in self._model.trainable_variables:\n req.variable[v.name].CopyFrom(ndarray_to_tensor(v.numpy()))\n self._stub.ReportVariable(req)\n\n def report_gradient(self, grads):\n \"\"\"\n report gradient to ps, return (accepted, model_version) from rpc call.\n \"\"\"\n req = elasticdl_pb2.ReportGradientRequest()\n for g, v in zip(grads, self._model.trainable_variables):\n req.gradient[v.name].CopyFrom(ndarray_to_tensor(g.numpy()))\n req.model_version = self._model_version\n res = self._stub.ReportGradient(req)\n return res.accepted, res.model_version\n\n def report_evaluation_metrics(self, evaluation_metrics):\n \"\"\"\n report evaluation metrics to ps, return (accepted, model_version)\n from rpc call.\n \"\"\"\n req = elasticdl_pb2.ReportEvaluationMetricsRequest()\n for k, v in evaluation_metrics.items():\n v_np = v.numpy()\n # If scalar, convert to numpy 1D array with size 1\n if not v_np.shape:\n v_np = v_np.reshape(1)\n req.evaluation_metrics[k].CopyFrom(ndarray_to_tensor(v_np))\n req.model_version = self._model_version\n res = self._stub.ReportEvaluationMetrics(req)\n return res.accepted, res.model_version\n\n @staticmethod\n def _get_batch(reader, batch_size, decode):\n res = []\n for i in range(batch_size):\n record = reader.record()\n if record is None:\n break\n res.append(decode(record))\n return res\n\n def _get_features_and_labels(self, record_buf):\n batch_input_data, batch_labels = self._input_fn(record_buf)\n features = [\n batch_input_data[f_col.key] for f_col in self._feature_columns\n ]\n if len(features) == 1:\n features = features[0]\n return features, batch_labels\n\n def _create_variable_and_report(self, features):\n # Use model.call to create variables, then 
report to ps\n _ = self._model.call(features)\n self.report_variable()\n self._var_created = True\n\n def _run_training_task(self, features, labels):\n with tf.GradientTape() as tape:\n outputs = self._model.call(features, training=True)\n loss = self._loss(outputs, labels)\n\n # TODO: Add regularization loss if any,\n # which should be divided by the\n # number of contributing workers.\n grads = tape.gradient(loss, self._model.trainable_variables)\n accepted, min_model_version = self.report_gradient(grads)\n return accepted, min_model_version, loss\n\n def _run_evaluation_task(self, features, labels):\n outputs = self._model.call(features, training=False)\n evaluation_metrics = self._eval_metrics_fn(outputs, labels)\n return self.report_evaluation_metrics(evaluation_metrics)\n\n def _handle_task(self, task):\n min_model_version = task.model_version\n with closing(\n recordio.Scanner(\n task.shard_file_name, task.start, task.end - task.start\n )\n ) as reader:\n while True:\n record_buf = self._get_batch(\n reader, task.minibatch_size, self._codec.decode\n )\n if not record_buf:\n break\n min_model_version = self._process_minibatch(\n task, record_buf, min_model_version\n )\n\n def _process_minibatch(self, task, record_buf, min_model_version):\n features, labels = self._get_features_and_labels(record_buf)\n if not self._var_created:\n self._create_variable_and_report(features)\n for _ in range(self._max_minibatch_retry_num):\n if task.type == elasticdl_pb2.EVALUATION:\n self.get_model(min_model_version, elasticdl_pb2.FIXED)\n accepted, _ = self._run_evaluation_task(features, labels)\n if accepted:\n break\n elif task.type == elasticdl_pb2.TRAINING:\n # TODO: optimize the logic to avoid unnecessary\n # get_model call.\n self.get_model(\n max(self._model_version, min_model_version),\n elasticdl_pb2.MINIMUM,\n )\n accepted, min_model_version, loss = self._run_training_task(\n features, labels\n )\n if accepted:\n self._logger.info(\"Loss is %f\" % loss.numpy())\n break\n else:\n raise RuntimeError(\"Unrecognized task type, %s\" % task.type)\n else:\n # Worker got stuck, fail the task.\n # TODO: stop the worker if it fails to make any\n # progress for some time.\n raise RuntimeError(\"Worker got stuck\")\n return min_model_version\n\n def run(self):\n \"\"\"\n Fetches task from master and performs training or evaluation.\n \"\"\"\n while True:\n task = self.get_task()\n if not task.shard_file_name:\n # No more task\n break\n err_msg = \"\"\n try:\n self._handle_task(task)\n except Exception as ex:\n err_msg = str(ex)\n traceback.print_exc()\n self.report_task_result(task.task_id, err_msg)\n", "path": "elasticdl/python/elasticdl/worker/worker.py"}]} | 3,291 | 146 |
gh_patches_debug_14215 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-1734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors when `completion show` setting is false
Pressing up, down, or tab when there are completions available but the `completion show` setting is `false` crashes qutebrowser. The relevant error:
```
...
File "/home/marshall/Github/qutebrowser/qutebrowser/completion/completionwidget.py", line 164, in _next_idx
idx = self.selectionModel().currentIndex()
AttributeError: 'NoneType' object has no attribute 'currentIndex'
```
This appears after the recent completion fixes from @rcorre.
Generally, the completion `show` and `auto-open` options don't seem to make sense. Before the change, `show` would do what I expected `auto-open` to do (turning it off hides the completion window until the user asks for it with up, down, or tab). `auto-open` doesn't do anything (before or after the change), as far as I can tell.
</issue>
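The traceback above points at `self.selectionModel()` returning None, which in Qt happens when no model has been set on the view yet; it suggests that with `show` disabled, `completion_item_next`/`prev` can run before a completion model was ever attached. The snippet below is only a sketch of a defensive guard around that call, based on the `_next_prev_item` method shown in the code that follows; it is not presented as the fix qutebrowser actually shipped.

```python
# Sketch of a None guard, assuming the crash comes from QTreeView.selectionModel()
# being None before any completion model has been set on the view.
def _next_prev_item(self, prev):
    selmodel = self.selectionModel()
    if selmodel is None:
        return  # nothing to select yet
    idx = self._next_idx(prev)
    qtutils.ensure_valid(idx)
    selmodel.setCurrentIndex(
        idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
```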
<code>
[start of qutebrowser/completion/completionwidget.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completion view for statusbar command section.
21
22 Defines a CompletionView which uses CompletionFiterModel and CompletionModel
23 subclasses to provide completions.
24 """
25
26 from PyQt5.QtWidgets import QStyle, QTreeView, QSizePolicy
27 from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QItemSelectionModel,
28 QItemSelection)
29
30 from qutebrowser.config import config, style
31 from qutebrowser.completion import completiondelegate
32 from qutebrowser.completion.models import base
33 from qutebrowser.utils import qtutils, objreg, utils, usertypes
34 from qutebrowser.commands import cmdexc, cmdutils
35
36
37 class CompletionView(QTreeView):
38
39 """The view showing available completions.
40
41 Based on QTreeView but heavily customized so root elements show as category
42 headers, and children show as flat list.
43
44 Attributes:
45 enabled: Whether showing the CompletionView is enabled.
46 _win_id: The ID of the window this CompletionView is associated with.
47 _height: The height to use for the CompletionView.
48 _height_perc: Either None or a percentage if height should be relative.
49 _delegate: The item delegate used.
50 _column_widths: A list of column widths, in percent.
51
52 Signals:
53 resize_completion: Emitted when the completion should be resized.
54 selection_changed: Emitted when the completion item selection changes.
55 """
56
57 # Drawing the item foreground will be done by CompletionItemDelegate, so we
58 # don't define that in this stylesheet.
59 STYLESHEET = """
60 QTreeView {
61 font: {{ font['completion'] }};
62 background-color: {{ color['completion.bg'] }};
63 alternate-background-color: {{ color['completion.alternate-bg'] }};
64 outline: 0;
65 border: 0px;
66 }
67
68 QTreeView::item:disabled {
69 background-color: {{ color['completion.category.bg'] }};
70 border-top: 1px solid
71 {{ color['completion.category.border.top'] }};
72 border-bottom: 1px solid
73 {{ color['completion.category.border.bottom'] }};
74 }
75
76 QTreeView::item:selected, QTreeView::item:selected:hover {
77 border-top: 1px solid
78 {{ color['completion.item.selected.border.top'] }};
79 border-bottom: 1px solid
80 {{ color['completion.item.selected.border.bottom'] }};
81 background-color: {{ color['completion.item.selected.bg'] }};
82 }
83
84 QTreeView:item::hover {
85 border: 0px;
86 }
87
88 QTreeView QScrollBar {
89 width: {{ config.get('completion', 'scrollbar-width') }}px;
90 background: {{ color['completion.scrollbar.bg'] }};
91 }
92
93 QTreeView QScrollBar::handle {
94 background: {{ color['completion.scrollbar.fg'] }};
95 border: {{ config.get('completion', 'scrollbar-padding') }}px solid
96 {{ color['completion.scrollbar.bg'] }};
97 min-height: 10px;
98 }
99
100 QTreeView QScrollBar::sub-line, QScrollBar::add-line {
101 border: none;
102 background: none;
103 }
104 """
105
106 resize_completion = pyqtSignal()
107 selection_changed = pyqtSignal(QItemSelection)
108
109 def __init__(self, win_id, parent=None):
110 super().__init__(parent)
111 self._win_id = win_id
112 self.enabled = config.get('completion', 'show')
113 objreg.get('config').changed.connect(self.set_enabled)
114 # FIXME handle new aliases.
115 # objreg.get('config').changed.connect(self.init_command_completion)
116
117 self._column_widths = base.BaseCompletionModel.COLUMN_WIDTHS
118
119 self._delegate = completiondelegate.CompletionItemDelegate(self)
120 self.setItemDelegate(self._delegate)
121 style.set_register_stylesheet(self)
122 self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
123 self.setHeaderHidden(True)
124 self.setAlternatingRowColors(True)
125 self.setIndentation(0)
126 self.setItemsExpandable(False)
127 self.setExpandsOnDoubleClick(False)
128 self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
129 # WORKAROUND
130 # This is a workaround for weird race conditions with invalid
131 # item indexes leading to segfaults in Qt.
132 #
133 # Some background: http://bugs.quassel-irc.org/issues/663
134 # The proposed fix there was later reverted because it didn't help.
135 self.setUniformRowHeights(True)
136 self.hide()
137 # FIXME set elidemode
138 # https://github.com/The-Compiler/qutebrowser/issues/118
139
140 def __repr__(self):
141 return utils.get_repr(self)
142
143 def _resize_columns(self):
144 """Resize the completion columns based on column_widths."""
145 width = self.size().width()
146 pixel_widths = [(width * perc // 100) for perc in self._column_widths]
147 if self.verticalScrollBar().isVisible():
148 pixel_widths[-1] -= self.style().pixelMetric(
149 QStyle.PM_ScrollBarExtent) + 5
150 for i, w in enumerate(pixel_widths):
151 self.setColumnWidth(i, w)
152
153 def _next_idx(self, upwards):
154 """Get the previous/next QModelIndex displayed in the view.
155
156 Used by tab_handler.
157
158 Args:
159 upwards: Get previous item, not next.
160
161 Return:
162 A QModelIndex.
163 """
164 idx = self.selectionModel().currentIndex()
165 if not idx.isValid():
166 # No item selected yet
167 if upwards:
168 return self.model().last_item()
169 else:
170 return self.model().first_item()
171 while True:
172 idx = self.indexAbove(idx) if upwards else self.indexBelow(idx)
173 # wrap around if we arrived at beginning/end
174 if not idx.isValid() and upwards:
175 return self.model().last_item()
176 elif not idx.isValid() and not upwards:
177 idx = self.model().first_item()
178 self.scrollTo(idx.parent())
179 return idx
180 elif idx.parent().isValid():
181 # Item is a real item, not a category header -> success
182 return idx
183
184 def _next_prev_item(self, prev):
185 """Handle a tab press for the CompletionView.
186
187 Select the previous/next item and write the new text to the
188 statusbar.
189
190 Helper for completion_item_next and completion_item_prev.
191
192 Args:
193 prev: True for prev item, False for next one.
194 """
195 idx = self._next_idx(prev)
196 qtutils.ensure_valid(idx)
197 self.selectionModel().setCurrentIndex(
198 idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
199
200 def set_model(self, model):
201 """Switch completion to a new model.
202
203 Called from on_update_completion().
204
205 Args:
206 model: The model to use.
207 """
208 old_model = self.model()
209 sel_model = self.selectionModel()
210
211 self.setModel(model)
212
213 if sel_model is not None:
214 sel_model.deleteLater()
215 if old_model is not None:
216 old_model.deleteLater()
217
218 for i in range(model.rowCount()):
219 self.expand(model.index(i, 0))
220
221 self._column_widths = model.srcmodel.COLUMN_WIDTHS
222 self._resize_columns()
223 self.maybe_resize_completion()
224
225 def set_pattern(self, pattern):
226 """Set the completion pattern for the current model.
227
228 Called from on_update_completion().
229
230 Args:
231 pattern: The filter pattern to set (what the user entered).
232 """
233 self.model().set_pattern(pattern)
234 self.maybe_resize_completion()
235
236 @pyqtSlot()
237 def maybe_resize_completion(self):
238 """Emit the resize_completion signal if the config says so."""
239 if config.get('completion', 'shrink'):
240 self.resize_completion.emit()
241
242 @config.change_filter('completion', 'show')
243 def set_enabled(self):
244 """Update self.enabled when the config changed."""
245 self.enabled = config.get('completion', 'show')
246
247 @pyqtSlot()
248 def on_clear_completion_selection(self):
249 """Clear the selection model when an item is activated."""
250 selmod = self.selectionModel()
251 if selmod is not None:
252 selmod.clearSelection()
253 selmod.clearCurrentIndex()
254
255 def selectionChanged(self, selected, deselected):
256 """Extend selectionChanged to call completers selection_changed."""
257 super().selectionChanged(selected, deselected)
258 self.selection_changed.emit(selected)
259
260 def resizeEvent(self, e):
261 """Extend resizeEvent to adjust column size."""
262 super().resizeEvent(e)
263 self._resize_columns()
264
265 def showEvent(self, e):
266 """Adjust the completion size and scroll when it's freshly shown."""
267 self.resize_completion.emit()
268 scrollbar = self.verticalScrollBar()
269 if scrollbar is not None:
270 scrollbar.setValue(scrollbar.minimum())
271 super().showEvent(e)
272
273 @cmdutils.register(instance='completion', hide=True,
274 modes=[usertypes.KeyMode.command], scope='window')
275 def completion_item_prev(self):
276 """Select the previous completion item."""
277 self._next_prev_item(True)
278
279 @cmdutils.register(instance='completion', hide=True,
280 modes=[usertypes.KeyMode.command], scope='window')
281 def completion_item_next(self):
282 """Select the next completion item."""
283 self._next_prev_item(False)
284
285 @cmdutils.register(instance='completion', hide=True,
286 modes=[usertypes.KeyMode.command], scope='window')
287 def completion_item_del(self):
288 """Delete the current completion item."""
289 if not self.currentIndex().isValid():
290 raise cmdexc.CommandError("No item selected!")
291 try:
292 self.model().srcmodel.delete_cur_item(self)
293 except NotImplementedError:
294 raise cmdexc.CommandError("Cannot delete this item.")
295
[end of qutebrowser/completion/completionwidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qutebrowser/completion/completionwidget.py b/qutebrowser/completion/completionwidget.py
--- a/qutebrowser/completion/completionwidget.py
+++ b/qutebrowser/completion/completionwidget.py
@@ -192,10 +192,13 @@
Args:
prev: True for prev item, False for next one.
"""
- idx = self._next_idx(prev)
- qtutils.ensure_valid(idx)
- self.selectionModel().setCurrentIndex(
- idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
+ # selmodel can be None if 'show' and 'auto-open' are set to False
+ # https://github.com/The-Compiler/qutebrowser/issues/1731
+ selmodel = self.selectionModel()
+ if (selmodel is not None):
+ idx = self._next_idx(prev)
+ selmodel.setCurrentIndex(idx,
+ QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
def set_model(self, model):
"""Switch completion to a new model.
| {"golden_diff": "diff --git a/qutebrowser/completion/completionwidget.py b/qutebrowser/completion/completionwidget.py\n--- a/qutebrowser/completion/completionwidget.py\n+++ b/qutebrowser/completion/completionwidget.py\n@@ -192,10 +192,13 @@\n Args:\n prev: True for prev item, False for next one.\n \"\"\"\n- idx = self._next_idx(prev)\n- qtutils.ensure_valid(idx)\n- self.selectionModel().setCurrentIndex(\n- idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)\n+ # selmodel can be None if 'show' and 'auto-open' are set to False\n+ # https://github.com/The-Compiler/qutebrowser/issues/1731\n+ selmodel = self.selectionModel()\n+ if (selmodel is not None):\n+ idx = self._next_idx(prev)\n+ selmodel.setCurrentIndex(idx,\n+ QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)\n \n def set_model(self, model):\n \"\"\"Switch completion to a new model.\n", "issue": "Errors when `completion show` setting is false\nPressing up, down, or tab when there are completions available but the `completion show` setting is `false` crashes qutebrowser. The relevant error:\n\n```\n ...\n File \"/home/marshall/Github/qutebrowser/qutebrowser/completion/completionwidget.py\", line 164, in _next_idx\n idx = self.selectionModel().currentIndex()\nAttributeError: 'NoneType' object has no attribute 'currentIndex'\n```\n\nThis appears after the recent completion fixes from @rcorre.\n\nGenerally, the completion `show` and `auto-open` options don't seem to make sense. Before the change, `show` would do what I expected `auto-open` to do (turning it off hides the completion window until the user asks for it with up, down, or tab). `auto-open` doesn't do anything (before or after the change), as far as I can tell.\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion view for statusbar command section.\n\nDefines a CompletionView which uses CompletionFiterModel and CompletionModel\nsubclasses to provide completions.\n\"\"\"\n\nfrom PyQt5.QtWidgets import QStyle, QTreeView, QSizePolicy\nfrom PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QItemSelectionModel,\n QItemSelection)\n\nfrom qutebrowser.config import config, style\nfrom qutebrowser.completion import completiondelegate\nfrom qutebrowser.completion.models import base\nfrom qutebrowser.utils import qtutils, objreg, utils, usertypes\nfrom qutebrowser.commands import cmdexc, cmdutils\n\n\nclass CompletionView(QTreeView):\n\n \"\"\"The view showing available completions.\n\n Based on QTreeView but heavily customized so root elements show as category\n headers, and children show as flat list.\n\n Attributes:\n enabled: Whether showing the CompletionView is enabled.\n _win_id: The ID of the window this CompletionView is associated with.\n _height: The height to use for the CompletionView.\n _height_perc: Either None or a percentage if height should be relative.\n _delegate: The item delegate used.\n _column_widths: A list of column widths, in percent.\n\n Signals:\n resize_completion: Emitted when the completion should be resized.\n selection_changed: Emitted when the completion item selection changes.\n \"\"\"\n\n # Drawing the item foreground will be done by CompletionItemDelegate, so we\n # don't define that in this stylesheet.\n STYLESHEET = \"\"\"\n QTreeView {\n font: {{ font['completion'] }};\n background-color: {{ color['completion.bg'] }};\n alternate-background-color: {{ color['completion.alternate-bg'] }};\n outline: 0;\n border: 0px;\n }\n\n QTreeView::item:disabled {\n background-color: {{ color['completion.category.bg'] }};\n border-top: 1px solid\n {{ color['completion.category.border.top'] }};\n border-bottom: 1px solid\n {{ color['completion.category.border.bottom'] }};\n }\n\n QTreeView::item:selected, QTreeView::item:selected:hover {\n border-top: 1px solid\n {{ color['completion.item.selected.border.top'] }};\n border-bottom: 1px solid\n {{ color['completion.item.selected.border.bottom'] }};\n background-color: {{ color['completion.item.selected.bg'] }};\n }\n\n QTreeView:item::hover {\n border: 0px;\n }\n\n QTreeView QScrollBar {\n width: {{ config.get('completion', 'scrollbar-width') }}px;\n background: {{ color['completion.scrollbar.bg'] }};\n }\n\n QTreeView QScrollBar::handle {\n background: {{ color['completion.scrollbar.fg'] }};\n border: {{ config.get('completion', 'scrollbar-padding') }}px solid\n {{ color['completion.scrollbar.bg'] }};\n min-height: 10px;\n }\n\n QTreeView QScrollBar::sub-line, QScrollBar::add-line {\n border: none;\n background: none;\n }\n \"\"\"\n\n resize_completion = pyqtSignal()\n selection_changed = pyqtSignal(QItemSelection)\n\n def __init__(self, win_id, parent=None):\n super().__init__(parent)\n self._win_id = win_id\n self.enabled = config.get('completion', 'show')\n objreg.get('config').changed.connect(self.set_enabled)\n # FIXME handle new aliases.\n # objreg.get('config').changed.connect(self.init_command_completion)\n\n self._column_widths = base.BaseCompletionModel.COLUMN_WIDTHS\n\n self._delegate = completiondelegate.CompletionItemDelegate(self)\n self.setItemDelegate(self._delegate)\n style.set_register_stylesheet(self)\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)\n self.setHeaderHidden(True)\n self.setAlternatingRowColors(True)\n self.setIndentation(0)\n 
self.setItemsExpandable(False)\n self.setExpandsOnDoubleClick(False)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n # WORKAROUND\n # This is a workaround for weird race conditions with invalid\n # item indexes leading to segfaults in Qt.\n #\n # Some background: http://bugs.quassel-irc.org/issues/663\n # The proposed fix there was later reverted because it didn't help.\n self.setUniformRowHeights(True)\n self.hide()\n # FIXME set elidemode\n # https://github.com/The-Compiler/qutebrowser/issues/118\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _resize_columns(self):\n \"\"\"Resize the completion columns based on column_widths.\"\"\"\n width = self.size().width()\n pixel_widths = [(width * perc // 100) for perc in self._column_widths]\n if self.verticalScrollBar().isVisible():\n pixel_widths[-1] -= self.style().pixelMetric(\n QStyle.PM_ScrollBarExtent) + 5\n for i, w in enumerate(pixel_widths):\n self.setColumnWidth(i, w)\n\n def _next_idx(self, upwards):\n \"\"\"Get the previous/next QModelIndex displayed in the view.\n\n Used by tab_handler.\n\n Args:\n upwards: Get previous item, not next.\n\n Return:\n A QModelIndex.\n \"\"\"\n idx = self.selectionModel().currentIndex()\n if not idx.isValid():\n # No item selected yet\n if upwards:\n return self.model().last_item()\n else:\n return self.model().first_item()\n while True:\n idx = self.indexAbove(idx) if upwards else self.indexBelow(idx)\n # wrap around if we arrived at beginning/end\n if not idx.isValid() and upwards:\n return self.model().last_item()\n elif not idx.isValid() and not upwards:\n idx = self.model().first_item()\n self.scrollTo(idx.parent())\n return idx\n elif idx.parent().isValid():\n # Item is a real item, not a category header -> success\n return idx\n\n def _next_prev_item(self, prev):\n \"\"\"Handle a tab press for the CompletionView.\n\n Select the previous/next item and write the new text to the\n statusbar.\n\n Helper for completion_item_next and completion_item_prev.\n\n Args:\n prev: True for prev item, False for next one.\n \"\"\"\n idx = self._next_idx(prev)\n qtutils.ensure_valid(idx)\n self.selectionModel().setCurrentIndex(\n idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)\n\n def set_model(self, model):\n \"\"\"Switch completion to a new model.\n\n Called from on_update_completion().\n\n Args:\n model: The model to use.\n \"\"\"\n old_model = self.model()\n sel_model = self.selectionModel()\n\n self.setModel(model)\n\n if sel_model is not None:\n sel_model.deleteLater()\n if old_model is not None:\n old_model.deleteLater()\n\n for i in range(model.rowCount()):\n self.expand(model.index(i, 0))\n\n self._column_widths = model.srcmodel.COLUMN_WIDTHS\n self._resize_columns()\n self.maybe_resize_completion()\n\n def set_pattern(self, pattern):\n \"\"\"Set the completion pattern for the current model.\n\n Called from on_update_completion().\n\n Args:\n pattern: The filter pattern to set (what the user entered).\n \"\"\"\n self.model().set_pattern(pattern)\n self.maybe_resize_completion()\n\n @pyqtSlot()\n def maybe_resize_completion(self):\n \"\"\"Emit the resize_completion signal if the config says so.\"\"\"\n if config.get('completion', 'shrink'):\n self.resize_completion.emit()\n\n @config.change_filter('completion', 'show')\n def set_enabled(self):\n \"\"\"Update self.enabled when the config changed.\"\"\"\n self.enabled = config.get('completion', 'show')\n\n @pyqtSlot()\n def on_clear_completion_selection(self):\n \"\"\"Clear the selection model when an item is 
activated.\"\"\"\n selmod = self.selectionModel()\n if selmod is not None:\n selmod.clearSelection()\n selmod.clearCurrentIndex()\n\n def selectionChanged(self, selected, deselected):\n \"\"\"Extend selectionChanged to call completers selection_changed.\"\"\"\n super().selectionChanged(selected, deselected)\n self.selection_changed.emit(selected)\n\n def resizeEvent(self, e):\n \"\"\"Extend resizeEvent to adjust column size.\"\"\"\n super().resizeEvent(e)\n self._resize_columns()\n\n def showEvent(self, e):\n \"\"\"Adjust the completion size and scroll when it's freshly shown.\"\"\"\n self.resize_completion.emit()\n scrollbar = self.verticalScrollBar()\n if scrollbar is not None:\n scrollbar.setValue(scrollbar.minimum())\n super().showEvent(e)\n\n @cmdutils.register(instance='completion', hide=True,\n modes=[usertypes.KeyMode.command], scope='window')\n def completion_item_prev(self):\n \"\"\"Select the previous completion item.\"\"\"\n self._next_prev_item(True)\n\n @cmdutils.register(instance='completion', hide=True,\n modes=[usertypes.KeyMode.command], scope='window')\n def completion_item_next(self):\n \"\"\"Select the next completion item.\"\"\"\n self._next_prev_item(False)\n\n @cmdutils.register(instance='completion', hide=True,\n modes=[usertypes.KeyMode.command], scope='window')\n def completion_item_del(self):\n \"\"\"Delete the current completion item.\"\"\"\n if not self.currentIndex().isValid():\n raise cmdexc.CommandError(\"No item selected!\")\n try:\n self.model().srcmodel.delete_cur_item(self)\n except NotImplementedError:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n", "path": "qutebrowser/completion/completionwidget.py"}]} | 3,796 | 240 |
gh_patches_debug_29212 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-1414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEA] Simplify AddMetadata Tag for NVTabular
**Is your feature request related to a problem? Please describe.**
Currently, we provide the functionality for the user to tag columns with the operator `AddMetadata`.
The use case is that users will use the operator mainly for adding tags.
Should we provide a wrapper called `AddTag` or `TagAs` to simplify the AddMetadata operator?
Should we provide multiple wrappers for common tags - e.g.
`TagAsUserID()`, `TagAsItemID()`, `TagAsUserFeatures()`, `TagAsItemFeatures()`, etc.
</issue>
<code>
[start of nvtabular/ops/add_metadata.py]
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 from nvtabular.dispatch import DataFrameType
17
18 from .operator import ColumnSelector, Operator
19
20
21 class AddMetadata(Operator):
22 """
23 This operator will add user defined tags and properties
24 to a Schema.
25 """
26
27 def __init__(self, tags=None, properties=None):
28 super().__init__()
29 self.tags = tags or []
30 self.properties = properties or {}
31
32 def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
33 return df
34
35 @property
36 def output_tags(self):
37 return self.tags
38
39 @property
40 def output_properties(self):
41 return self.properties
42
[end of nvtabular/ops/add_metadata.py]
[start of nvtabular/ops/__init__.py]
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 # alias submodules here to avoid breaking everything with moving to submodules
18 # flake8: noqa
19 from .add_metadata import AddMetadata
20 from .bucketize import Bucketize
21 from .categorify import Categorify, get_embedding_sizes
22 from .clip import Clip
23 from .column_similarity import ColumnSimilarity
24 from .data_stats import DataStats
25 from .difference_lag import DifferenceLag
26 from .drop_low_cardinality import DropLowCardinality
27 from .dropna import Dropna
28 from .fill import FillMedian, FillMissing
29 from .filter import Filter
30 from .groupby import Groupby
31 from .hash_bucket import HashBucket
32 from .hashed_cross import HashedCross
33 from .join_external import JoinExternal
34 from .join_groupby import JoinGroupby
35 from .lambdaop import LambdaOp
36 from .list_slice import ListSlice
37 from .logop import LogOp
38 from .normalize import Normalize, NormalizeMinMax
39 from .operator import ColumnSelector, Operator
40 from .reduce_dtype_size import ReduceDtypeSize
41 from .rename import Rename
42 from .stat_operator import StatOperator
43 from .target_encoding import TargetEncoding
44 from .value_counts import ValueCount
45
[end of nvtabular/ops/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py
--- a/nvtabular/ops/__init__.py
+++ b/nvtabular/ops/__init__.py
@@ -16,7 +16,15 @@
# alias submodules here to avoid breaking everything with moving to submodules
# flake8: noqa
-from .add_metadata import AddMetadata
+from .add_metadata import (
+ AddMetadata,
+ AddProperties,
+ AddTags,
+ TagAsItemFeatures,
+ TagAsItemID,
+ TagAsUserFeatures,
+ TagAsUserID,
+)
from .bucketize import Bucketize
from .categorify import Categorify, get_embedding_sizes
from .clip import Clip
diff --git a/nvtabular/ops/add_metadata.py b/nvtabular/ops/add_metadata.py
--- a/nvtabular/ops/add_metadata.py
+++ b/nvtabular/ops/add_metadata.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from merlin.schema.tags import Tags
from nvtabular.dispatch import DataFrameType
from .operator import ColumnSelector, Operator
@@ -39,3 +40,38 @@
@property
def output_properties(self):
return self.properties
+
+
+class AddTags(AddMetadata):
+ def __init__(self, tags=None):
+ super().__init__(tags=tags)
+
+
+class AddProperties(AddMetadata):
+ def __init__(self, properties=None):
+ super().__init__(properties=properties)
+
+
+# Wrappers for common features
+class TagAsUserID(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.USER_ID]
+
+
+class TagAsItemID(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.ITEM_ID]
+
+
+class TagAsUserFeatures(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.USER]
+
+
+class TagAsItemFeatures(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.ITEM]
| {"golden_diff": "diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py\n--- a/nvtabular/ops/__init__.py\n+++ b/nvtabular/ops/__init__.py\n@@ -16,7 +16,15 @@\n \n # alias submodules here to avoid breaking everything with moving to submodules\n # flake8: noqa\n-from .add_metadata import AddMetadata\n+from .add_metadata import (\n+ AddMetadata,\n+ AddProperties,\n+ AddTags,\n+ TagAsItemFeatures,\n+ TagAsItemID,\n+ TagAsUserFeatures,\n+ TagAsUserID,\n+)\n from .bucketize import Bucketize\n from .categorify import Categorify, get_embedding_sizes\n from .clip import Clip\ndiff --git a/nvtabular/ops/add_metadata.py b/nvtabular/ops/add_metadata.py\n--- a/nvtabular/ops/add_metadata.py\n+++ b/nvtabular/ops/add_metadata.py\n@@ -13,6 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n+from merlin.schema.tags import Tags\n from nvtabular.dispatch import DataFrameType\n \n from .operator import ColumnSelector, Operator\n@@ -39,3 +40,38 @@\n @property\n def output_properties(self):\n return self.properties\n+\n+\n+class AddTags(AddMetadata):\n+ def __init__(self, tags=None):\n+ super().__init__(tags=tags)\n+\n+\n+class AddProperties(AddMetadata):\n+ def __init__(self, properties=None):\n+ super().__init__(properties=properties)\n+\n+\n+# Wrappers for common features\n+class TagAsUserID(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.USER_ID]\n+\n+\n+class TagAsItemID(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.ITEM_ID]\n+\n+\n+class TagAsUserFeatures(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.USER]\n+\n+\n+class TagAsItemFeatures(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.ITEM]\n", "issue": "[FEA] Simplify AddMetadata Tag for NVTabular\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, we provide the functionality for the user to tag columns with the operator `AddMetadata`.\r\nThe use case is that users will use the operator mainly for adding tags. 
\r\n\r\nShould we provide a wrapper called `AddTag` or `TagAs` to simplify the AddMetadata operator?\r\nShould we provide multiple wrappers for common tags - e.g.\r\n\r\n`TagAsUserID()`, `TagAsItemID()`, `TagAsUserFeatures()`, `TagAsItemFeatures()`, etc.\r\n\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom nvtabular.dispatch import DataFrameType\n\nfrom .operator import ColumnSelector, Operator\n\n\nclass AddMetadata(Operator):\n \"\"\"\n This operator will add user defined tags and properties\n to a Schema.\n \"\"\"\n\n def __init__(self, tags=None, properties=None):\n super().__init__()\n self.tags = tags or []\n self.properties = properties or {}\n\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n return df\n\n @property\n def output_tags(self):\n return self.tags\n\n @property\n def output_properties(self):\n return self.properties\n", "path": "nvtabular/ops/add_metadata.py"}, {"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# alias submodules here to avoid breaking everything with moving to submodules\n# flake8: noqa\nfrom .add_metadata import AddMetadata\nfrom .bucketize import Bucketize\nfrom .categorify import Categorify, get_embedding_sizes\nfrom .clip import Clip\nfrom .column_similarity import ColumnSimilarity\nfrom .data_stats import DataStats\nfrom .difference_lag import DifferenceLag\nfrom .drop_low_cardinality import DropLowCardinality\nfrom .dropna import Dropna\nfrom .fill import FillMedian, FillMissing\nfrom .filter import Filter\nfrom .groupby import Groupby\nfrom .hash_bucket import HashBucket\nfrom .hashed_cross import HashedCross\nfrom .join_external import JoinExternal\nfrom .join_groupby import JoinGroupby\nfrom .lambdaop import LambdaOp\nfrom .list_slice import ListSlice\nfrom .logop import LogOp\nfrom .normalize import Normalize, NormalizeMinMax\nfrom .operator import ColumnSelector, Operator\nfrom .reduce_dtype_size import ReduceDtypeSize\nfrom .rename import Rename\nfrom .stat_operator import StatOperator\nfrom .target_encoding import TargetEncoding\nfrom .value_counts import ValueCount\n", "path": "nvtabular/ops/__init__.py"}]} | 1,508 | 499 |
gh_patches_debug_41674 | rasdani/github-patches | git_diff | liberapay__liberapay.com-441 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Passing bad data to basic auth can result in a 500
https://sentry.changaco.oy.lc/share/issue/322e3532/
To reproduce: `curl --basic -u admin:admin 'http://localhost:8339/'`
</issue>
<code>
[start of liberapay/security/csrf.py]
1 """Cross Site Request Forgery middleware, borrowed from Django.
2
3 See also:
4
5 https://github.com/django/django/blob/master/django/middleware/csrf.py
6 https://docs.djangoproject.com/en/dev/ref/contrib/csrf/
7 https://github.com/gratipay/gratipay.com/issues/88
8
9 """
10 from __future__ import absolute_import, division, print_function, unicode_literals
11
12 from datetime import timedelta
13 import re
14
15 from .crypto import constant_time_compare, get_random_string
16
17
18 TOKEN_LENGTH = 32
19 CSRF_TOKEN = str('csrf_token') # bytes in python2, unicode in python3
20 CSRF_TIMEOUT = timedelta(days=7)
21
22 _get_new_token = lambda: get_random_string(TOKEN_LENGTH)
23 _token_re = re.compile(r'^[a-zA-Z0-9]{%d}$' % TOKEN_LENGTH)
24 _sanitize_token = lambda t: t if _token_re.match(t) else None
25
26
27 def extract_token_from_cookie(request):
28 """Given a Request object, return a csrf_token.
29 """
30 try:
31 token = request.headers.cookie[CSRF_TOKEN].value
32 except KeyError:
33 token = None
34 else:
35 token = _sanitize_token(token)
36
37 # Don't set a CSRF cookie on assets, to avoid busting the cache.
38 # Don't set it on callbacks, because we don't need it there.
39
40 if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):
41 token = None
42 else:
43 token = token or _get_new_token()
44
45 return {'csrf_token': token}
46
47
48 def reject_forgeries(request, response, csrf_token):
49 # Assume that anything not defined as 'safe' by RC2616 needs protection.
50 if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
51
52 # except webhooks
53 if request.line.uri.startswith('/callbacks/'):
54 return
55 # and requests using HTTP auth
56 if b'Authorization' in request.headers:
57 return
58
59 # Check non-cookie token for match.
60 second_token = ""
61 if request.line.method == "POST":
62 if isinstance(request.body, dict):
63 second_token = request.body.get('csrf_token', '')
64
65 if second_token == "":
66 # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,
67 # and possible for PUT/DELETE.
68 second_token = request.headers.get(b'X-CSRF-TOKEN', b'').decode('ascii', 'replace')
69
70 if not constant_time_compare(second_token, csrf_token):
71 raise response.error(403, "Bad CSRF cookie")
72
73
74 def add_token_to_response(response, csrf_token=None):
75 """Store the latest CSRF token as a cookie.
76 """
77 if csrf_token:
78 # Don't set httponly so that we can POST using XHR.
79 # https://github.com/gratipay/gratipay.com/issues/3030
80 response.set_cookie(CSRF_TOKEN, csrf_token, expires=CSRF_TIMEOUT, httponly=False)
81
[end of liberapay/security/csrf.py]
[start of liberapay/security/authentication.py]
1 """Defines website authentication helpers.
2 """
3 import binascii
4
5 from six.moves.urllib.parse import urlencode
6
7 from pando import Response
8
9 from liberapay.constants import SESSION, SESSION_TIMEOUT
10 from liberapay.exceptions import LoginRequired
11 from liberapay.models.participant import Participant
12
13
14 class _ANON(object):
15 ANON = True
16 is_admin = False
17 id = None
18 __bool__ = __nonzero__ = lambda *a: False
19 get_tip_to = lambda self, tippee: Participant._zero_tip_dict(tippee)
20 __repr__ = lambda self: '<ANON>'
21
22
23 ANON = _ANON()
24
25
26 def _get_body(request):
27 try:
28 body = request.body
29 except Response:
30 return
31 if not isinstance(body, dict):
32 return
33 return body
34
35
36 def sign_in_with_form_data(body, state):
37 p = None
38 _, website = state['_'], state['website']
39
40 if body.get('log-in.id'):
41 id = body.pop('log-in.id')
42 password = body.pop('log-in.password', None)
43 k = 'email' if '@' in id else 'username'
44 if password:
45 p = Participant.authenticate(
46 k, 'password',
47 id, password,
48 )
49 if not p:
50 state['log-in.error'] = _("Bad username or password.")
51 elif k == 'username':
52 state['log-in.error'] = _("\"{0}\" is not a valid email address.", id)
53 return
54 else:
55 email = id
56 p = Participant._from_thing('email', email)
57 if p:
58 p.start_session()
59 qs = {'log-in.id': p.id, 'log-in.token': p.session_token}
60 p.send_email(
61 'login_link',
62 email=email,
63 link=p.url('settings/', qs),
64 link_validity=SESSION_TIMEOUT,
65 )
66 state['log-in.email-sent-to'] = email
67 else:
68 state['log-in.error'] = _(
69 "We didn't find any account whose primary email address is {0}.",
70 email
71 )
72 p = None
73
74 elif 'sign-in.email' in body:
75 response = state['response']
76 kind = body.pop('sign-in.kind')
77 if kind not in ('individual', 'organization'):
78 raise response.error(400, 'bad kind')
79 email = body.pop('sign-in.email')
80 if not email:
81 raise response.error(400, 'email is required')
82 with website.db.get_cursor() as c:
83 p = Participant.make_active(
84 kind, body.pop('sign-in.username', None),
85 body.pop('sign-in.password', None), cursor=c,
86 )
87 p.set_email_lang(state['request'].headers.get(b'Accept-Language'), cursor=c)
88 p.add_email(email, cursor=c)
89 p.authenticated = True
90
91 return p
92
93
94 def start_user_as_anon():
95 """Make sure we always have a user object, regardless of exceptions during authentication.
96 """
97 return {'user': ANON}
98
99
100 def authenticate_user_if_possible(request, response, state, user, _):
101 """This signs the user in.
102 """
103 if request.line.uri.startswith('/assets/'):
104 return
105
106 # HTTP auth
107 if b'Authorization' in request.headers:
108 header = request.headers[b'Authorization']
109 if not header.startswith(b'Basic '):
110 raise response.error(401, 'Unsupported authentication method')
111 try:
112 creds = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)
113 except (binascii.Error, UnicodeDecodeError):
114 raise response.error(400, 'Malformed "Authorization" header')
115 participant = Participant.authenticate('id', 'password', *creds)
116 if not participant:
117 raise response.error(401, 'Invalid credentials')
118 return {'user': participant}
119
120 # Cookie and form auth
121 # We want to try cookie auth first, but we want form auth to supersede it
122 p = None
123 if SESSION in request.headers.cookie:
124 creds = request.headers.cookie[SESSION].value.split(':', 1)
125 p = Participant.authenticate('id', 'session', *creds)
126 if p:
127 state['user'] = p
128 session_p, p = p, None
129 session_suffix = ''
130 redirect_url = request.line.uri
131 if request.method == 'POST':
132 body = _get_body(request)
133 if body:
134 p = sign_in_with_form_data(body, state)
135 carry_on = body.pop('log-in.carry-on', None)
136 if not p and carry_on:
137 p_email = session_p and (
138 session_p.email or session_p.get_emails()[0].address
139 )
140 if p_email != carry_on:
141 state['log-in.carry-on'] = carry_on
142 raise LoginRequired
143 redirect_url = body.get('sign-in.back-to') or redirect_url
144 elif request.method == 'GET' and request.qs.get('log-in.id'):
145 id, token = request.qs.pop('log-in.id'), request.qs.pop('log-in.token')
146 p = Participant.authenticate('id', 'session', id, token)
147 if not p and (not session_p or session_p.id != id):
148 raise response.error(400, _("This login link is expired or invalid."))
149 else:
150 qs = '?' + urlencode(request.qs, doseq=True) if request.qs else ''
151 redirect_url = request.path.raw + qs
152 session_p = p
153 session_suffix = '.em'
154 if p:
155 if session_p:
156 session_p.sign_out(response.headers.cookie)
157 if p.status == 'closed':
158 p.update_status('active')
159 p.sign_in(response.headers.cookie, session_suffix)
160 state['user'] = p
161 if request.body.pop('form.repost', None) != 'true':
162 response.redirect(redirect_url)
163
164
165 def add_auth_to_response(response, request=None, user=ANON):
166 if request is None:
167 return # early parsing must've failed
168 if request.line.uri.startswith('/assets/'):
169 return # assets never get auth headers
170
171 if SESSION in request.headers.cookie:
172 if not user.ANON:
173 user.keep_signed_in(response.headers.cookie)
174
[end of liberapay/security/authentication.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/security/authentication.py b/liberapay/security/authentication.py
--- a/liberapay/security/authentication.py
+++ b/liberapay/security/authentication.py
@@ -109,10 +109,12 @@
if not header.startswith(b'Basic '):
raise response.error(401, 'Unsupported authentication method')
try:
- creds = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)
- except (binascii.Error, UnicodeDecodeError):
+ uid, pwd = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)
+ except (binascii.Error, UnicodeDecodeError, ValueError):
raise response.error(400, 'Malformed "Authorization" header')
- participant = Participant.authenticate('id', 'password', *creds)
+ if not uid.isdigit():
+ raise response.error(401, 'Invalid user id: expected an integer, got `%s`' % uid)
+ participant = Participant.authenticate('id', 'password', uid, pwd)
if not participant:
raise response.error(401, 'Invalid credentials')
return {'user': participant}
diff --git a/liberapay/security/csrf.py b/liberapay/security/csrf.py
--- a/liberapay/security/csrf.py
+++ b/liberapay/security/csrf.py
@@ -1,4 +1,4 @@
-"""Cross Site Request Forgery middleware, borrowed from Django.
+"""Cross Site Request Forgery middleware, originally borrowed from Django.
See also:
@@ -18,6 +18,7 @@
TOKEN_LENGTH = 32
CSRF_TOKEN = str('csrf_token') # bytes in python2, unicode in python3
CSRF_TIMEOUT = timedelta(days=7)
+SAFE_METHODS = {'GET', 'HEAD', 'OPTIONS', 'TRACE'}
_get_new_token = lambda: get_random_string(TOKEN_LENGTH)
_token_re = re.compile(r'^[a-zA-Z0-9]{%d}$' % TOKEN_LENGTH)
@@ -27,34 +28,36 @@
def extract_token_from_cookie(request):
"""Given a Request object, return a csrf_token.
"""
- try:
- token = request.headers.cookie[CSRF_TOKEN].value
- except KeyError:
- token = None
- else:
- token = _sanitize_token(token)
- # Don't set a CSRF cookie on assets, to avoid busting the cache.
- # Don't set it on callbacks, because we don't need it there.
+ off = (
+ # Turn off CSRF protection on assets, to avoid busting the cache.
+ request.path.raw.startswith('/assets/') or
+ # Turn off CSRF protection on callbacks, so they can receive POST requests.
+ request.path.raw.startswith('/callbacks/') or
+ # Turn off CSRF when using HTTP auth, so API users can use POST and others.
+ b'Authorization' in request.headers
+ )
- if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):
+ if off:
token = None
else:
- token = token or _get_new_token()
+ try:
+ token = request.headers.cookie[CSRF_TOKEN].value
+ except KeyError:
+ token = _get_new_token()
+ else:
+ token = _sanitize_token(token) or _get_new_token()
return {'csrf_token': token}
def reject_forgeries(request, response, csrf_token):
- # Assume that anything not defined as 'safe' by RC2616 needs protection.
- if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
-
- # except webhooks
- if request.line.uri.startswith('/callbacks/'):
- return
- # and requests using HTTP auth
- if b'Authorization' in request.headers:
- return
+ if csrf_token is None:
+ # CSRF protection is turned off for this request
+ return
+
+ # Assume that anything not defined as 'safe' by RFC7231 needs protection.
+ if request.line.method not in SAFE_METHODS:
# Check non-cookie token for match.
second_token = ""
| {"golden_diff": "diff --git a/liberapay/security/authentication.py b/liberapay/security/authentication.py\n--- a/liberapay/security/authentication.py\n+++ b/liberapay/security/authentication.py\n@@ -109,10 +109,12 @@\n if not header.startswith(b'Basic '):\n raise response.error(401, 'Unsupported authentication method')\n try:\n- creds = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)\n- except (binascii.Error, UnicodeDecodeError):\n+ uid, pwd = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)\n+ except (binascii.Error, UnicodeDecodeError, ValueError):\n raise response.error(400, 'Malformed \"Authorization\" header')\n- participant = Participant.authenticate('id', 'password', *creds)\n+ if not uid.isdigit():\n+ raise response.error(401, 'Invalid user id: expected an integer, got `%s`' % uid)\n+ participant = Participant.authenticate('id', 'password', uid, pwd)\n if not participant:\n raise response.error(401, 'Invalid credentials')\n return {'user': participant}\ndiff --git a/liberapay/security/csrf.py b/liberapay/security/csrf.py\n--- a/liberapay/security/csrf.py\n+++ b/liberapay/security/csrf.py\n@@ -1,4 +1,4 @@\n-\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n+\"\"\"Cross Site Request Forgery middleware, originally borrowed from Django.\n \n See also:\n \n@@ -18,6 +18,7 @@\n TOKEN_LENGTH = 32\n CSRF_TOKEN = str('csrf_token') # bytes in python2, unicode in python3\n CSRF_TIMEOUT = timedelta(days=7)\n+SAFE_METHODS = {'GET', 'HEAD', 'OPTIONS', 'TRACE'}\n \n _get_new_token = lambda: get_random_string(TOKEN_LENGTH)\n _token_re = re.compile(r'^[a-zA-Z0-9]{%d}$' % TOKEN_LENGTH)\n@@ -27,34 +28,36 @@\n def extract_token_from_cookie(request):\n \"\"\"Given a Request object, return a csrf_token.\n \"\"\"\n- try:\n- token = request.headers.cookie[CSRF_TOKEN].value\n- except KeyError:\n- token = None\n- else:\n- token = _sanitize_token(token)\n \n- # Don't set a CSRF cookie on assets, to avoid busting the cache.\n- # Don't set it on callbacks, because we don't need it there.\n+ off = (\n+ # Turn off CSRF protection on assets, to avoid busting the cache.\n+ request.path.raw.startswith('/assets/') or\n+ # Turn off CSRF protection on callbacks, so they can receive POST requests.\n+ request.path.raw.startswith('/callbacks/') or\n+ # Turn off CSRF when using HTTP auth, so API users can use POST and others.\n+ b'Authorization' in request.headers\n+ )\n \n- if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n+ if off:\n token = None\n else:\n- token = token or _get_new_token()\n+ try:\n+ token = request.headers.cookie[CSRF_TOKEN].value\n+ except KeyError:\n+ token = _get_new_token()\n+ else:\n+ token = _sanitize_token(token) or _get_new_token()\n \n return {'csrf_token': token}\n \n \n def reject_forgeries(request, response, csrf_token):\n- # Assume that anything not defined as 'safe' by RC2616 needs protection.\n- if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n-\n- # except webhooks\n- if request.line.uri.startswith('/callbacks/'):\n- return\n- # and requests using HTTP auth\n- if b'Authorization' in request.headers:\n- return\n+ if csrf_token is None:\n+ # CSRF protection is turned off for this request\n+ return\n+\n+ # Assume that anything not defined as 'safe' by RFC7231 needs protection.\n+ if request.line.method not in SAFE_METHODS:\n \n # Check non-cookie token for match.\n second_token = \"\"\n", "issue": "Passing bad data to basic auth can result in a 
500\nhttps://sentry.changaco.oy.lc/share/issue/322e3532/\n\nTo reproduce: `curl --basic -u admin:admin 'http://localhost:8339/'`\n\n", "before_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gratipay/gratipay.com/issues/88\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import timedelta\nimport re\n\nfrom .crypto import constant_time_compare, get_random_string\n\n\nTOKEN_LENGTH = 32\nCSRF_TOKEN = str('csrf_token') # bytes in python2, unicode in python3\nCSRF_TIMEOUT = timedelta(days=7)\n\n_get_new_token = lambda: get_random_string(TOKEN_LENGTH)\n_token_re = re.compile(r'^[a-zA-Z0-9]{%d}$' % TOKEN_LENGTH)\n_sanitize_token = lambda t: t if _token_re.match(t) else None\n\n\ndef extract_token_from_cookie(request):\n \"\"\"Given a Request object, return a csrf_token.\n \"\"\"\n try:\n token = request.headers.cookie[CSRF_TOKEN].value\n except KeyError:\n token = None\n else:\n token = _sanitize_token(token)\n\n # Don't set a CSRF cookie on assets, to avoid busting the cache.\n # Don't set it on callbacks, because we don't need it there.\n\n if request.path.raw.startswith('/assets/') or request.path.raw.startswith('/callbacks/'):\n token = None\n else:\n token = token or _get_new_token()\n\n return {'csrf_token': token}\n\n\ndef reject_forgeries(request, response, csrf_token):\n # Assume that anything not defined as 'safe' by RC2616 needs protection.\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n # except webhooks\n if request.line.uri.startswith('/callbacks/'):\n return\n # and requests using HTTP auth\n if b'Authorization' in request.headers:\n return\n\n # Check non-cookie token for match.\n second_token = \"\"\n if request.line.method == \"POST\":\n if isinstance(request.body, dict):\n second_token = request.body.get('csrf_token', '')\n\n if second_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n second_token = request.headers.get(b'X-CSRF-TOKEN', b'').decode('ascii', 'replace')\n\n if not constant_time_compare(second_token, csrf_token):\n raise response.error(403, \"Bad CSRF cookie\")\n\n\ndef add_token_to_response(response, csrf_token=None):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n if csrf_token:\n # Don't set httponly so that we can POST using XHR.\n # https://github.com/gratipay/gratipay.com/issues/3030\n response.set_cookie(CSRF_TOKEN, csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n", "path": "liberapay/security/csrf.py"}, {"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport binascii\n\nfrom six.moves.urllib.parse import urlencode\n\nfrom pando import Response\n\nfrom liberapay.constants import SESSION, SESSION_TIMEOUT\nfrom liberapay.exceptions import LoginRequired\nfrom liberapay.models.participant import Participant\n\n\nclass _ANON(object):\n ANON = True\n is_admin = False\n id = None\n __bool__ = __nonzero__ = lambda *a: False\n get_tip_to = lambda self, tippee: Participant._zero_tip_dict(tippee)\n __repr__ = lambda self: '<ANON>'\n\n\nANON = _ANON()\n\n\ndef _get_body(request):\n try:\n body = request.body\n except Response:\n return\n if not isinstance(body, dict):\n return\n return body\n\n\ndef sign_in_with_form_data(body, state):\n p = None\n _, website = state['_'], 
state['website']\n\n if body.get('log-in.id'):\n id = body.pop('log-in.id')\n password = body.pop('log-in.password', None)\n k = 'email' if '@' in id else 'username'\n if password:\n p = Participant.authenticate(\n k, 'password',\n id, password,\n )\n if not p:\n state['log-in.error'] = _(\"Bad username or password.\")\n elif k == 'username':\n state['log-in.error'] = _(\"\\\"{0}\\\" is not a valid email address.\", id)\n return\n else:\n email = id\n p = Participant._from_thing('email', email)\n if p:\n p.start_session()\n qs = {'log-in.id': p.id, 'log-in.token': p.session_token}\n p.send_email(\n 'login_link',\n email=email,\n link=p.url('settings/', qs),\n link_validity=SESSION_TIMEOUT,\n )\n state['log-in.email-sent-to'] = email\n else:\n state['log-in.error'] = _(\n \"We didn't find any account whose primary email address is {0}.\",\n email\n )\n p = None\n\n elif 'sign-in.email' in body:\n response = state['response']\n kind = body.pop('sign-in.kind')\n if kind not in ('individual', 'organization'):\n raise response.error(400, 'bad kind')\n email = body.pop('sign-in.email')\n if not email:\n raise response.error(400, 'email is required')\n with website.db.get_cursor() as c:\n p = Participant.make_active(\n kind, body.pop('sign-in.username', None),\n body.pop('sign-in.password', None), cursor=c,\n )\n p.set_email_lang(state['request'].headers.get(b'Accept-Language'), cursor=c)\n p.add_email(email, cursor=c)\n p.authenticated = True\n\n return p\n\n\ndef start_user_as_anon():\n \"\"\"Make sure we always have a user object, regardless of exceptions during authentication.\n \"\"\"\n return {'user': ANON}\n\n\ndef authenticate_user_if_possible(request, response, state, user, _):\n \"\"\"This signs the user in.\n \"\"\"\n if request.line.uri.startswith('/assets/'):\n return\n\n # HTTP auth\n if b'Authorization' in request.headers:\n header = request.headers[b'Authorization']\n if not header.startswith(b'Basic '):\n raise response.error(401, 'Unsupported authentication method')\n try:\n creds = binascii.a2b_base64(header[len('Basic '):]).decode('utf8').split(':', 1)\n except (binascii.Error, UnicodeDecodeError):\n raise response.error(400, 'Malformed \"Authorization\" header')\n participant = Participant.authenticate('id', 'password', *creds)\n if not participant:\n raise response.error(401, 'Invalid credentials')\n return {'user': participant}\n\n # Cookie and form auth\n # We want to try cookie auth first, but we want form auth to supersede it\n p = None\n if SESSION in request.headers.cookie:\n creds = request.headers.cookie[SESSION].value.split(':', 1)\n p = Participant.authenticate('id', 'session', *creds)\n if p:\n state['user'] = p\n session_p, p = p, None\n session_suffix = ''\n redirect_url = request.line.uri\n if request.method == 'POST':\n body = _get_body(request)\n if body:\n p = sign_in_with_form_data(body, state)\n carry_on = body.pop('log-in.carry-on', None)\n if not p and carry_on:\n p_email = session_p and (\n session_p.email or session_p.get_emails()[0].address\n )\n if p_email != carry_on:\n state['log-in.carry-on'] = carry_on\n raise LoginRequired\n redirect_url = body.get('sign-in.back-to') or redirect_url\n elif request.method == 'GET' and request.qs.get('log-in.id'):\n id, token = request.qs.pop('log-in.id'), request.qs.pop('log-in.token')\n p = Participant.authenticate('id', 'session', id, token)\n if not p and (not session_p or session_p.id != id):\n raise response.error(400, _(\"This login link is expired or invalid.\"))\n else:\n qs = '?' 
+ urlencode(request.qs, doseq=True) if request.qs else ''\n redirect_url = request.path.raw + qs\n session_p = p\n session_suffix = '.em'\n if p:\n if session_p:\n session_p.sign_out(response.headers.cookie)\n if p.status == 'closed':\n p.update_status('active')\n p.sign_in(response.headers.cookie, session_suffix)\n state['user'] = p\n if request.body.pop('form.repost', None) != 'true':\n response.redirect(redirect_url)\n\n\ndef add_auth_to_response(response, request=None, user=ANON):\n if request is None:\n return # early parsing must've failed\n if request.line.uri.startswith('/assets/'):\n return # assets never get auth headers\n\n if SESSION in request.headers.cookie:\n if not user.ANON:\n user.keep_signed_in(response.headers.cookie)\n", "path": "liberapay/security/authentication.py"}]} | 3,214 | 951 |
gh_patches_debug_7075 | rasdani/github-patches | git_diff | streamlink__streamlink-3619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to use --player-fifo with MPV
Streamlink 2.0.0
mpv 0.33.0-76-g93066ff12f Copyright © 2000-2020 mpv/MPlayer/mplayer2 projects
```
streamlink --player-fifo https://www.twitch.tv/channelName best
[cli][info] Found matching plugin twitch for URL https://www.twitch.tv/channelName
[cli][info] Available streams: audio_only, 160p (worst), 360p, 480p, 720p, 720p60, 1080p60 (best)
[cli][info] Opening stream: 1080p60 (hls)
[cli][info] Creating pipe streamlinkpipe-1140
[cli][info] Starting player: "c:\Programs\MPV\mpv.exe"
error: Failed to start player: "c:\Programs\MPV\mpv.exe" (Process exited prematurely)
[cli][info] Closing currently open stream...
```
Works normally without FIFO
</issue>
<code>
[start of src/streamlink_cli/output.py]
1 import logging
2 import os
3 import re
4 import shlex
5 import subprocess
6 import sys
7 from time import sleep
8
9 from streamlink_cli.compat import is_win32, stdout
10 from streamlink_cli.constants import PLAYER_ARGS_INPUT_DEFAULT, PLAYER_ARGS_INPUT_FALLBACK, SUPPORTED_PLAYERS
11 from streamlink_cli.utils import ignored
12
13 if is_win32:
14 import msvcrt
15
16 log = logging.getLogger("streamlink.cli.output")
17
18
19 class Output:
20 def __init__(self):
21 self.opened = False
22
23 def open(self):
24 self._open()
25 self.opened = True
26
27 def close(self):
28 if self.opened:
29 self._close()
30
31 self.opened = False
32
33 def write(self, data):
34 if not self.opened:
35 raise OSError("Output is not opened")
36
37 return self._write(data)
38
39 def _open(self):
40 pass
41
42 def _close(self):
43 pass
44
45 def _write(self, data):
46 pass
47
48
49 class FileOutput(Output):
50 def __init__(self, filename=None, fd=None, record=None):
51 super().__init__()
52 self.filename = filename
53 self.fd = fd
54 self.record = record
55
56 def _open(self):
57 if self.filename:
58 self.fd = open(self.filename, "wb")
59
60 if self.record:
61 self.record.open()
62
63 if is_win32:
64 msvcrt.setmode(self.fd.fileno(), os.O_BINARY)
65
66 def _close(self):
67 if self.fd is not stdout:
68 self.fd.close()
69 if self.record:
70 self.record.close()
71
72 def _write(self, data):
73 self.fd.write(data)
74 if self.record:
75 self.record.write(data)
76
77
78 class PlayerOutput(Output):
79 PLAYER_TERMINATE_TIMEOUT = 10.0
80
81 _re_player_args_input = re.compile("|".join(map(
82 lambda const: re.escape(f"{{{const}}}"),
83 [PLAYER_ARGS_INPUT_DEFAULT, PLAYER_ARGS_INPUT_FALLBACK]
84 )))
85
86 def __init__(self, cmd, args="", filename=None, quiet=True, kill=True,
87 call=False, http=None, namedpipe=None, record=None, title=None):
88 super().__init__()
89 self.cmd = cmd
90 self.args = args
91 self.kill = kill
92 self.call = call
93 self.quiet = quiet
94
95 self.filename = filename
96 self.namedpipe = namedpipe
97 self.http = http
98 self.title = title
99 self.player = None
100 self.player_name = self.supported_player(self.cmd)
101 self.record = record
102
103 if self.namedpipe or self.filename or self.http:
104 self.stdin = sys.stdin
105 else:
106 self.stdin = subprocess.PIPE
107
108 if self.quiet:
109 self.stdout = open(os.devnull, "w")
110 self.stderr = open(os.devnull, "w")
111 else:
112 self.stdout = sys.stdout
113 self.stderr = sys.stderr
114
115 if not self._re_player_args_input.search(self.args):
116 self.args += f"{' ' if self.args else ''}{{{PLAYER_ARGS_INPUT_DEFAULT}}}"
117
118 @property
119 def running(self):
120 sleep(0.5)
121 return self.player.poll() is None
122
123 @classmethod
124 def supported_player(cls, cmd):
125 """
126 Check if the current player supports adding a title
127
128 :param cmd: command to test
129 :return: name of the player|None
130 """
131 if not is_win32:
132 # under a POSIX system use shlex to find the actual command
133 # under windows this is not an issue because executables end in .exe
134 cmd = shlex.split(cmd)[0]
135
136 cmd = os.path.basename(cmd.lower())
137 for player, possiblecmds in SUPPORTED_PLAYERS.items():
138 for possiblecmd in possiblecmds:
139 if cmd.startswith(possiblecmd):
140 return player
141
142 @classmethod
143 def _mpv_title_escape(cls, title_string):
144 # mpv has a "disable property-expansion" token which must be handled
145 # in order to accurately represent $$ in title
146 if r'\$>' in title_string:
147 processed_title = ""
148 double_dollars = True
149 i = dollars = 0
150 while i < len(title_string):
151 if double_dollars:
152 if title_string[i] == "\\":
153 if title_string[i + 1] == "$":
154 processed_title += "$"
155 dollars += 1
156 i += 1
157 if title_string[i + 1] == ">" and dollars % 2 == 1:
158 double_dollars = False
159 processed_title += ">"
160 i += 1
161 else:
162 processed_title += "\\"
163 elif title_string[i] == "$":
164 processed_title += "$$"
165 else:
166 dollars = 0
167 processed_title += title_string[i]
168 else:
169 if title_string[i:i + 2] == "\\$":
170 processed_title += "$"
171 i += 1
172 else:
173 processed_title += title_string[i]
174 i += 1
175 return processed_title
176 else:
177 # not possible for property-expansion to be disabled, happy days
178 return title_string.replace("$", "$$").replace(r'\$$', "$")
179
180 def _create_arguments(self):
181 if self.namedpipe:
182 filename = self.namedpipe.path
183 elif self.filename:
184 filename = self.filename
185 elif self.http:
186 filename = self.http.url
187 else:
188 filename = "-"
189 extra_args = []
190
191 if self.title is not None:
192 # vlc
193 if self.player_name == "vlc":
194 # see https://wiki.videolan.org/Documentation:Format_String/, allow escaping with \$
195 self.title = self.title.replace("$", "$$").replace(r'\$$', "$")
196 extra_args.extend(["--input-title-format", self.title])
197
198 # mpv
199 if self.player_name == "mpv":
200 # see https://mpv.io/manual/stable/#property-expansion, allow escaping with \$, respect mpv's $>
201 self.title = self._mpv_title_escape(self.title)
202 extra_args.append(f"--force-media-title={self.title}")
203
204 # potplayer
205 if self.player_name == "potplayer":
206 if filename != "-":
207 # PotPlayer - About - Command Line
208 # You can specify titles for URLs by separating them with a backslash (\) at the end of URLs.
209 # eg. "http://...\title of this url"
210 self.title = self.title.replace('"', '')
211 filename = filename[:-1] + '\\' + self.title + filename[-1]
212
213 args = self.args.format(**{PLAYER_ARGS_INPUT_DEFAULT: filename, PLAYER_ARGS_INPUT_FALLBACK: filename})
214 cmd = self.cmd
215
216 # player command
217 if is_win32:
218 eargs = subprocess.list2cmdline(extra_args)
219 # do not insert and extra " " when there are no extra_args
220 return " ".join([cmd] + ([eargs] if eargs else []) + [args])
221 return shlex.split(cmd) + extra_args + shlex.split(args)
222
223 def _open(self):
224 try:
225 if self.record:
226 self.record.open()
227 if self.call and self.filename:
228 self._open_call()
229 else:
230 self._open_subprocess()
231 finally:
232 if self.quiet:
233 # Output streams no longer needed in parent process
234 self.stdout.close()
235 self.stderr.close()
236
237 def _open_call(self):
238 args = self._create_arguments()
239 if is_win32:
240 fargs = args
241 else:
242 fargs = subprocess.list2cmdline(args)
243 log.debug(f"Calling: {fargs}")
244
245 subprocess.call(args,
246 stdout=self.stdout,
247 stderr=self.stderr)
248
249 def _open_subprocess(self):
250 # Force bufsize=0 on all Python versions to avoid writing the
251 # unflushed buffer when closing a broken input pipe
252 args = self._create_arguments()
253 if is_win32:
254 fargs = args
255 else:
256 fargs = subprocess.list2cmdline(args)
257 log.debug(f"Opening subprocess: {fargs}")
258
259 self.player = subprocess.Popen(args,
260 stdin=self.stdin, bufsize=0,
261 stdout=self.stdout,
262 stderr=self.stderr)
263 # Wait 0.5 seconds to see if program exited prematurely
264 if not self.running:
265 raise OSError("Process exited prematurely")
266
267 if self.namedpipe:
268 self.namedpipe.open("wb")
269 elif self.http:
270 self.http.open()
271
272 def _close(self):
273 # Close input to the player first to signal the end of the
274 # stream and allow the player to terminate of its own accord
275 if self.namedpipe:
276 self.namedpipe.close()
277 elif self.http:
278 self.http.close()
279 elif not self.filename:
280 self.player.stdin.close()
281
282 if self.record:
283 self.record.close()
284
285 if self.kill:
286 with ignored(Exception):
287 self.player.terminate()
288 if not is_win32:
289 t, timeout = 0.0, self.PLAYER_TERMINATE_TIMEOUT
290 while self.player.poll() is None and t < timeout:
291 sleep(0.5)
292 t += 0.5
293
294 if not self.player.returncode:
295 self.player.kill()
296 self.player.wait()
297
298 def _write(self, data):
299 if self.record:
300 self.record.write(data)
301
302 if self.namedpipe:
303 self.namedpipe.write(data)
304 elif self.http:
305 self.http.write(data)
306 else:
307 self.player.stdin.write(data)
308
309
310 __all__ = ["PlayerOutput", "FileOutput"]
311
[end of src/streamlink_cli/output.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink_cli/output.py b/src/streamlink_cli/output.py
--- a/src/streamlink_cli/output.py
+++ b/src/streamlink_cli/output.py
@@ -180,6 +180,11 @@
def _create_arguments(self):
if self.namedpipe:
filename = self.namedpipe.path
+ if is_win32:
+ if self.player_name == "vlc":
+ filename = f"stream://\\{filename}"
+ elif self.player_name == "mpv":
+ filename = f"file://{filename}"
elif self.filename:
filename = self.filename
elif self.http:
| {"golden_diff": "diff --git a/src/streamlink_cli/output.py b/src/streamlink_cli/output.py\n--- a/src/streamlink_cli/output.py\n+++ b/src/streamlink_cli/output.py\n@@ -180,6 +180,11 @@\n def _create_arguments(self):\n if self.namedpipe:\n filename = self.namedpipe.path\n+ if is_win32:\n+ if self.player_name == \"vlc\":\n+ filename = f\"stream://\\\\{filename}\"\n+ elif self.player_name == \"mpv\":\n+ filename = f\"file://{filename}\"\n elif self.filename:\n filename = self.filename\n elif self.http:\n", "issue": "Unable to use --player-fifo with MPV\nStreamlink 2.0.0\r\nmpv 0.33.0-76-g93066ff12f Copyright \u00a9 2000-2020 mpv/MPlayer/mplayer2 projects\r\n\r\n```\r\nstreamlink --player-fifo https://www.twitch.tv/channelName best\r\n[cli][info] Found matching plugin twitch for URL https://www.twitch.tv/channelName \r\n[cli][info] Available streams: audio_only, 160p (worst), 360p, 480p, 720p, 720p60, 1080p60 (best)\r\n[cli][info] Opening stream: 1080p60 (hls)\r\n[cli][info] Creating pipe streamlinkpipe-1140\r\n[cli][info] Starting player: \"c:\\Programs\\MPV\\mpv.exe\"\r\nerror: Failed to start player: \"c:\\Programs\\MPV\\mpv.exe\" (Process exited prematurely)\r\n[cli][info] Closing currently open stream...\r\n```\r\n\r\nWorks normally without FIFO\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nfrom time import sleep\n\nfrom streamlink_cli.compat import is_win32, stdout\nfrom streamlink_cli.constants import PLAYER_ARGS_INPUT_DEFAULT, PLAYER_ARGS_INPUT_FALLBACK, SUPPORTED_PLAYERS\nfrom streamlink_cli.utils import ignored\n\nif is_win32:\n import msvcrt\n\nlog = logging.getLogger(\"streamlink.cli.output\")\n\n\nclass Output:\n def __init__(self):\n self.opened = False\n\n def open(self):\n self._open()\n self.opened = True\n\n def close(self):\n if self.opened:\n self._close()\n\n self.opened = False\n\n def write(self, data):\n if not self.opened:\n raise OSError(\"Output is not opened\")\n\n return self._write(data)\n\n def _open(self):\n pass\n\n def _close(self):\n pass\n\n def _write(self, data):\n pass\n\n\nclass FileOutput(Output):\n def __init__(self, filename=None, fd=None, record=None):\n super().__init__()\n self.filename = filename\n self.fd = fd\n self.record = record\n\n def _open(self):\n if self.filename:\n self.fd = open(self.filename, \"wb\")\n\n if self.record:\n self.record.open()\n\n if is_win32:\n msvcrt.setmode(self.fd.fileno(), os.O_BINARY)\n\n def _close(self):\n if self.fd is not stdout:\n self.fd.close()\n if self.record:\n self.record.close()\n\n def _write(self, data):\n self.fd.write(data)\n if self.record:\n self.record.write(data)\n\n\nclass PlayerOutput(Output):\n PLAYER_TERMINATE_TIMEOUT = 10.0\n\n _re_player_args_input = re.compile(\"|\".join(map(\n lambda const: re.escape(f\"{{{const}}}\"),\n [PLAYER_ARGS_INPUT_DEFAULT, PLAYER_ARGS_INPUT_FALLBACK]\n )))\n\n def __init__(self, cmd, args=\"\", filename=None, quiet=True, kill=True,\n call=False, http=None, namedpipe=None, record=None, title=None):\n super().__init__()\n self.cmd = cmd\n self.args = args\n self.kill = kill\n self.call = call\n self.quiet = quiet\n\n self.filename = filename\n self.namedpipe = namedpipe\n self.http = http\n self.title = title\n self.player = None\n self.player_name = self.supported_player(self.cmd)\n self.record = record\n\n if self.namedpipe or self.filename or self.http:\n self.stdin = sys.stdin\n else:\n self.stdin = subprocess.PIPE\n\n if self.quiet:\n self.stdout = open(os.devnull, \"w\")\n self.stderr = 
open(os.devnull, \"w\")\n else:\n self.stdout = sys.stdout\n self.stderr = sys.stderr\n\n if not self._re_player_args_input.search(self.args):\n self.args += f\"{' ' if self.args else ''}{{{PLAYER_ARGS_INPUT_DEFAULT}}}\"\n\n @property\n def running(self):\n sleep(0.5)\n return self.player.poll() is None\n\n @classmethod\n def supported_player(cls, cmd):\n \"\"\"\n Check if the current player supports adding a title\n\n :param cmd: command to test\n :return: name of the player|None\n \"\"\"\n if not is_win32:\n # under a POSIX system use shlex to find the actual command\n # under windows this is not an issue because executables end in .exe\n cmd = shlex.split(cmd)[0]\n\n cmd = os.path.basename(cmd.lower())\n for player, possiblecmds in SUPPORTED_PLAYERS.items():\n for possiblecmd in possiblecmds:\n if cmd.startswith(possiblecmd):\n return player\n\n @classmethod\n def _mpv_title_escape(cls, title_string):\n # mpv has a \"disable property-expansion\" token which must be handled\n # in order to accurately represent $$ in title\n if r'\\$>' in title_string:\n processed_title = \"\"\n double_dollars = True\n i = dollars = 0\n while i < len(title_string):\n if double_dollars:\n if title_string[i] == \"\\\\\":\n if title_string[i + 1] == \"$\":\n processed_title += \"$\"\n dollars += 1\n i += 1\n if title_string[i + 1] == \">\" and dollars % 2 == 1:\n double_dollars = False\n processed_title += \">\"\n i += 1\n else:\n processed_title += \"\\\\\"\n elif title_string[i] == \"$\":\n processed_title += \"$$\"\n else:\n dollars = 0\n processed_title += title_string[i]\n else:\n if title_string[i:i + 2] == \"\\\\$\":\n processed_title += \"$\"\n i += 1\n else:\n processed_title += title_string[i]\n i += 1\n return processed_title\n else:\n # not possible for property-expansion to be disabled, happy days\n return title_string.replace(\"$\", \"$$\").replace(r'\\$$', \"$\")\n\n def _create_arguments(self):\n if self.namedpipe:\n filename = self.namedpipe.path\n elif self.filename:\n filename = self.filename\n elif self.http:\n filename = self.http.url\n else:\n filename = \"-\"\n extra_args = []\n\n if self.title is not None:\n # vlc\n if self.player_name == \"vlc\":\n # see https://wiki.videolan.org/Documentation:Format_String/, allow escaping with \\$\n self.title = self.title.replace(\"$\", \"$$\").replace(r'\\$$', \"$\")\n extra_args.extend([\"--input-title-format\", self.title])\n\n # mpv\n if self.player_name == \"mpv\":\n # see https://mpv.io/manual/stable/#property-expansion, allow escaping with \\$, respect mpv's $>\n self.title = self._mpv_title_escape(self.title)\n extra_args.append(f\"--force-media-title={self.title}\")\n\n # potplayer\n if self.player_name == \"potplayer\":\n if filename != \"-\":\n # PotPlayer - About - Command Line\n # You can specify titles for URLs by separating them with a backslash (\\) at the end of URLs.\n # eg. 
\"http://...\\title of this url\"\n self.title = self.title.replace('\"', '')\n filename = filename[:-1] + '\\\\' + self.title + filename[-1]\n\n args = self.args.format(**{PLAYER_ARGS_INPUT_DEFAULT: filename, PLAYER_ARGS_INPUT_FALLBACK: filename})\n cmd = self.cmd\n\n # player command\n if is_win32:\n eargs = subprocess.list2cmdline(extra_args)\n # do not insert and extra \" \" when there are no extra_args\n return \" \".join([cmd] + ([eargs] if eargs else []) + [args])\n return shlex.split(cmd) + extra_args + shlex.split(args)\n\n def _open(self):\n try:\n if self.record:\n self.record.open()\n if self.call and self.filename:\n self._open_call()\n else:\n self._open_subprocess()\n finally:\n if self.quiet:\n # Output streams no longer needed in parent process\n self.stdout.close()\n self.stderr.close()\n\n def _open_call(self):\n args = self._create_arguments()\n if is_win32:\n fargs = args\n else:\n fargs = subprocess.list2cmdline(args)\n log.debug(f\"Calling: {fargs}\")\n\n subprocess.call(args,\n stdout=self.stdout,\n stderr=self.stderr)\n\n def _open_subprocess(self):\n # Force bufsize=0 on all Python versions to avoid writing the\n # unflushed buffer when closing a broken input pipe\n args = self._create_arguments()\n if is_win32:\n fargs = args\n else:\n fargs = subprocess.list2cmdline(args)\n log.debug(f\"Opening subprocess: {fargs}\")\n\n self.player = subprocess.Popen(args,\n stdin=self.stdin, bufsize=0,\n stdout=self.stdout,\n stderr=self.stderr)\n # Wait 0.5 seconds to see if program exited prematurely\n if not self.running:\n raise OSError(\"Process exited prematurely\")\n\n if self.namedpipe:\n self.namedpipe.open(\"wb\")\n elif self.http:\n self.http.open()\n\n def _close(self):\n # Close input to the player first to signal the end of the\n # stream and allow the player to terminate of its own accord\n if self.namedpipe:\n self.namedpipe.close()\n elif self.http:\n self.http.close()\n elif not self.filename:\n self.player.stdin.close()\n\n if self.record:\n self.record.close()\n\n if self.kill:\n with ignored(Exception):\n self.player.terminate()\n if not is_win32:\n t, timeout = 0.0, self.PLAYER_TERMINATE_TIMEOUT\n while self.player.poll() is None and t < timeout:\n sleep(0.5)\n t += 0.5\n\n if not self.player.returncode:\n self.player.kill()\n self.player.wait()\n\n def _write(self, data):\n if self.record:\n self.record.write(data)\n\n if self.namedpipe:\n self.namedpipe.write(data)\n elif self.http:\n self.http.write(data)\n else:\n self.player.stdin.write(data)\n\n\n__all__ = [\"PlayerOutput\", \"FileOutput\"]\n", "path": "src/streamlink_cli/output.py"}]} | 3,715 | 139 |
gh_patches_debug_17762 | rasdani/github-patches | git_diff | pytorch__TensorRT-2505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`aten.arange.start_step`
</issue>
<code>
[start of py/torch_tensorrt/dynamo/conversion/ops_evaluators.py]
1 import logging
2 import operator
3 from typing import Dict, Sequence, Tuple, Union
4
5 import torch
6 from torch.fx.node import Argument, Node, Target
7 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
8 from torch_tensorrt.dynamo.conversion._ConverterRegistry import (
9 ConverterRegistry,
10 dynamo_tensorrt_converter,
11 )
12 from torch_tensorrt.fx.types import TRTTensor
13
14 _LOGGER: logging.Logger = logging.getLogger(__name__)
15
16
17 def getitem_validator(getitem_node: Node) -> bool:
18 from torch_tensorrt.dynamo.conversion._ConverterRegistry import DYNAMO_CONVERTERS
19
20 # Getitem nodes can only be converted if their parent node also can
21 return getitem_node.args[0] in DYNAMO_CONVERTERS
22
23
24 # TODO: Subsequent evaluators should be registered here with their own validators
25 @dynamo_tensorrt_converter(operator.getitem, capability_validator=getitem_validator)
26 @dynamo_tensorrt_converter(torch.ops.aten.detach.default)
27 def generic_evaluator(
28 ctx: ConversionContext,
29 target: Target,
30 args: Tuple[Argument, ...],
31 kwargs: Dict[str, Argument],
32 name: str,
33 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
34 _LOGGER.debug(
35 f"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}"
36 )
37 return target(*args)
38
[end of py/torch_tensorrt/dynamo/conversion/ops_evaluators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
--- a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
+++ b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
@@ -2,6 +2,7 @@
import operator
from typing import Dict, Sequence, Tuple, Union
+import numpy as np
import torch
from torch.fx.node import Argument, Node, Target
from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
@@ -35,3 +36,14 @@
f"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}"
)
return target(*args)
+
+
+@dynamo_tensorrt_converter(torch.ops.aten.arange.start_step)
+def aten_ops_arange_start_step(
+ ctx: ConversionContext,
+ target: Target,
+ args: Tuple[Argument, ...],
+ kwargs: Dict[str, Argument],
+ name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+ return np.arange(*args)
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n--- a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n+++ b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n@@ -2,6 +2,7 @@\n import operator\n from typing import Dict, Sequence, Tuple, Union\n \n+import numpy as np\n import torch\n from torch.fx.node import Argument, Node, Target\n from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\n@@ -35,3 +36,14 @@\n f\"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}\"\n )\n return target(*args)\n+\n+\n+@dynamo_tensorrt_converter(torch.ops.aten.arange.start_step)\n+def aten_ops_arange_start_step(\n+ ctx: ConversionContext,\n+ target: Target,\n+ args: Tuple[Argument, ...],\n+ kwargs: Dict[str, Argument],\n+ name: str,\n+) -> Union[TRTTensor, Sequence[TRTTensor]]:\n+ return np.arange(*args)\n", "issue": "`aten.arange.start_step`\n\n", "before_files": [{"content": "import logging\nimport operator\nfrom typing import Dict, Sequence, Tuple, Union\n\nimport torch\nfrom torch.fx.node import Argument, Node, Target\nfrom torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\nfrom torch_tensorrt.dynamo.conversion._ConverterRegistry import (\n ConverterRegistry,\n dynamo_tensorrt_converter,\n)\nfrom torch_tensorrt.fx.types import TRTTensor\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\ndef getitem_validator(getitem_node: Node) -> bool:\n from torch_tensorrt.dynamo.conversion._ConverterRegistry import DYNAMO_CONVERTERS\n\n # Getitem nodes can only be converted if their parent node also can\n return getitem_node.args[0] in DYNAMO_CONVERTERS\n\n\n# TODO: Subsequent evaluators should be registered here with their own validators\n@dynamo_tensorrt_converter(operator.getitem, capability_validator=getitem_validator)\n@dynamo_tensorrt_converter(torch.ops.aten.detach.default)\ndef generic_evaluator(\n ctx: ConversionContext,\n target: Target,\n args: Tuple[Argument, ...],\n kwargs: Dict[str, Argument],\n name: str,\n) -> Union[TRTTensor, Sequence[TRTTensor]]:\n _LOGGER.debug(\n f\"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}\"\n )\n return target(*args)\n", "path": "py/torch_tensorrt/dynamo/conversion/ops_evaluators.py"}]} | 926 | 262 |
gh_patches_debug_39746 | rasdani/github-patches | git_diff | unionai-oss__pandera-960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mypy: pa.typing.Series does not support pd.DatetimeTZDtype as a type argument
**Describe the bug**
Under mypy, `pa.typing.Series` does not support `pd.DatetimeTZDtype` as a valid type argument.
Bug occurs with and without `pandera.mypy` plugin
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [ ] (optional) I have confirmed this bug exists on the master branch of pandera.
#### Code Sample, a copy-pastable example
```python
from typing import Annotated
import pandas as pd
import pandera as pa
from pandera.typing import Series
class MySchema(pa.SchemaModel):
# both lines taken from the pandera documentation
a: Series[Annotated[pd.DatetimeTZDtype, "ns", "UTC"]]
b: Series[pd.DatetimeTZDtype] = pa.Field(dtype_kwargs={"unit": "ns", "tz": "UTC"})
```
#### Expected behavior
mypy should show no errors
#### Actual behaviour
mypy produces the following errors
```
test.py:8: error: Value of type variable "GenericDtype" of "Series" cannot be "DatetimeTZDtype"
test.py:9: error: Value of type variable "GenericDtype" of "Series" cannot be "DatetimeTZDtype"
```
#### Versions
```
pandas==1.5.0
pandas-stubs==1.4.3.220807
pandera==0.13.3
mypy==0.982
mypy-extensions==0.4.3
```
</issue>
<code>
[start of pandera/typing/common.py]
1 """Common typing functionality."""
2 # pylint:disable=abstract-method,disable=too-many-ancestors
3
4 import inspect
5 from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar
6
7 import pandas as pd
8 import typing_inspect
9
10 from .. import dtypes
11 from ..engines import numpy_engine, pandas_engine
12
13 Bool = dtypes.Bool #: ``"bool"`` numpy dtype
14 Date = dtypes.Date #: ``datetime.date`` object dtype
15 DateTime = dtypes.DateTime #: ``"datetime64[ns]"`` numpy dtype
16 Decimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype
17 Timedelta = dtypes.Timedelta #: ``"timedelta64[ns]"`` numpy dtype
18 Category = dtypes.Category #: pandas ``"categorical"`` datatype
19 Float = dtypes.Float #: ``"float"`` numpy dtype
20 Float16 = dtypes.Float16 #: ``"float16"`` numpy dtype
21 Float32 = dtypes.Float32 #: ``"float32"`` numpy dtype
22 Float64 = dtypes.Float64 #: ``"float64"`` numpy dtype
23 Int = dtypes.Int #: ``"int"`` numpy dtype
24 Int8 = dtypes.Int8 #: ``"int8"`` numpy dtype
25 Int16 = dtypes.Int16 #: ``"int16"`` numpy dtype
26 Int32 = dtypes.Int32 #: ``"int32"`` numpy dtype
27 Int64 = dtypes.Int64 #: ``"int64"`` numpy dtype
28 UInt8 = dtypes.UInt8 #: ``"uint8"`` numpy dtype
29 UInt16 = dtypes.UInt16 #: ``"uint16"`` numpy dtype
30 UInt32 = dtypes.UInt32 #: ``"uint32"`` numpy dtype
31 UInt64 = dtypes.UInt64 #: ``"uint64"`` numpy dtype
32 INT8 = pandas_engine.INT8 #: ``"Int8"`` pandas dtype:: pandas 0.24.0+
33 INT16 = pandas_engine.INT16 #: ``"Int16"`` pandas dtype: pandas 0.24.0+
34 INT32 = pandas_engine.INT32 #: ``"Int32"`` pandas dtype: pandas 0.24.0+
35 INT64 = pandas_engine.INT64 #: ``"Int64"`` pandas dtype: pandas 0.24.0+
36 UINT8 = pandas_engine.UINT8 #: ``"UInt8"`` pandas dtype:: pandas 0.24.0+
37 UINT16 = pandas_engine.UINT16 #: ``"UInt16"`` pandas dtype: pandas 0.24.0+
38 UINT32 = pandas_engine.UINT32 #: ``"UInt32"`` pandas dtype: pandas 0.24.0+
39 UINT64 = pandas_engine.UINT64 #: ``"UInt64"`` pandas dtype: pandas 0.24.0+
40 Object = numpy_engine.Object #: ``"object"`` numpy dtype
41 String = dtypes.String #: ``"str"`` numpy dtype
42 #: ``"string"`` pandas dtypes: pandas 1.0.0+. For <1.0.0, this enum will
43 #: fall back on the str-as-object-array representation.
44 STRING = pandas_engine.STRING #: ``"str"`` numpy dtype
45 BOOL = pandas_engine.BOOL #: ``"str"`` numpy dtype
46
47 try:
48 Geometry = pandas_engine.Geometry # : ``"geometry"`` geopandas dtype
49 GEOPANDAS_INSTALLED = True
50 except AttributeError:
51 GEOPANDAS_INSTALLED = False
52
53 if GEOPANDAS_INSTALLED:
54 GenericDtype = TypeVar( # type: ignore
55 "GenericDtype",
56 bool,
57 int,
58 str,
59 float,
60 pd.core.dtypes.base.ExtensionDtype,
61 Bool,
62 Date,
63 DateTime,
64 Decimal,
65 Timedelta,
66 Category,
67 Float,
68 Float16,
69 Float32,
70 Float64,
71 Int,
72 Int8,
73 Int16,
74 Int32,
75 Int64,
76 UInt8,
77 UInt16,
78 UInt32,
79 UInt64,
80 INT8,
81 INT16,
82 INT32,
83 INT64,
84 UINT8,
85 UINT16,
86 UINT32,
87 UINT64,
88 Object,
89 String,
90 STRING,
91 Geometry,
92 covariant=True,
93 )
94 else:
95 GenericDtype = TypeVar( # type: ignore
96 "GenericDtype",
97 bool,
98 int,
99 str,
100 float,
101 pd.core.dtypes.base.ExtensionDtype,
102 Bool,
103 Date,
104 DateTime,
105 Decimal,
106 Timedelta,
107 Category,
108 Float,
109 Float16,
110 Float32,
111 Float64,
112 Int,
113 Int8,
114 Int16,
115 Int32,
116 Int64,
117 UInt8,
118 UInt16,
119 UInt32,
120 UInt64,
121 INT8,
122 INT16,
123 INT32,
124 INT64,
125 UINT8,
126 UINT16,
127 UINT32,
128 UINT64,
129 Object,
130 String,
131 STRING,
132 covariant=True,
133 )
134
135 Schema = TypeVar("Schema", bound="SchemaModel") # type: ignore
136
137
138 # pylint:disable=invalid-name
139 if TYPE_CHECKING:
140 T = TypeVar("T") # pragma: no cover
141 else:
142 T = Schema
143
144
145 class DataFrameBase(Generic[T]):
146 # pylint: disable=too-few-public-methods
147 """
148 Pandera Dataframe base class for validating dataframes on
149 initialization.
150 """
151
152 default_dtype: Optional[Type] = None
153
154 def __setattr__(self, name: str, value: Any) -> None:
155 # pylint: disable=no-member
156 object.__setattr__(self, name, value)
157 if name == "__orig_class__":
158 orig_class = getattr(self, "__orig_class__")
159 class_args = getattr(orig_class, "__args__", None)
160 if class_args is not None and any(
161 x.__name__ == "SchemaModel"
162 for x in inspect.getmro(class_args[0])
163 ):
164 schema_model = value.__args__[0]
165
166 # prevent the double validation problem by preventing checks for
167 # dataframes with a defined pandera.schema
168 pandera_accessor = getattr(self, "pandera")
169 if (
170 pandera_accessor.schema is None
171 or pandera_accessor.schema != schema_model.to_schema()
172 ):
173 pandera_accessor.add_schema(schema_model.to_schema())
174 self.__dict__ = schema_model.validate(self).__dict__
175
176
177 # pylint:disable=too-few-public-methods
178 class SeriesBase(Generic[GenericDtype]):
179 """Pandera Series base class to use for all pandas-like APIs."""
180
181 default_dtype: Optional[Type] = None
182
183 def __get__(
184 self, instance: object, owner: Type
185 ) -> str: # pragma: no cover
186 raise AttributeError("Series should resolve to Field-s")
187
188
189 # pylint:disable=too-few-public-methods
190 class IndexBase(Generic[GenericDtype]):
191 """Representation of pandas.Index, only used for type annotation.
192
193 *new in 0.5.0*
194 """
195
196 default_dtype: Optional[Type] = None
197
198 def __get__(
199 self, instance: object, owner: Type
200 ) -> str: # pragma: no cover
201 raise AttributeError("Indexes should resolve to pa.Index-s")
202
203
204 class AnnotationInfo: # pylint:disable=too-few-public-methods
205 """Captures extra information about an annotation.
206
207 Attributes:
208 origin: The non-parameterized generic class.
209 arg: The first generic type (SchemaModel does not support more than
210 1 argument).
211 literal: Whether the annotation is a literal.
212 optional: Whether the annotation is optional.
213 raw_annotation: The raw annotation.
214 metadata: Extra arguments passed to :data:`typing.Annotated`.
215 """
216
217 def __init__(self, raw_annotation: Type) -> None:
218 self._parse_annotation(raw_annotation)
219
220 @property
221 def is_generic_df(self) -> bool:
222 """True if the annotation is a DataFrameBase subclass."""
223 try:
224 if self.origin is None:
225 return False
226 return issubclass(self.origin, DataFrameBase)
227 except TypeError:
228 return False
229
230 def _parse_annotation(self, raw_annotation: Type) -> None:
231 """Parse key information from annotation.
232
233 :param annotation: A subscripted type.
234 :returns: Annotation
235 """
236 self.raw_annotation = raw_annotation
237 self.origin = self.arg = None
238
239 self.optional = typing_inspect.is_optional_type(raw_annotation)
240 if self.optional and typing_inspect.is_union_type(raw_annotation):
241 # Annotated with Optional or Union[..., NoneType]
242 # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)
243 raw_annotation = typing_inspect.get_args(raw_annotation)[0]
244
245 self.origin = typing_inspect.get_origin(raw_annotation)
246 # Replace empty tuple returned from get_args by None
247 args = typing_inspect.get_args(raw_annotation) or None
248 self.arg = args[0] if args else args
249
250 self.metadata = getattr(self.arg, "__metadata__", None)
251 if self.metadata:
252 self.arg = typing_inspect.get_args(self.arg)[0]
253
254 self.literal = typing_inspect.is_literal_type(self.arg)
255 if self.literal:
256 self.arg = typing_inspect.get_args(self.arg)[0]
257
258 self.default_dtype = getattr(raw_annotation, "default_dtype", None)
259
[end of pandera/typing/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandera/typing/common.py b/pandera/typing/common.py
--- a/pandera/typing/common.py
+++ b/pandera/typing/common.py
@@ -2,7 +2,7 @@
# pylint:disable=abstract-method,disable=too-many-ancestors
import inspect
-from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar
+from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union
import pandas as pd
import typing_inspect
@@ -53,83 +53,85 @@
if GEOPANDAS_INSTALLED:
GenericDtype = TypeVar( # type: ignore
"GenericDtype",
- bool,
- int,
- str,
- float,
- pd.core.dtypes.base.ExtensionDtype,
- Bool,
- Date,
- DateTime,
- Decimal,
- Timedelta,
- Category,
- Float,
- Float16,
- Float32,
- Float64,
- Int,
- Int8,
- Int16,
- Int32,
- Int64,
- UInt8,
- UInt16,
- UInt32,
- UInt64,
- INT8,
- INT16,
- INT32,
- INT64,
- UINT8,
- UINT16,
- UINT32,
- UINT64,
- Object,
- String,
- STRING,
- Geometry,
- covariant=True,
+ bound=Union[
+ bool,
+ int,
+ str,
+ float,
+ pd.core.dtypes.base.ExtensionDtype,
+ Bool,
+ Date,
+ DateTime,
+ Decimal,
+ Timedelta,
+ Category,
+ Float,
+ Float16,
+ Float32,
+ Float64,
+ Int,
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ UInt8,
+ UInt16,
+ UInt32,
+ UInt64,
+ INT8,
+ INT16,
+ INT32,
+ INT64,
+ UINT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ Object,
+ String,
+ STRING,
+ Geometry,
+ ],
)
else:
GenericDtype = TypeVar( # type: ignore
"GenericDtype",
- bool,
- int,
- str,
- float,
- pd.core.dtypes.base.ExtensionDtype,
- Bool,
- Date,
- DateTime,
- Decimal,
- Timedelta,
- Category,
- Float,
- Float16,
- Float32,
- Float64,
- Int,
- Int8,
- Int16,
- Int32,
- Int64,
- UInt8,
- UInt16,
- UInt32,
- UInt64,
- INT8,
- INT16,
- INT32,
- INT64,
- UINT8,
- UINT16,
- UINT32,
- UINT64,
- Object,
- String,
- STRING,
- covariant=True,
+ bound=Union[
+ bool,
+ int,
+ str,
+ float,
+ pd.core.dtypes.base.ExtensionDtype,
+ Bool,
+ Date,
+ DateTime,
+ Decimal,
+ Timedelta,
+ Category,
+ Float,
+ Float16,
+ Float32,
+ Float64,
+ Int,
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ UInt8,
+ UInt16,
+ UInt32,
+ UInt64,
+ INT8,
+ INT16,
+ INT32,
+ INT64,
+ UINT8,
+ UINT16,
+ UINT32,
+ UINT64,
+ Object,
+ String,
+ STRING,
+ ],
)
Schema = TypeVar("Schema", bound="SchemaModel") # type: ignore
| {"golden_diff": "diff --git a/pandera/typing/common.py b/pandera/typing/common.py\n--- a/pandera/typing/common.py\n+++ b/pandera/typing/common.py\n@@ -2,7 +2,7 @@\n # pylint:disable=abstract-method,disable=too-many-ancestors\n \n import inspect\n-from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar\n+from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, Union\n \n import pandas as pd\n import typing_inspect\n@@ -53,83 +53,85 @@\n if GEOPANDAS_INSTALLED:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n- bool,\n- int,\n- str,\n- float,\n- pd.core.dtypes.base.ExtensionDtype,\n- Bool,\n- Date,\n- DateTime,\n- Decimal,\n- Timedelta,\n- Category,\n- Float,\n- Float16,\n- Float32,\n- Float64,\n- Int,\n- Int8,\n- Int16,\n- Int32,\n- Int64,\n- UInt8,\n- UInt16,\n- UInt32,\n- UInt64,\n- INT8,\n- INT16,\n- INT32,\n- INT64,\n- UINT8,\n- UINT16,\n- UINT32,\n- UINT64,\n- Object,\n- String,\n- STRING,\n- Geometry,\n- covariant=True,\n+ bound=Union[\n+ bool,\n+ int,\n+ str,\n+ float,\n+ pd.core.dtypes.base.ExtensionDtype,\n+ Bool,\n+ Date,\n+ DateTime,\n+ Decimal,\n+ Timedelta,\n+ Category,\n+ Float,\n+ Float16,\n+ Float32,\n+ Float64,\n+ Int,\n+ Int8,\n+ Int16,\n+ Int32,\n+ Int64,\n+ UInt8,\n+ UInt16,\n+ UInt32,\n+ UInt64,\n+ INT8,\n+ INT16,\n+ INT32,\n+ INT64,\n+ UINT8,\n+ UINT16,\n+ UINT32,\n+ UINT64,\n+ Object,\n+ String,\n+ STRING,\n+ Geometry,\n+ ],\n )\n else:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n- bool,\n- int,\n- str,\n- float,\n- pd.core.dtypes.base.ExtensionDtype,\n- Bool,\n- Date,\n- DateTime,\n- Decimal,\n- Timedelta,\n- Category,\n- Float,\n- Float16,\n- Float32,\n- Float64,\n- Int,\n- Int8,\n- Int16,\n- Int32,\n- Int64,\n- UInt8,\n- UInt16,\n- UInt32,\n- UInt64,\n- INT8,\n- INT16,\n- INT32,\n- INT64,\n- UINT8,\n- UINT16,\n- UINT32,\n- UINT64,\n- Object,\n- String,\n- STRING,\n- covariant=True,\n+ bound=Union[\n+ bool,\n+ int,\n+ str,\n+ float,\n+ pd.core.dtypes.base.ExtensionDtype,\n+ Bool,\n+ Date,\n+ DateTime,\n+ Decimal,\n+ Timedelta,\n+ Category,\n+ Float,\n+ Float16,\n+ Float32,\n+ Float64,\n+ Int,\n+ Int8,\n+ Int16,\n+ Int32,\n+ Int64,\n+ UInt8,\n+ UInt16,\n+ UInt32,\n+ UInt64,\n+ INT8,\n+ INT16,\n+ INT32,\n+ INT64,\n+ UINT8,\n+ UINT16,\n+ UINT32,\n+ UINT64,\n+ Object,\n+ String,\n+ STRING,\n+ ],\n )\n \n Schema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n", "issue": "mypy: pa.typing.Series does not support pd.DatetimeTZDtype as a type argument\n**Describe the bug**\r\n\r\nUnder mypy, `pa.typing.Series` does not support `pd.DatetimeTZDtype` as a valid type argument.\r\n\r\nBug occurs with and without `pandera.mypy` plugin\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [ ] (optional) I have confirmed this bug exists on the master branch of pandera.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\n```python\r\nfrom typing import Annotated\r\nimport pandas as pd\r\nimport pandera as pa\r\nfrom pandera.typing import Series\r\n\r\nclass MySchema(pa.SchemaModel):\r\n # both lines taken from the pandera documentation\r\n a: Series[Annotated[pd.DatetimeTZDtype, \"ns\", \"UTC\"]]\r\n b: Series[pd.DatetimeTZDtype] = pa.Field(dtype_kwargs={\"unit\": \"ns\", \"tz\": \"UTC\"})\r\n```\r\n\r\n#### Expected behavior\r\n\r\nmypy should show no errors\r\n\r\n#### Actual behaviour\r\n\r\nmypy produces the following errors\r\n\r\n```\r\ntest.py:8: error: Value of type variable \"GenericDtype\" of 
\"Series\" cannot be \"DatetimeTZDtype\"\r\ntest.py:9: error: Value of type variable \"GenericDtype\" of \"Series\" cannot be \"DatetimeTZDtype\"\r\n```\r\n\r\n#### Versions\r\n\r\n```\r\npandas==1.5.0\r\npandas-stubs==1.4.3.220807\r\npandera==0.13.3\r\nmypy==0.982\r\nmypy-extensions==0.4.3\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Common typing functionality.\"\"\"\n# pylint:disable=abstract-method,disable=too-many-ancestors\n\nimport inspect\nfrom typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar\n\nimport pandas as pd\nimport typing_inspect\n\nfrom .. import dtypes\nfrom ..engines import numpy_engine, pandas_engine\n\nBool = dtypes.Bool #: ``\"bool\"`` numpy dtype\nDate = dtypes.Date #: ``datetime.date`` object dtype\nDateTime = dtypes.DateTime #: ``\"datetime64[ns]\"`` numpy dtype\nDecimal = dtypes.Decimal #: ``decimal.Decimal`` object dtype\nTimedelta = dtypes.Timedelta #: ``\"timedelta64[ns]\"`` numpy dtype\nCategory = dtypes.Category #: pandas ``\"categorical\"`` datatype\nFloat = dtypes.Float #: ``\"float\"`` numpy dtype\nFloat16 = dtypes.Float16 #: ``\"float16\"`` numpy dtype\nFloat32 = dtypes.Float32 #: ``\"float32\"`` numpy dtype\nFloat64 = dtypes.Float64 #: ``\"float64\"`` numpy dtype\nInt = dtypes.Int #: ``\"int\"`` numpy dtype\nInt8 = dtypes.Int8 #: ``\"int8\"`` numpy dtype\nInt16 = dtypes.Int16 #: ``\"int16\"`` numpy dtype\nInt32 = dtypes.Int32 #: ``\"int32\"`` numpy dtype\nInt64 = dtypes.Int64 #: ``\"int64\"`` numpy dtype\nUInt8 = dtypes.UInt8 #: ``\"uint8\"`` numpy dtype\nUInt16 = dtypes.UInt16 #: ``\"uint16\"`` numpy dtype\nUInt32 = dtypes.UInt32 #: ``\"uint32\"`` numpy dtype\nUInt64 = dtypes.UInt64 #: ``\"uint64\"`` numpy dtype\nINT8 = pandas_engine.INT8 #: ``\"Int8\"`` pandas dtype:: pandas 0.24.0+\nINT16 = pandas_engine.INT16 #: ``\"Int16\"`` pandas dtype: pandas 0.24.0+\nINT32 = pandas_engine.INT32 #: ``\"Int32\"`` pandas dtype: pandas 0.24.0+\nINT64 = pandas_engine.INT64 #: ``\"Int64\"`` pandas dtype: pandas 0.24.0+\nUINT8 = pandas_engine.UINT8 #: ``\"UInt8\"`` pandas dtype:: pandas 0.24.0+\nUINT16 = pandas_engine.UINT16 #: ``\"UInt16\"`` pandas dtype: pandas 0.24.0+\nUINT32 = pandas_engine.UINT32 #: ``\"UInt32\"`` pandas dtype: pandas 0.24.0+\nUINT64 = pandas_engine.UINT64 #: ``\"UInt64\"`` pandas dtype: pandas 0.24.0+\nObject = numpy_engine.Object #: ``\"object\"`` numpy dtype\nString = dtypes.String #: ``\"str\"`` numpy dtype\n#: ``\"string\"`` pandas dtypes: pandas 1.0.0+. 
For <1.0.0, this enum will\n#: fall back on the str-as-object-array representation.\nSTRING = pandas_engine.STRING #: ``\"str\"`` numpy dtype\nBOOL = pandas_engine.BOOL #: ``\"str\"`` numpy dtype\n\ntry:\n Geometry = pandas_engine.Geometry # : ``\"geometry\"`` geopandas dtype\n GEOPANDAS_INSTALLED = True\nexcept AttributeError:\n GEOPANDAS_INSTALLED = False\n\nif GEOPANDAS_INSTALLED:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n Date,\n DateTime,\n Decimal,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n Geometry,\n covariant=True,\n )\nelse:\n GenericDtype = TypeVar( # type: ignore\n \"GenericDtype\",\n bool,\n int,\n str,\n float,\n pd.core.dtypes.base.ExtensionDtype,\n Bool,\n Date,\n DateTime,\n Decimal,\n Timedelta,\n Category,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n INT8,\n INT16,\n INT32,\n INT64,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n Object,\n String,\n STRING,\n covariant=True,\n )\n\nSchema = TypeVar(\"Schema\", bound=\"SchemaModel\") # type: ignore\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = Schema\n\n\nclass DataFrameBase(Generic[T]):\n # pylint: disable=too-few-public-methods\n \"\"\"\n Pandera Dataframe base class for validating dataframes on\n initialization.\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __setattr__(self, name: str, value: Any) -> None:\n # pylint: disable=no-member\n object.__setattr__(self, name, value)\n if name == \"__orig_class__\":\n orig_class = getattr(self, \"__orig_class__\")\n class_args = getattr(orig_class, \"__args__\", None)\n if class_args is not None and any(\n x.__name__ == \"SchemaModel\"\n for x in inspect.getmro(class_args[0])\n ):\n schema_model = value.__args__[0]\n\n # prevent the double validation problem by preventing checks for\n # dataframes with a defined pandera.schema\n pandera_accessor = getattr(self, \"pandera\")\n if (\n pandera_accessor.schema is None\n or pandera_accessor.schema != schema_model.to_schema()\n ):\n pandera_accessor.add_schema(schema_model.to_schema())\n self.__dict__ = schema_model.validate(self).__dict__\n\n\n# pylint:disable=too-few-public-methods\nclass SeriesBase(Generic[GenericDtype]):\n \"\"\"Pandera Series base class to use for all pandas-like APIs.\"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Series should resolve to Field-s\")\n\n\n# pylint:disable=too-few-public-methods\nclass IndexBase(Generic[GenericDtype]):\n \"\"\"Representation of pandas.Index, only used for type annotation.\n\n *new in 0.5.0*\n \"\"\"\n\n default_dtype: Optional[Type] = None\n\n def __get__(\n self, instance: object, owner: Type\n ) -> str: # pragma: no cover\n raise AttributeError(\"Indexes should resolve to pa.Index-s\")\n\n\nclass AnnotationInfo: # pylint:disable=too-few-public-methods\n \"\"\"Captures extra information about an annotation.\n\n Attributes:\n origin: The non-parameterized generic class.\n arg: The first generic type (SchemaModel does not support more than\n 1 argument).\n literal: Whether the annotation is a literal.\n optional: Whether the 
annotation is optional.\n raw_annotation: The raw annotation.\n metadata: Extra arguments passed to :data:`typing.Annotated`.\n \"\"\"\n\n def __init__(self, raw_annotation: Type) -> None:\n self._parse_annotation(raw_annotation)\n\n @property\n def is_generic_df(self) -> bool:\n \"\"\"True if the annotation is a DataFrameBase subclass.\"\"\"\n try:\n if self.origin is None:\n return False\n return issubclass(self.origin, DataFrameBase)\n except TypeError:\n return False\n\n def _parse_annotation(self, raw_annotation: Type) -> None:\n \"\"\"Parse key information from annotation.\n\n :param annotation: A subscripted type.\n :returns: Annotation\n \"\"\"\n self.raw_annotation = raw_annotation\n self.origin = self.arg = None\n\n self.optional = typing_inspect.is_optional_type(raw_annotation)\n if self.optional and typing_inspect.is_union_type(raw_annotation):\n # Annotated with Optional or Union[..., NoneType]\n # get_args -> (pandera.typing.Index[str], <class 'NoneType'>)\n raw_annotation = typing_inspect.get_args(raw_annotation)[0]\n\n self.origin = typing_inspect.get_origin(raw_annotation)\n # Replace empty tuple returned from get_args by None\n args = typing_inspect.get_args(raw_annotation) or None\n self.arg = args[0] if args else args\n\n self.metadata = getattr(self.arg, \"__metadata__\", None)\n if self.metadata:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.literal = typing_inspect.is_literal_type(self.arg)\n if self.literal:\n self.arg = typing_inspect.get_args(self.arg)[0]\n\n self.default_dtype = getattr(raw_annotation, \"default_dtype\", None)\n", "path": "pandera/typing/common.py"}]} | 3,729 | 971 |
gh_patches_debug_18681 | rasdani/github-patches | git_diff | pytorch__vision-5583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extra dot in 0.12 docs
Looks like something similar to the previous release happened https://github.com/pytorch/vision/issues/4754
The newly pushed docs are https://pytorch.org/vision/0.12./ instead of https://pytorch.org/vision/0.12 (extra dot at the end), and the version tag shows a lot of extra info:
[screenshot: the docs version selector showing the extended version string]
@mattip is this something you could help us with, like you did in https://github.com/pytorch/vision/pull/4755 ?
</issue>
<code>
[start of docs/source/conf.py]
1 #!/usr/bin/env python3
2 #
3 # PyTorch documentation build configuration file, created by
4 # sphinx-quickstart on Fri Dec 23 13:31:47 2016.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 # import os
20 # import sys
21 # sys.path.insert(0, os.path.abspath('.'))
22
23 import pytorch_sphinx_theme
24 import torchvision
25
26
27 # -- General configuration ------------------------------------------------
28
29 # Required version of sphinx is set from docs/requirements.txt
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 "sphinx.ext.autodoc",
36 "sphinx.ext.autosummary",
37 "sphinx.ext.doctest",
38 "sphinx.ext.intersphinx",
39 "sphinx.ext.todo",
40 "sphinx.ext.mathjax",
41 "sphinx.ext.napoleon",
42 "sphinx.ext.viewcode",
43 "sphinx.ext.duration",
44 "sphinx_gallery.gen_gallery",
45 "sphinx_copybutton",
46 ]
47
48 sphinx_gallery_conf = {
49 "examples_dirs": "../../gallery/", # path to your example scripts
50 "gallery_dirs": "auto_examples", # path to where to save gallery generated output
51 "backreferences_dir": "gen_modules/backreferences",
52 "doc_module": ("torchvision",),
53 }
54
55 napoleon_use_ivar = True
56 napoleon_numpy_docstring = False
57 napoleon_google_docstring = True
58
59
60 # Add any paths that contain templates here, relative to this directory.
61 templates_path = ["_templates"]
62
63 # The suffix(es) of source filenames.
64 # You can specify multiple suffix as a list of string:
65 #
66 source_suffix = {
67 ".rst": "restructuredtext",
68 }
69
70 # The master toctree document.
71 master_doc = "index"
72
73 # General information about the project.
74 project = "Torchvision"
75 copyright = "2017-present, Torch Contributors"
76 author = "Torch Contributors"
77
78 # The version info for the project you're documenting, acts as replacement for
79 # |version| and |release|, also used in various other places throughout the
80 # built documents.
81 #
82 # The short X.Y version.
83 # TODO: change to [:2] at v1.0
84 version = "main (" + torchvision.__version__ + " )"
85 # The full version, including alpha/beta/rc tags.
86 # TODO: verify this works as expected
87 release = "main"
88
89 # The language for content autogenerated by Sphinx. Refer to documentation
90 # for a list of supported languages.
91 #
92 # This is also used if you do content translation via gettext catalogs.
93 # Usually you set "language" from the command line for these cases.
94 language = None
95
96 # List of patterns, relative to source directory, that match files and
97 # directories to ignore when looking for source files.
98 # This patterns also effect to html_static_path and html_extra_path
99 exclude_patterns = []
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = "sphinx"
103
104 # If true, `todo` and `todoList` produce output, else they produce nothing.
105 todo_include_todos = True
106
107
108 # -- Options for HTML output ----------------------------------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112 #
113 html_theme = "pytorch_sphinx_theme"
114 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
115
116 # Theme options are theme-specific and customize the look and feel of a theme
117 # further. For a list of options available for each theme, see the
118 # documentation.
119 #
120 html_theme_options = {
121 "collapse_navigation": False,
122 "display_version": True,
123 "logo_only": True,
124 "pytorch_project": "docs",
125 "navigation_with_keys": True,
126 "analytics_id": "UA-117752657-2",
127 }
128
129 html_logo = "_static/img/pytorch-logo-dark.svg"
130
131 # Add any paths that contain custom static files (such as style sheets) here,
132 # relative to this directory. They are copied after the builtin static files,
133 # so a file named "default.css" will overwrite the builtin "default.css".
134 html_static_path = ["_static"]
135
136 # TODO: remove this once https://github.com/pytorch/pytorch_sphinx_theme/issues/125 is fixed
137 html_css_files = [
138 "css/custom_torchvision.css",
139 ]
140
141 # -- Options for HTMLHelp output ------------------------------------------
142
143 # Output file base name for HTML help builder.
144 htmlhelp_basename = "PyTorchdoc"
145
146
147 autosummary_generate = True
148
149
150 # -- Options for LaTeX output ---------------------------------------------
151 latex_elements = {
152 # The paper size ('letterpaper' or 'a4paper').
153 #
154 # 'papersize': 'letterpaper',
155 # The font size ('10pt', '11pt' or '12pt').
156 #
157 # 'pointsize': '10pt',
158 # Additional stuff for the LaTeX preamble.
159 #
160 # 'preamble': '',
161 # Latex figure (float) alignment
162 #
163 # 'figure_align': 'htbp',
164 }
165
166
167 # Grouping the document tree into LaTeX files. List of tuples
168 # (source start file, target name, title,
169 # author, documentclass [howto, manual, or own class]).
170 latex_documents = [
171 (master_doc, "pytorch.tex", "torchvision Documentation", "Torch Contributors", "manual"),
172 ]
173
174
175 # -- Options for manual page output ---------------------------------------
176
177 # One entry per manual page. List of tuples
178 # (source start file, name, description, authors, manual section).
179 man_pages = [(master_doc, "torchvision", "torchvision Documentation", [author], 1)]
180
181
182 # -- Options for Texinfo output -------------------------------------------
183
184 # Grouping the document tree into Texinfo files. List of tuples
185 # (source start file, target name, title, author,
186 # dir menu entry, description, category)
187 texinfo_documents = [
188 (
189 master_doc,
190 "torchvision",
191 "torchvision Documentation",
192 author,
193 "torchvision",
194 "One line description of project.",
195 "Miscellaneous",
196 ),
197 ]
198
199
200 # Example configuration for intersphinx: refer to the Python standard library.
201 intersphinx_mapping = {
202 "python": ("https://docs.python.org/3/", None),
203 "torch": ("https://pytorch.org/docs/stable/", None),
204 "numpy": ("https://numpy.org/doc/stable/", None),
205 "PIL": ("https://pillow.readthedocs.io/en/stable/", None),
206 "matplotlib": ("https://matplotlib.org/stable/", None),
207 }
208
209 # -- A patch that prevents Sphinx from cross-referencing ivar tags -------
210 # See http://stackoverflow.com/a/41184353/3343043
211
212 from docutils import nodes
213 from sphinx import addnodes
214 from sphinx.util.docfields import TypedField
215
216
217 def patched_make_field(self, types, domain, items, **kw):
218 # `kw` catches `env=None` needed for newer sphinx while maintaining
219 # backwards compatibility when passed along further down!
220
221 # type: (list, unicode, tuple) -> nodes.field # noqa: F821
222 def handle_item(fieldarg, content):
223 par = nodes.paragraph()
224 par += addnodes.literal_strong("", fieldarg) # Patch: this line added
225 # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
226 # addnodes.literal_strong))
227 if fieldarg in types:
228 par += nodes.Text(" (")
229 # NOTE: using .pop() here to prevent a single type node to be
230 # inserted twice into the doctree, which leads to
231 # inconsistencies later when references are resolved
232 fieldtype = types.pop(fieldarg)
233 if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
234 typename = "".join(n.astext() for n in fieldtype)
235 typename = typename.replace("int", "python:int")
236 typename = typename.replace("long", "python:long")
237 typename = typename.replace("float", "python:float")
238 typename = typename.replace("type", "python:type")
239 par.extend(self.make_xrefs(self.typerolename, domain, typename, addnodes.literal_emphasis, **kw))
240 else:
241 par += fieldtype
242 par += nodes.Text(")")
243 par += nodes.Text(" -- ")
244 par += content
245 return par
246
247 fieldname = nodes.field_name("", self.label)
248 if len(items) == 1 and self.can_collapse:
249 fieldarg, content = items[0]
250 bodynode = handle_item(fieldarg, content)
251 else:
252 bodynode = self.list_type()
253 for fieldarg, content in items:
254 bodynode += nodes.list_item("", handle_item(fieldarg, content))
255 fieldbody = nodes.field_body("", bodynode)
256 return nodes.field("", fieldname, fieldbody)
257
258
259 TypedField.make_field = patched_make_field
260
261
262 def inject_minigalleries(app, what, name, obj, options, lines):
263 """Inject a minigallery into a docstring.
264
265 This avoids having to manually write the .. minigallery directive for every item we want a minigallery for,
266 as it would be easy to miss some.
267
268 This callback is called after the .. auto directives (like ..autoclass) have been processed,
269 and modifies the lines parameter inplace to add the .. minigallery that will show which examples
270 are using which object.
271
272 It's a bit hacky, but not *that* hacky when you consider that the recommended way is to do pretty much the same,
273 but instead with templates using autosummary (which we don't want to use):
274 (https://sphinx-gallery.github.io/stable/configuration.html#auto-documenting-your-api-with-links-to-examples)
275
276 For docs on autodoc-process-docstring, see the autodoc docs:
277 https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
278 """
279
280 if what in ("class", "function"):
281 lines.append(f".. minigallery:: {name}")
282 lines.append(f" :add-heading: Examples using ``{name.split('.')[-1]}``:")
283 # avoid heading entirely to avoid warning. As a bonud it actually renders better
284 lines.append(" :heading-level: 9")
285 lines.append("\n")
286
287
288 def setup(app):
289 app.connect("autodoc-process-docstring", inject_minigalleries)
290
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -20,6 +20,8 @@
# import sys
# sys.path.insert(0, os.path.abspath('.'))
+import os
+
import pytorch_sphinx_theme
import torchvision
@@ -80,11 +82,16 @@
# built documents.
#
# The short X.Y version.
-# TODO: change to [:2] at v1.0
version = "main (" + torchvision.__version__ + " )"
# The full version, including alpha/beta/rc tags.
-# TODO: verify this works as expected
release = "main"
+VERSION = os.environ.get("VERSION", None)
+if VERSION:
+ # Turn 1.11.0aHASH into 1.11 (major.minor only)
+ version = ".".join(version.split(".")[:2])
+ html_title = " ".join((project, version, "documentation"))
+ release = version
+
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -20,6 +20,8 @@\n # import sys\n # sys.path.insert(0, os.path.abspath('.'))\n \n+import os\n+\n import pytorch_sphinx_theme\n import torchvision\n \n@@ -80,11 +82,16 @@\n # built documents.\n #\n # The short X.Y version.\n-# TODO: change to [:2] at v1.0\n version = \"main (\" + torchvision.__version__ + \" )\"\n # The full version, including alpha/beta/rc tags.\n-# TODO: verify this works as expected\n release = \"main\"\n+VERSION = os.environ.get(\"VERSION\", None)\n+if VERSION:\n+ # Turn 1.11.0aHASH into 1.11 (major.minor only)\n+ version = \".\".join(version.split(\".\")[:2])\n+ html_title = \" \".join((project, version, \"documentation\"))\n+ release = version\n+\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "Extra dot in 0.12 docs\nLooks like something similar to the previous release happened https://github.com/pytorch/vision/issues/4754\r\n\r\nThe newly pushed docs are https://pytorch.org/vision/0.12./ instead of https://pytorch.org/vision/0.12 (extra dot at the end), and the version tag shows a lot of extra info:\r\n\r\n\r\n\r\n\r\n@mattip is this something you could help us with, like you did in https://github.com/pytorch/vision/pull/4755 ?\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# PyTorch documentation build configuration file, created by\n# sphinx-quickstart on Fri Dec 23 13:31:47 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pytorch_sphinx_theme\nimport torchvision\n\n\n# -- General configuration ------------------------------------------------\n\n# Required version of sphinx is set from docs/requirements.txt\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.duration\",\n \"sphinx_gallery.gen_gallery\",\n \"sphinx_copybutton\",\n]\n\nsphinx_gallery_conf = {\n \"examples_dirs\": \"../../gallery/\", # path to your example scripts\n \"gallery_dirs\": \"auto_examples\", # path to where to save gallery generated output\n \"backreferences_dir\": \"gen_modules/backreferences\",\n \"doc_module\": (\"torchvision\",),\n}\n\nnapoleon_use_ivar = True\nnapoleon_numpy_docstring = False\nnapoleon_google_docstring = True\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n}\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Torchvision\"\ncopyright = \"2017-present, Torch Contributors\"\nauthor = \"Torch Contributors\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# TODO: change to [:2] at v1.0\nversion = \"main (\" + torchvision.__version__ + \" )\"\n# The full version, including alpha/beta/rc tags.\n# TODO: verify this works as expected\nrelease = \"main\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-117752657-2\",\n}\n\nhtml_logo = \"_static/img/pytorch-logo-dark.svg\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# TODO: remove this once https://github.com/pytorch/pytorch_sphinx_theme/issues/125 is fixed\nhtml_css_files = [\n \"css/custom_torchvision.css\",\n]\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PyTorchdoc\"\n\n\nautosummary_generate = True\n\n\n# -- Options for LaTeX output ---------------------------------------------\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"pytorch.tex\", \"torchvision Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"torchvision\", \"torchvision Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"torchvision\",\n \"torchvision Documentation\",\n author,\n \"torchvision\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n}\n\n# -- A patch that prevents Sphinx from cross-referencing ivar tags -------\n# See http://stackoverflow.com/a/41184353/3343043\n\nfrom docutils import nodes\nfrom sphinx import addnodes\nfrom sphinx.util.docfields import TypedField\n\n\ndef patched_make_field(self, types, domain, items, **kw):\n # `kw` catches `env=None` needed for newer sphinx while maintaining\n # backwards compatibility when passed along further down!\n\n # type: (list, unicode, tuple) -> nodes.field # noqa: F821\n def handle_item(fieldarg, content):\n par = nodes.paragraph()\n par += addnodes.literal_strong(\"\", fieldarg) # Patch: this line added\n # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,\n # addnodes.literal_strong))\n if fieldarg in types:\n par += nodes.Text(\" (\")\n # NOTE: using .pop() here to prevent a single type node to be\n # inserted twice into the doctree, which leads to\n # inconsistencies later when references are resolved\n fieldtype = types.pop(fieldarg)\n if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):\n typename = \"\".join(n.astext() for n in fieldtype)\n typename = typename.replace(\"int\", \"python:int\")\n typename = typename.replace(\"long\", \"python:long\")\n typename = 
typename.replace(\"float\", \"python:float\")\n typename = typename.replace(\"type\", \"python:type\")\n par.extend(self.make_xrefs(self.typerolename, domain, typename, addnodes.literal_emphasis, **kw))\n else:\n par += fieldtype\n par += nodes.Text(\")\")\n par += nodes.Text(\" -- \")\n par += content\n return par\n\n fieldname = nodes.field_name(\"\", self.label)\n if len(items) == 1 and self.can_collapse:\n fieldarg, content = items[0]\n bodynode = handle_item(fieldarg, content)\n else:\n bodynode = self.list_type()\n for fieldarg, content in items:\n bodynode += nodes.list_item(\"\", handle_item(fieldarg, content))\n fieldbody = nodes.field_body(\"\", bodynode)\n return nodes.field(\"\", fieldname, fieldbody)\n\n\nTypedField.make_field = patched_make_field\n\n\ndef inject_minigalleries(app, what, name, obj, options, lines):\n \"\"\"Inject a minigallery into a docstring.\n\n This avoids having to manually write the .. minigallery directive for every item we want a minigallery for,\n as it would be easy to miss some.\n\n This callback is called after the .. auto directives (like ..autoclass) have been processed,\n and modifies the lines parameter inplace to add the .. minigallery that will show which examples\n are using which object.\n\n It's a bit hacky, but not *that* hacky when you consider that the recommended way is to do pretty much the same,\n but instead with templates using autosummary (which we don't want to use):\n (https://sphinx-gallery.github.io/stable/configuration.html#auto-documenting-your-api-with-links-to-examples)\n\n For docs on autodoc-process-docstring, see the autodoc docs:\n https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html\n \"\"\"\n\n if what in (\"class\", \"function\"):\n lines.append(f\".. minigallery:: {name}\")\n lines.append(f\" :add-heading: Examples using ``{name.split('.')[-1]}``:\")\n # avoid heading entirely to avoid warning. As a bonud it actually renders better\n lines.append(\" :heading-level: 9\")\n lines.append(\"\\n\")\n\n\ndef setup(app):\n app.connect(\"autodoc-process-docstring\", inject_minigalleries)\n", "path": "docs/source/conf.py"}]} | 3,889 | 252 |
gh_patches_debug_35582 | rasdani/github-patches | git_diff | pyca__cryptography-2840 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise an error on openssl 0.9.8
With an env var (probably `CRYPTOGRAPHY_ALLOW_OPENSSL_098`) to allow disabling it for this one release.
</issue>
<code>
[start of src/cryptography/hazmat/bindings/openssl/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import os
9 import threading
10 import types
11 import warnings
12
13 from cryptography import utils
14 from cryptography.exceptions import InternalError
15 from cryptography.hazmat.bindings._openssl import ffi, lib
16 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
17
18 _OpenSSLError = collections.namedtuple("_OpenSSLError",
19 ["code", "lib", "func", "reason"])
20 _OpenSSLErrorWithText = collections.namedtuple(
21 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"]
22 )
23
24
25 def _consume_errors(lib):
26 errors = []
27 while True:
28 code = lib.ERR_get_error()
29 if code == 0:
30 break
31
32 err_lib = lib.ERR_GET_LIB(code)
33 err_func = lib.ERR_GET_FUNC(code)
34 err_reason = lib.ERR_GET_REASON(code)
35
36 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
37
38 return errors
39
40
41 def _openssl_assert(lib, ok):
42 if not ok:
43 errors = _consume_errors(lib)
44 errors_with_text = []
45 for err in errors:
46 err_text_reason = ffi.string(
47 lib.ERR_error_string(err.code, ffi.NULL)
48 )
49 errors_with_text.append(
50 _OpenSSLErrorWithText(
51 err.code, err.lib, err.func, err.reason, err_text_reason
52 )
53 )
54
55 raise InternalError(
56 "Unknown OpenSSL error. This error is commonly encountered when "
57 "another library is not cleaning up the OpenSSL error stack. If "
58 "you are using cryptography with another library that uses "
59 "OpenSSL try disabling it before reporting a bug. Otherwise "
60 "please file an issue at https://github.com/pyca/cryptography/"
61 "issues with information on how to reproduce "
62 "this. ({0!r})".format(errors_with_text),
63 errors_with_text
64 )
65
66
67 def ffi_callback(signature, name, **kwargs):
68 """Callback dispatcher
69
70 The ffi_callback() dispatcher keeps callbacks compatible between dynamic
71 and static callbacks.
72 """
73 def wrapper(func):
74 if lib.Cryptography_STATIC_CALLBACKS:
75 # def_extern() returns a decorator that sets the internal
76 # function pointer and returns the original function unmodified.
77 ffi.def_extern(name=name, **kwargs)(func)
78 callback = getattr(lib, name)
79 else:
80 # callback() wraps the function in a cdata function.
81 callback = ffi.callback(signature, **kwargs)(func)
82 return callback
83 return wrapper
84
85
86 @ffi_callback("int (*)(unsigned char *, int)",
87 name="Cryptography_rand_bytes",
88 error=-1)
89 def _osrandom_rand_bytes(buf, size):
90 signed = ffi.cast("char *", buf)
91 result = os.urandom(size)
92 signed[0:size] = result
93 return 1
94
95
96 @ffi_callback("int (*)(void)", name="Cryptography_rand_status")
97 def _osrandom_rand_status():
98 return 1
99
100
101 def build_conditional_library(lib, conditional_names):
102 conditional_lib = types.ModuleType("lib")
103 excluded_names = set()
104 for condition, names in conditional_names.items():
105 if not getattr(lib, condition):
106 excluded_names |= set(names)
107
108 for attr in dir(lib):
109 if attr not in excluded_names:
110 setattr(conditional_lib, attr, getattr(lib, attr))
111
112 return conditional_lib
113
114
115 class Binding(object):
116 """
117 OpenSSL API wrapper.
118 """
119 lib = None
120 ffi = ffi
121 _lib_loaded = False
122 _locks = None
123 _lock_cb_handle = None
124 _init_lock = threading.Lock()
125 _lock_init_lock = threading.Lock()
126
127 _osrandom_engine_id = ffi.new("const char[]", b"osrandom")
128 _osrandom_engine_name = ffi.new("const char[]", b"osrandom_engine")
129 _osrandom_method = ffi.new(
130 "RAND_METHOD *",
131 dict(bytes=_osrandom_rand_bytes,
132 pseudorand=_osrandom_rand_bytes,
133 status=_osrandom_rand_status)
134 )
135
136 def __init__(self):
137 self._ensure_ffi_initialized()
138
139 @classmethod
140 def _register_osrandom_engine(cls):
141 _openssl_assert(cls.lib, cls.lib.ERR_peek_error() == 0)
142
143 engine = cls.lib.ENGINE_new()
144 _openssl_assert(cls.lib, engine != cls.ffi.NULL)
145 try:
146 result = cls.lib.ENGINE_set_id(engine, cls._osrandom_engine_id)
147 _openssl_assert(cls.lib, result == 1)
148 result = cls.lib.ENGINE_set_name(engine, cls._osrandom_engine_name)
149 _openssl_assert(cls.lib, result == 1)
150 result = cls.lib.ENGINE_set_RAND(engine, cls._osrandom_method)
151 _openssl_assert(cls.lib, result == 1)
152 result = cls.lib.ENGINE_add(engine)
153 if result != 1:
154 errors = _consume_errors(cls.lib)
155 _openssl_assert(
156 cls.lib,
157 errors[0].reason == cls.lib.ENGINE_R_CONFLICTING_ENGINE_ID
158 )
159
160 finally:
161 result = cls.lib.ENGINE_free(engine)
162 _openssl_assert(cls.lib, result == 1)
163
164 @classmethod
165 def _ensure_ffi_initialized(cls):
166 with cls._init_lock:
167 if not cls._lib_loaded:
168 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
169 cls._lib_loaded = True
170 # initialize the SSL library
171 cls.lib.SSL_library_init()
172 # adds all ciphers/digests for EVP
173 cls.lib.OpenSSL_add_all_algorithms()
174 # loads error strings for libcrypto and libssl functions
175 cls.lib.SSL_load_error_strings()
176 cls._register_osrandom_engine()
177
178 @classmethod
179 def init_static_locks(cls):
180 with cls._lock_init_lock:
181 cls._ensure_ffi_initialized()
182
183 if not cls._lock_cb_handle:
184 wrapper = ffi_callback(
185 "void(int, int, const char *, int)",
186 name="Cryptography_locking_cb",
187 )
188 cls._lock_cb_handle = wrapper(cls._lock_cb)
189
190 # Use Python's implementation if available, importing _ssl triggers
191 # the setup for this.
192 __import__("_ssl")
193
194 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
195 return
196
197 # If nothing else has setup a locking callback already, we set up
198 # our own
199 num_locks = cls.lib.CRYPTO_num_locks()
200 cls._locks = [threading.Lock() for n in range(num_locks)]
201
202 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
203
204 @classmethod
205 def _lock_cb(cls, mode, n, file, line):
206 lock = cls._locks[n]
207
208 if mode & cls.lib.CRYPTO_LOCK:
209 lock.acquire()
210 elif mode & cls.lib.CRYPTO_UNLOCK:
211 lock.release()
212 else:
213 raise RuntimeError(
214 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format(
215 mode, n, file, line
216 )
217 )
218
219
220 # OpenSSL is not thread safe until the locks are initialized. We call this
221 # method in module scope so that it executes with the import lock. On
222 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
223 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
224 # is per module so this approach will not work.
225 Binding.init_static_locks()
226
227 if Binding.lib.SSLeay() < 0x10000000:
228 warnings.warn(
229 "OpenSSL version 0.9.8 is no longer supported by the OpenSSL project, "
230 "please upgrade. The next version of cryptography will drop support "
231 "for it.",
232 utils.DeprecatedIn12
233 )
234 elif Binding.lib.SSLeay() < 0x10001000:
235 warnings.warn(
236 "OpenSSL versions less than 1.0.1 are no longer supported by the "
237 "OpenSSL project, please upgrade. A future version of cryptography "
238 "will drop support for these versions.",
239 DeprecationWarning
240 )
241
[end of src/cryptography/hazmat/bindings/openssl/binding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -10,7 +10,6 @@
import types
import warnings
-from cryptography import utils
from cryptography.exceptions import InternalError
from cryptography.hazmat.bindings._openssl import ffi, lib
from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
@@ -217,6 +216,30 @@
)
+def _verify_openssl_version(version):
+ if version < 0x10000000:
+ if os.environ.get("CRYPTOGRAPHY_ALLOW_OPENSSL_098"):
+ warnings.warn(
+ "OpenSSL version 0.9.8 is no longer supported by the OpenSSL "
+ "project, please upgrade. The next version of cryptography "
+ "will completely remove support for it.",
+ DeprecationWarning
+ )
+ else:
+ raise RuntimeError(
+ "You are linking against OpenSSL 0.9.8, which is no longer "
+ "support by the OpenSSL project. You need to upgrade to a "
+ "newer version of OpenSSL."
+ )
+ elif version < 0x10001000:
+ warnings.warn(
+ "OpenSSL versions less than 1.0.1 are no longer supported by the "
+ "OpenSSL project, please upgrade. A future version of "
+ "cryptography will drop support for these versions of OpenSSL.",
+ DeprecationWarning
+ )
+
+
# OpenSSL is not thread safe until the locks are initialized. We call this
# method in module scope so that it executes with the import lock. On
# Pythons < 3.4 this import lock is a global lock, which can prevent a race
@@ -224,17 +247,4 @@
# is per module so this approach will not work.
Binding.init_static_locks()
-if Binding.lib.SSLeay() < 0x10000000:
- warnings.warn(
- "OpenSSL version 0.9.8 is no longer supported by the OpenSSL project, "
- "please upgrade. The next version of cryptography will drop support "
- "for it.",
- utils.DeprecatedIn12
- )
-elif Binding.lib.SSLeay() < 0x10001000:
- warnings.warn(
- "OpenSSL versions less than 1.0.1 are no longer supported by the "
- "OpenSSL project, please upgrade. A future version of cryptography "
- "will drop support for these versions.",
- DeprecationWarning
- )
+_verify_openssl_version(Binding.lib.SSLeay())
| {"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -10,7 +10,6 @@\n import types\n import warnings\n \n-from cryptography import utils\n from cryptography.exceptions import InternalError\n from cryptography.hazmat.bindings._openssl import ffi, lib\n from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n@@ -217,6 +216,30 @@\n )\n \n \n+def _verify_openssl_version(version):\n+ if version < 0x10000000:\n+ if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_098\"):\n+ warnings.warn(\n+ \"OpenSSL version 0.9.8 is no longer supported by the OpenSSL \"\n+ \"project, please upgrade. The next version of cryptography \"\n+ \"will completely remove support for it.\",\n+ DeprecationWarning\n+ )\n+ else:\n+ raise RuntimeError(\n+ \"You are linking against OpenSSL 0.9.8, which is no longer \"\n+ \"support by the OpenSSL project. You need to upgrade to a \"\n+ \"newer version of OpenSSL.\"\n+ )\n+ elif version < 0x10001000:\n+ warnings.warn(\n+ \"OpenSSL versions less than 1.0.1 are no longer supported by the \"\n+ \"OpenSSL project, please upgrade. A future version of \"\n+ \"cryptography will drop support for these versions of OpenSSL.\",\n+ DeprecationWarning\n+ )\n+\n+\n # OpenSSL is not thread safe until the locks are initialized. We call this\n # method in module scope so that it executes with the import lock. On\n # Pythons < 3.4 this import lock is a global lock, which can prevent a race\n@@ -224,17 +247,4 @@\n # is per module so this approach will not work.\n Binding.init_static_locks()\n \n-if Binding.lib.SSLeay() < 0x10000000:\n- warnings.warn(\n- \"OpenSSL version 0.9.8 is no longer supported by the OpenSSL project, \"\n- \"please upgrade. The next version of cryptography will drop support \"\n- \"for it.\",\n- utils.DeprecatedIn12\n- )\n-elif Binding.lib.SSLeay() < 0x10001000:\n- warnings.warn(\n- \"OpenSSL versions less than 1.0.1 are no longer supported by the \"\n- \"OpenSSL project, please upgrade. A future version of cryptography \"\n- \"will drop support for these versions.\",\n- DeprecationWarning\n- )\n+_verify_openssl_version(Binding.lib.SSLeay())\n", "issue": "Raise an error on openssl 0.9.8\nWith an env var (probably `CRYPTOGRAPHY_ALLOW_OPENSSL_098`) to allow disabling it for this one release.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport os\nimport threading\nimport types\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLError = collections.namedtuple(\"_OpenSSLError\",\n [\"code\", \"lib\", \"func\", \"reason\"])\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n err_text_reason = ffi.string(\n lib.ERR_error_string(err.code, ffi.NULL)\n )\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef ffi_callback(signature, name, **kwargs):\n \"\"\"Callback dispatcher\n\n The ffi_callback() dispatcher keeps callbacks compatible between dynamic\n and static callbacks.\n \"\"\"\n def wrapper(func):\n if lib.Cryptography_STATIC_CALLBACKS:\n # def_extern() returns a decorator that sets the internal\n # function pointer and returns the original function unmodified.\n ffi.def_extern(name=name, **kwargs)(func)\n callback = getattr(lib, name)\n else:\n # callback() wraps the function in a cdata function.\n callback = ffi.callback(signature, **kwargs)(func)\n return callback\n return wrapper\n\n\n@ffi_callback(\"int (*)(unsigned char *, int)\",\n name=\"Cryptography_rand_bytes\",\n error=-1)\ndef _osrandom_rand_bytes(buf, size):\n signed = ffi.cast(\"char *\", buf)\n result = os.urandom(size)\n signed[0:size] = result\n return 1\n\n\n@ffi_callback(\"int (*)(void)\", name=\"Cryptography_rand_status\")\ndef _osrandom_rand_status():\n return 1\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n excluded_names = set()\n for condition, names in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names |= set(names)\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _locks = None\n _lock_cb_handle = None\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n _osrandom_engine_id = ffi.new(\"const char[]\", b\"osrandom\")\n _osrandom_engine_name = ffi.new(\"const char[]\", b\"osrandom_engine\")\n _osrandom_method = 
ffi.new(\n \"RAND_METHOD *\",\n dict(bytes=_osrandom_rand_bytes,\n pseudorand=_osrandom_rand_bytes,\n status=_osrandom_rand_status)\n )\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n _openssl_assert(cls.lib, cls.lib.ERR_peek_error() == 0)\n\n engine = cls.lib.ENGINE_new()\n _openssl_assert(cls.lib, engine != cls.ffi.NULL)\n try:\n result = cls.lib.ENGINE_set_id(engine, cls._osrandom_engine_id)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_set_name(engine, cls._osrandom_engine_name)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_set_RAND(engine, cls._osrandom_method)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_add(engine)\n if result != 1:\n errors = _consume_errors(cls.lib)\n _openssl_assert(\n cls.lib,\n errors[0].reason == cls.lib.ENGINE_R_CONFLICTING_ENGINE_ID\n )\n\n finally:\n result = cls.lib.ENGINE_free(engine)\n _openssl_assert(cls.lib, result == 1)\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n wrapper = ffi_callback(\n \"void(int, int, const char *, int)\",\n name=\"Cryptography_locking_cb\",\n )\n cls._lock_cb_handle = wrapper(cls._lock_cb)\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\nif Binding.lib.SSLeay() < 0x10000000:\n warnings.warn(\n \"OpenSSL version 0.9.8 is no longer supported by the OpenSSL project, \"\n \"please upgrade. The next version of cryptography will drop support \"\n \"for it.\",\n utils.DeprecatedIn12\n )\nelif Binding.lib.SSLeay() < 0x10001000:\n warnings.warn(\n \"OpenSSL versions less than 1.0.1 are no longer supported by the \"\n \"OpenSSL project, please upgrade. A future version of cryptography \"\n \"will drop support for these versions.\",\n DeprecationWarning\n )\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]} | 3,045 | 654 |
gh_patches_debug_15752 | rasdani/github-patches | git_diff | boto__boto-2598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Route Tables: update describe_route_tables to support additional route types (VPC peering connection, NIC)
Routes can be created to network interfaces and VPC peering connections; however, these routes are not reflected properly in the DescribeRouteTables response.
Doc here:
- http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRouteTables.html
...however, the doc does not provide actual examples of the AWS response for these routes.
As such, I've repro'ed both route types and captured the AWS response XML here:
```
import boto, boto.vpc
conn = boto.vpc.connect_to_region('us-west-1')
vpc1 = conn.create_vpc("10.0.0.0/16");
vpc2 = conn.create_vpc("11.0.0.0/16");
route_table1 = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc1.id})[0]
# PCX route
vpc_pcx = conn.create_vpc_peering_connection(vpc1.id, vpc2.id)
conn.create_route(route_table1.id, "11.0.0.0/16", vpc_peering_connection_id=vpc_pcx.id)
# NIC route
subnet = conn.create_subnet(vpc1.id, "10.0.0.0/18")
eni = conn.create_network_interface(subnet.id)
conn.create_route(route_table1.id, "10.0.0.0/15", interface_id=eni.id)
# Describe route table
In [21]: route_table1 = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc1.id})[0]
2014-09-09 23:11:07,715 boto [DEBUG]:Method: GET
2014-09-09 23:11:07,715 boto [DEBUG]:Path: /
2014-09-09 23:11:07,715 boto [DEBUG]:Data:
2014-09-09 23:11:07,715 boto [DEBUG]:Headers: {}
2014-09-09 23:11:07,715 boto [DEBUG]:Host: ec2.us-west-1.amazonaws.com
2014-09-09 23:11:07,716 boto [DEBUG]:Port: 443
2014-09-09 23:11:07,716 boto [DEBUG]:Params: {'Filter.1.Name': 'association.main', 'Filter.2.Value.1': u'vpc-3f658b5a', 'Action': 'DescribeRouteTables', 'Version': '2014-05-01', 'Filter.2.Name': 'vpc-id', 'Filter.1.Value.1': 'true'}
2014-09-09 23:11:07,716 boto [DEBUG]:Token: None
2014-09-09 23:11:07,717 boto [DEBUG]:using _calc_signature_2
2014-09-09 23:11:07,717 boto [DEBUG]:query string: AWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01
2014-09-09 23:11:07,718 boto [DEBUG]:string_to_sign: GET
ec2.us-west-1.amazonaws.com
/
AWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01
2014-09-09 23:11:07,718 boto [DEBUG]:len(b64)=44
2014-09-09 23:11:07,718 boto [DEBUG]:base64 encoded digest: qAcNssrx85LkIJIUymuvM7dB1v527ogS8PGrsicg1f4=
2014-09-09 23:11:07,718 boto [DEBUG]:query_string: AWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01 Signature: qAcNssrx85LkIJIUymuvM7dB1v527ogS8PGrsicg1f4=
2014-09-09 23:11:07,718 boto [DEBUG]:Final headers: {'Content-Length': 0, 'Host': 'ec2.us-west-1.amazonaws.com', 'User-Agent': 'Boto/2.32.1 Python/2.6.5 Linux/2.6.32-45-server'}
2014-09-09 23:11:07,840 boto [DEBUG]:Response headers: [('transfer-encoding', 'chunked'), ('vary', 'Accept-Encoding'), ('server', 'AmazonEC2'), ('content-type', 'text/xml;charset=UTF-8'), ('date', 'Tue, 09 Sep 2014 23:11:07 GMT')]
2014-09-09 23:11:07,840 boto [DEBUG]:<?xml version="1.0" encoding="UTF-8"?>
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2014-05-01/">
<requestId>e22ff1da-fb06-4e0f-bda6-8555c18f0af6</requestId>
<routeTableSet>
<item>
<routeTableId>rtb-a754bcc2</routeTableId>
<vpcId>vpc-3f658b5a</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/16</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
<item>
<destinationCidrBlock>11.0.0.0/16</destinationCidrBlock>
<vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>10.0.0.0/15</destinationCidrBlock>
<networkInterfaceId>eni-884ec1d1</networkInterfaceId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-111ef574</routeTableAssociationId>
<routeTableId>rtb-a754bcc2</routeTableId>
<main>true</main>
</item>
</associationSet>
<propagatingVgwSet/>
<tagSet/>
</item>
</routeTableSet>
</DescribeRouteTablesResponse>
```
</issue>
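Based on the XML captured above, a minimal sketch of the two extra `endElement` branches a route parser would need in order to surface these routes (the Python attribute names are assumptions):

```python
class Route(object):
    def __init__(self, connection=None):
        self.destination_cidr_block = None
        self.gateway_id = None
        self.instance_id = None
        self.interface_id = None               # <networkInterfaceId>
        self.vpc_peering_connection_id = None  # <vpcPeeringConnectionId>
        self.state = None

    def endElement(self, name, value, connection):
        if name == 'networkInterfaceId':
            self.interface_id = value
        elif name == 'vpcPeeringConnectionId':
            self.vpc_peering_connection_id = value
        # ...existing branches (destinationCidrBlock, gatewayId, instanceId,
        # state) stay as they are...
```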
<code>
[start of boto/vpc/routetable.py]
1 # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a Route Table
24 """
25
26 from boto.ec2.ec2object import TaggedEC2Object
27 from boto.resultset import ResultSet
28
29 class RouteTable(TaggedEC2Object):
30
31 def __init__(self, connection=None):
32 super(RouteTable, self).__init__(connection)
33 self.id = None
34 self.vpc_id = None
35 self.routes = []
36 self.associations = []
37
38 def __repr__(self):
39 return 'RouteTable:%s' % self.id
40
41 def startElement(self, name, attrs, connection):
42 result = super(RouteTable, self).startElement(name, attrs, connection)
43
44 if result is not None:
45 # Parent found an interested element, just return it
46 return result
47
48 if name == 'routeSet':
49 self.routes = ResultSet([('item', Route)])
50 return self.routes
51 elif name == 'associationSet':
52 self.associations = ResultSet([('item', RouteAssociation)])
53 return self.associations
54 else:
55 return None
56
57 def endElement(self, name, value, connection):
58 if name == 'routeTableId':
59 self.id = value
60 elif name == 'vpcId':
61 self.vpc_id = value
62 else:
63 setattr(self, name, value)
64
65 class Route(object):
66 def __init__(self, connection=None):
67 self.destination_cidr_block = None
68 self.gateway_id = None
69 self.instance_id = None
70 self.state = None
71
72 def __repr__(self):
73 return 'Route:%s' % self.destination_cidr_block
74
75 def startElement(self, name, attrs, connection):
76 return None
77
78 def endElement(self, name, value, connection):
79 if name == 'destinationCidrBlock':
80 self.destination_cidr_block = value
81 elif name == 'gatewayId':
82 self.gateway_id = value
83 elif name == 'instanceId':
84 self.instance_id = value
85 elif name == 'state':
86 self.state = value
87
88 class RouteAssociation(object):
89 def __init__(self, connection=None):
90 self.id = None
91 self.route_table_id = None
92 self.subnet_id = None
93 self.main = False
94
95 def __repr__(self):
96 return 'RouteAssociation:%s' % self.id
97
98 def startElement(self, name, attrs, connection):
99 return None
100
101 def endElement(self, name, value, connection):
102 if name == 'routeTableAssociationId':
103 self.id = value
104 elif name == 'routeTableId':
105 self.route_table_id = value
106 elif name == 'subnetId':
107 self.subnet_id = value
108 elif name == 'main':
109 self.main = value == 'true'
110
[end of boto/vpc/routetable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py
--- a/boto/vpc/routetable.py
+++ b/boto/vpc/routetable.py
@@ -67,6 +67,8 @@
self.destination_cidr_block = None
self.gateway_id = None
self.instance_id = None
+ self.interface_id = None
+ self.vpc_peering_connection_id = None
self.state = None
def __repr__(self):
@@ -82,6 +84,10 @@
self.gateway_id = value
elif name == 'instanceId':
self.instance_id = value
+ elif name == 'networkInterfaceId':
+ self.interface_id = value
+ elif name == 'vpcPeeringConnectionId':
+ self.vpc_peering_connection_id = value
elif name == 'state':
self.state = value
| {"golden_diff": "diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py\n--- a/boto/vpc/routetable.py\n+++ b/boto/vpc/routetable.py\n@@ -67,6 +67,8 @@\n self.destination_cidr_block = None\n self.gateway_id = None\n self.instance_id = None\n+ self.interface_id = None\n+ self.vpc_peering_connection_id = None\n self.state = None\n \n def __repr__(self):\n@@ -82,6 +84,10 @@\n self.gateway_id = value\n elif name == 'instanceId':\n self.instance_id = value\n+ elif name == 'networkInterfaceId':\n+ self.interface_id = value\n+ elif name == 'vpcPeeringConnectionId':\n+ self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n", "issue": "Route Tables: update describe_route_tables to support additional route types (VPC peering connection, NIC)\nRoutes can be created to network interfaces and VPC peering connections, however these routes are not reflected properly in the DescribeRouteTables response.\n\nDoc here:\n- http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRouteTables.html\n\n...however, the doc does not provide actual examples of the AWS response for these routes.\n\nAs such, I've repro'ed both route types and captured the AWS response XML here:\n\n```\nimport boto, boto.vpc\nconn = boto.vpc.connect_to_region('us-west-1')\n\nvpc1 = conn.create_vpc(\"10.0.0.0/16\");\nvpc2 = conn.create_vpc(\"11.0.0.0/16\");\nroute_table1 = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc1.id})[0]\n\n# PCX route\nvpc_pcx = conn.create_vpc_peering_connection(vpc1.id, vpc2.id)\nconn.create_route(route_table1.id, \"11.0.0.0/16\", vpc_peering_connection_id=vpc_pcx.id)\n\n# NIC route\nsubnet = conn.create_subnet(vpc1.id, \"10.0.0.0/18\")\neni = conn.create_network_interface(subnet.id)\nconn.create_route(route_table1.id, \"10.0.0.0/15\", interface_id=eni.id)\n\n# Describe route table\nIn [21]: route_table1 = conn.get_all_route_tables(filters={'association.main':'true','vpc-id':vpc1.id})[0]\n2014-09-09 23:11:07,715 boto [DEBUG]:Method: GET\n2014-09-09 23:11:07,715 boto [DEBUG]:Path: /\n2014-09-09 23:11:07,715 boto [DEBUG]:Data: \n2014-09-09 23:11:07,715 boto [DEBUG]:Headers: {}\n2014-09-09 23:11:07,715 boto [DEBUG]:Host: ec2.us-west-1.amazonaws.com\n2014-09-09 23:11:07,716 boto [DEBUG]:Port: 443\n2014-09-09 23:11:07,716 boto [DEBUG]:Params: {'Filter.1.Name': 'association.main', 'Filter.2.Value.1': u'vpc-3f658b5a', 'Action': 'DescribeRouteTables', 'Version': '2014-05-01', 'Filter.2.Name': 'vpc-id', 'Filter.1.Value.1': 'true'}\n2014-09-09 23:11:07,716 boto [DEBUG]:Token: None\n2014-09-09 23:11:07,717 boto [DEBUG]:using _calc_signature_2\n2014-09-09 23:11:07,717 boto [DEBUG]:query string: AWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01\n2014-09-09 23:11:07,718 boto [DEBUG]:string_to_sign: GET\nec2.us-west-1.amazonaws.com\n/\nAWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01\n2014-09-09 23:11:07,718 boto [DEBUG]:len(b64)=44\n2014-09-09 23:11:07,718 boto [DEBUG]:base64 encoded digest: qAcNssrx85LkIJIUymuvM7dB1v527ogS8PGrsicg1f4=\n2014-09-09 23:11:07,718 boto [DEBUG]:query_string: 
AWSAccessKeyId=AKIAI7ZA6HB42ZXF3LOA&Action=DescribeRouteTables&Filter.1.Name=association.main&Filter.1.Value.1=true&Filter.2.Name=vpc-id&Filter.2.Value.1=vpc-3f658b5a&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=2014-09-09T23%3A11%3A07Z&Version=2014-05-01 Signature: qAcNssrx85LkIJIUymuvM7dB1v527ogS8PGrsicg1f4=\n2014-09-09 23:11:07,718 boto [DEBUG]:Final headers: {'Content-Length': 0, 'Host': 'ec2.us-west-1.amazonaws.com', 'User-Agent': 'Boto/2.32.1 Python/2.6.5 Linux/2.6.32-45-server'}\n2014-09-09 23:11:07,840 boto [DEBUG]:Response headers: [('transfer-encoding', 'chunked'), ('vary', 'Accept-Encoding'), ('server', 'AmazonEC2'), ('content-type', 'text/xml;charset=UTF-8'), ('date', 'Tue, 09 Sep 2014 23:11:07 GMT')]\n2014-09-09 23:11:07,840 boto [DEBUG]:<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<DescribeRouteTablesResponse xmlns=\"http://ec2.amazonaws.com/doc/2014-05-01/\">\n <requestId>e22ff1da-fb06-4e0f-bda6-8555c18f0af6</requestId>\n <routeTableSet>\n <item>\n <routeTableId>rtb-a754bcc2</routeTableId>\n <vpcId>vpc-3f658b5a</vpcId>\n <routeSet>\n <item>\n <destinationCidrBlock>10.0.0.0/16</destinationCidrBlock>\n <gatewayId>local</gatewayId>\n <state>active</state>\n <origin>CreateRouteTable</origin>\n </item>\n <item>\n <destinationCidrBlock>11.0.0.0/16</destinationCidrBlock>\n <vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>\n <state>blackhole</state>\n <origin>CreateRoute</origin>\n </item>\n <item>\n <destinationCidrBlock>10.0.0.0/15</destinationCidrBlock>\n <networkInterfaceId>eni-884ec1d1</networkInterfaceId>\n <state>blackhole</state>\n <origin>CreateRoute</origin>\n </item>\n </routeSet>\n <associationSet>\n <item>\n <routeTableAssociationId>rtbassoc-111ef574</routeTableAssociationId>\n <routeTableId>rtb-a754bcc2</routeTableId>\n <main>true</main>\n </item>\n </associationSet>\n <propagatingVgwSet/>\n <tagSet/>\n </item>\n </routeTableSet>\n</DescribeRouteTablesResponse>\n```\n\n", "before_files": [{"content": "# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a Route Table\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\nfrom boto.resultset import ResultSet\n\nclass RouteTable(TaggedEC2Object):\n\n def __init__(self, connection=None):\n super(RouteTable, self).__init__(connection)\n self.id = None\n self.vpc_id = None\n self.routes = []\n self.associations = []\n\n def __repr__(self):\n return 'RouteTable:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n result = super(RouteTable, self).startElement(name, attrs, connection)\n\n if result is not None:\n # Parent found an interested element, just return it\n return result\n\n if name == 'routeSet':\n self.routes = ResultSet([('item', Route)])\n return self.routes\n elif name == 'associationSet':\n self.associations = ResultSet([('item', RouteAssociation)])\n return self.associations\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableId':\n self.id = value\n elif name == 'vpcId':\n self.vpc_id = value\n else:\n setattr(self, name, value)\n\nclass Route(object):\n def __init__(self, connection=None):\n self.destination_cidr_block = None\n self.gateway_id = None\n self.instance_id = None\n self.state = None\n\n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'destinationCidrBlock':\n self.destination_cidr_block = value\n elif name == 'gatewayId':\n self.gateway_id = value\n elif name == 'instanceId':\n self.instance_id = value\n elif name == 'state':\n self.state = value\n\nclass RouteAssociation(object):\n def __init__(self, connection=None):\n self.id = None\n self.route_table_id = None\n self.subnet_id = None\n self.main = False\n\n def __repr__(self):\n return 'RouteAssociation:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableAssociationId':\n self.id = value\n elif name == 'routeTableId':\n self.route_table_id = value\n elif name == 'subnetId':\n self.subnet_id = value\n elif name == 'main':\n self.main = value == 'true'\n", "path": "boto/vpc/routetable.py"}]} | 3,596 | 203 |
gh_patches_debug_27222 | rasdani/github-patches | git_diff | psychopy__psychopy-2835 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Working directory issues with 2020.1.1 (most likely related to the new run window)
See https://discourse.psychopy.org/t/working-directory-change-with-2020-1-and-later/11143 and https://discourse.psychopy.org/t/both-the-stim3d-py-and-trialhandler-py-programs-with-psychopy-2020-1-1-do-not-run-properly/11201
At least on Mac (not tested on Windows), since upgrading to 2020.1.1, running a script sets the working directory to /Applications/PsychoPy3.app/Contents/Resources rather than the directory that contains the script. This completely breaks any script that uses relative paths and expects the working directory to be the script's directory, including some of the demos (see the second link above) and my PyHab project.
In the short run you can just add the following to the top of every file where it matters:
```
import os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
```
but this is basically a kludge and bad for backwards-compatibility. Does it need to be this way or can we get the new running window to use the script's directory as the working directory by default?
</issue>
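An equivalent form of the workaround quoted in the issue, using `pathlib`; this is just the same per-script kludge restated, not a change to the Runner itself:

```python
import os
from pathlib import Path

os.chdir(Path(__file__).resolve().parent)
```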
<code>
[start of psychopy/__init__.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 # --------------------------------------------------------------------------
9 # This file is automatically generated during build (do not edit directly).
10 # --------------------------------------------------------------------------
11
12 import os
13 import sys
14
15 __version__ = '2020.1.2'
16 __license__ = 'GNU GPLv3 (or more recent equivalent)'
17 __author__ = 'Jonathan Peirce'
18 __author_email__ = '[email protected]'
19 __maintainer_email__ = '[email protected]'
20 __url__ = 'http://www.psychopy.org/'
21 __download_url__ = 'https://github.com/psychopy/psychopy/releases/'
22 __git_sha__ = 'n/a'
23 __build_platform__ = 'n/a'
24
25 __all__ = ["gui", "misc", "visual", "core",
26 "event", "data", "sound", "microphone"]
27
28 # for developers the following allows access to the current git sha from
29 # their repository
30 if __git_sha__ == 'n/a':
31 from subprocess import check_output, PIPE
32 # see if we're in a git repo and fetch from there
33 try:
34 thisFileLoc = os.path.split(__file__)[0]
35 output = check_output(['git', 'rev-parse', '--short', 'HEAD'],
36 cwd=thisFileLoc, stderr=PIPE)
37 except Exception:
38 output = False
39 if output:
40 __git_sha__ = output.strip() # remove final linefeed
41
42 # update preferences and the user paths
43 if 'installing' not in locals():
44 from psychopy.preferences import prefs
45 for pathName in prefs.general['paths']:
46 sys.path.append(pathName)
47
48 from psychopy.tools.versionchooser import useVersion, ensureMinimal
49
50 # import readline here to get around an issue with sounddevice
51 # issues GH-2230 GH-2344 GH-2662
52 try:
53 import readline
54 except ImportError:
55 pass # all that will happen is the stderr/stdout might get redirected
56
57
[end of psychopy/__init__.py]
[start of setupApp.py]
1 #!/usr/bin/env python
2 ################
3 # see notes at bottom for requirements
4 from __future__ import absolute_import, print_function
5 import glob
6 import os
7 import sys
8 from sys import platform
9 from distutils.core import setup
10 from pkg_resources import parse_version
11 # import versioneer
12 import psychopy
13 version = psychopy.__version__
14
15 # regenerate __init__.py only if we're in the source repos (not in a zip file)
16 try:
17 import createInitFile # won't exist in a sdist.zip
18 writeNewInit=True
19 except:
20 writeNewInit=False
21 if writeNewInit:
22 vStr = createInitFile.createInitFile(dist='bdist')
23
24 #define the extensions to compile if necess
25 packageData = []
26 requires = []
27
28 if platform != 'darwin':
29 raise RuntimeError("setupApp.py is only for building Mac Standalone bundle")
30
31 import bdist_mpkg
32 import py2app
33 resources = glob.glob('psychopy/app/Resources/*')
34 resources.append('/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h')
35 frameworks = ["libavbin.dylib", "/usr/lib/libxml2.2.dylib", #"libyaml.dylib",
36 "libevent.dylib", "libffi.dylib",
37 "libmp3lame.0.dylib",
38 "/usr/local/Cellar/glfw/3.2.1/lib/libglfw.3.2.dylib",
39 ]
40 opencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))
41 frameworks.extend(opencvLibs)
42
43 import macholib
44 #print("~"*60 + "macholib version: "+macholib.__version__)
45
46 if parse_version(macholib.__version__) <= parse_version('1.7'):
47 print("Applying macholib patch...")
48 import macholib.dyld
49 import macholib.MachOGraph
50 dyld_find_1_7 = macholib.dyld.dyld_find
51 def dyld_find(name, loader=None, **kwargs):
52 #print("~"*60 + "calling alternate dyld_find")
53 if loader is not None:
54 kwargs['loader_path'] = loader
55 return dyld_find_1_7(name, **kwargs)
56 macholib.MachOGraph.dyld_find = dyld_find
57
58 includes = ['Tkinter', 'tkFileDialog',
59 'imp', 'subprocess', 'shlex',
60 'shelve', # for scipy.io
61 '_elementtree', 'pyexpat', # for openpyxl
62 'hid',
63 'pyo', 'greenlet', 'zmq', 'tornado',
64 'psutil', # for iohub
65 'tobii_research', # need tobii_research file and tobiiresearch pkg
66 'pysoundcard', 'soundfile', 'sounddevice', 'readline',
67 'hid',
68 'xlwt', # writes excel files for pandas
69 'vlc', # install with pip install python-vlc
70 'msgpack_numpy',
71 'configparser',
72 ]
73 packages = ['wx', 'psychopy',
74 'pyglet', 'pygame', 'pytz', 'OpenGL', 'glfw',
75 'scipy', 'matplotlib', 'lxml', 'xml', 'openpyxl',
76 'moviepy', 'imageio', 'imageio_ffmpeg',
77 '_sounddevice_data', '_soundfile_data',
78 'cffi','pycparser',
79 'PIL', # 'Image',
80 'objc', 'Quartz', 'AppKit', 'QTKit', 'Cocoa',
81 'Foundation', 'CoreFoundation',
82 'pkg_resources', # needed for objc
83 'pyolib',
84 'requests', 'certifi', 'cryptography',
85 'pyosf',
86 # for unit testing
87 'coverage',
88 # handy external science libs
89 'serial',
90 'egi', 'pylink', 'tobiiresearch',
91 'pyxid', 'pyxid2', 'ftd2xx', # ftd2xx is used by cedrus
92 'pandas', 'tables', # 'cython',
93 'msgpack', 'yaml', 'gevent', # for ioHub
94 # these aren't needed, but liked
95 'psychopy_ext', 'pyfilesec',
96 'bidi', 'arabic_reshaper', # for right-left language conversions
97 # for Py3 compatibility
98 'future', 'past', 'lib2to3',
99 'json_tricks', # allows saving arrays/dates in json
100 'git', 'gitlab',
101 'astunparse', 'esprima', # for translating/adapting py/JS
102 'pylsl', 'pygaze', 'smite',
103 'cv2',
104 'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent
105 'questplus',
106 'metapensiero.pj', 'dukpy', 'macropy',
107 ]
108
109 if sys.version_info.major >= 3:
110 packages.extend(['PyQt5'])
111 else:
112 # not available or not working under Python3:
113 includes.extend(['UserString', 'ioLabs', 'FileDialog'])
114 packages.extend(['PyQt4', 'labjack', 'rusocsci'])
115 # is available but py2app can't seem to find it:
116 packages.extend(['OpenGL'])
117
118 setup(
119 app=['psychopy/app/psychopyApp.py'],
120 options=dict(py2app=dict(
121 includes=includes,
122 packages=packages,
123 excludes=['bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',
124 'libsz.2.dylib',
125 # 'stringprep',
126 'functools32',
127 ], # anything we need to forcibly exclude?
128 resources=resources,
129 argv_emulation=True,
130 site_packages=True,
131 frameworks=frameworks,
132 iconfile='psychopy/app/Resources/psychopy.icns',
133 plist=dict(
134 CFBundleIconFile='psychopy.icns',
135 CFBundleName = "PsychoPy3",
136 CFBundleShortVersionString = version, # must be in X.X.X format
137 CFBundleGetInfoString = "PsychoPy3 "+version,
138 CFBundleExecutable = "PsychoPy3",
139 CFBundleIdentifier = "org.psychopy.PsychoPy3",
140 CFBundleLicense = "GNU GPLv3+",
141 CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],
142 CFBundleTypeRole='Editor')],
143 LSEnvironment=dict(PATH="/usr/local/git/bin:/usr/local/bin:"
144 "/usr/local:/usr/bin:/usr/sbin"),
145 ),
146 )) # end of the options dict
147 )
148
149
150 # ugly hack for opencv2:
151 # As of opencv 2.4.5 the cv2.so binary used rpath to a fixed
152 # location to find libs and even more annoyingly it then appended
153 # 'lib' to the rpath as well. These were fine for the packaged
154 # framework python but the libs in an app bundle are different.
155 # So, create symlinks so they appear in the same place as in framework python
156 rpath = "dist/PsychoPy3.app/Contents/Resources/"
157 for libPath in opencvLibs:
158 libname = os.path.split(libPath)[-1]
159 realPath = "../../Frameworks/"+libname # relative path (w.r.t. the fake)
160 fakePath = os.path.join(rpath, "lib", libname)
161 os.symlink(realPath, fakePath)
162 # they even did this for Python lib itself, which is in diff location
163 realPath = "../Frameworks/Python.framework/Python" # relative to the fake path
164 fakePath = os.path.join(rpath, "Python")
165 os.symlink(realPath, fakePath)
166
167 if writeNewInit:
168 # remove unwanted info about this system post-build
169 createInitFile.createInitFile(dist=None)
170
171 # running testApp from within the app raises wx errors
172 # shutil.rmtree("dist/PsychoPy3.app/Contents/Resources/lib/python2.6/psychopy/tests/testTheApp")
173
[end of setupApp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/__init__.py b/psychopy/__init__.py
--- a/psychopy/__init__.py
+++ b/psychopy/__init__.py
@@ -12,7 +12,7 @@
import os
import sys
-__version__ = '2020.1.2'
+__version__ = '2020.1.3'
__license__ = 'GNU GPLv3 (or more recent equivalent)'
__author__ = 'Jonathan Peirce'
__author_email__ = '[email protected]'
diff --git a/setupApp.py b/setupApp.py
--- a/setupApp.py
+++ b/setupApp.py
@@ -134,10 +134,11 @@
CFBundleIconFile='psychopy.icns',
CFBundleName = "PsychoPy3",
CFBundleShortVersionString = version, # must be in X.X.X format
- CFBundleGetInfoString = "PsychoPy3 "+version,
+ CFBundleVersion = version,
CFBundleExecutable = "PsychoPy3",
CFBundleIdentifier = "org.psychopy.PsychoPy3",
CFBundleLicense = "GNU GPLv3+",
+ NSHumanReadableCopyright = "Open Science Tools Ltd.",
CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],
CFBundleTypeRole='Editor')],
LSEnvironment=dict(PATH="/usr/local/git/bin:/usr/local/bin:"
| {"golden_diff": "diff --git a/psychopy/__init__.py b/psychopy/__init__.py\n--- a/psychopy/__init__.py\n+++ b/psychopy/__init__.py\n@@ -12,7 +12,7 @@\n import os\n import sys\n \n-__version__ = '2020.1.2'\n+__version__ = '2020.1.3'\n __license__ = 'GNU GPLv3 (or more recent equivalent)'\n __author__ = 'Jonathan Peirce'\n __author_email__ = '[email protected]'\ndiff --git a/setupApp.py b/setupApp.py\n--- a/setupApp.py\n+++ b/setupApp.py\n@@ -134,10 +134,11 @@\n CFBundleIconFile='psychopy.icns',\n CFBundleName = \"PsychoPy3\",\n CFBundleShortVersionString = version, # must be in X.X.X format\n- CFBundleGetInfoString = \"PsychoPy3 \"+version,\n+ CFBundleVersion = version,\n CFBundleExecutable = \"PsychoPy3\",\n CFBundleIdentifier = \"org.psychopy.PsychoPy3\",\n CFBundleLicense = \"GNU GPLv3+\",\n+ NSHumanReadableCopyright = \"Open Science Tools Ltd.\",\n CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],\n CFBundleTypeRole='Editor')],\n LSEnvironment=dict(PATH=\"/usr/local/git/bin:/usr/local/bin:\"\n", "issue": "Working directory issues with 2020.1.1 (most likely related to the new run window)\nSee https://discourse.psychopy.org/t/working-directory-change-with-2020-1-and-later/11143 and https://discourse.psychopy.org/t/both-the-stim3d-py-and-trialhandler-py-programs-with-psychopy-2020-1-1-do-not-run-properly/11201\r\n\r\nAt least on Mac (have not tested on Windows), since upgrading to 2020.1.1, if you run a script, the working directory is set to /Applications/PsychoPy3.app/Contents/Resources, rather than the directory that contains the script. This completely breaks any script that uses relative paths for relevant files expecting the working directory to be the script's directory. This includes some of the demos (see second link above) and my PyHab project.\r\n\r\nIn the short run you can just add the following to the top of every file where it matters:\r\n\r\n```\r\nimport os\r\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\r\n```\r\n\r\nbut this is basically a kludge and bad for backwards-compatibility. 
Does it need to be this way or can we get the new running window to use the script's directory as the working directory by default?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\n# --------------------------------------------------------------------------\n# This file is automatically generated during build (do not edit directly).\n# --------------------------------------------------------------------------\n\nimport os\nimport sys\n\n__version__ = '2020.1.2'\n__license__ = 'GNU GPLv3 (or more recent equivalent)'\n__author__ = 'Jonathan Peirce'\n__author_email__ = '[email protected]'\n__maintainer_email__ = '[email protected]'\n__url__ = 'http://www.psychopy.org/'\n__download_url__ = 'https://github.com/psychopy/psychopy/releases/'\n__git_sha__ = 'n/a'\n__build_platform__ = 'n/a'\n\n__all__ = [\"gui\", \"misc\", \"visual\", \"core\",\n \"event\", \"data\", \"sound\", \"microphone\"]\n\n# for developers the following allows access to the current git sha from\n# their repository\nif __git_sha__ == 'n/a':\n from subprocess import check_output, PIPE\n # see if we're in a git repo and fetch from there\n try:\n thisFileLoc = os.path.split(__file__)[0]\n output = check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=thisFileLoc, stderr=PIPE)\n except Exception:\n output = False\n if output:\n __git_sha__ = output.strip() # remove final linefeed\n\n# update preferences and the user paths\nif 'installing' not in locals():\n from psychopy.preferences import prefs\n for pathName in prefs.general['paths']:\n sys.path.append(pathName)\n\n from psychopy.tools.versionchooser import useVersion, ensureMinimal\n\n# import readline here to get around an issue with sounddevice\n# issues GH-2230 GH-2344 GH-2662\ntry:\n import readline\nexcept ImportError:\n pass # all that will happen is the stderr/stdout might get redirected\n\n", "path": "psychopy/__init__.py"}, {"content": "#!/usr/bin/env python\n################\n# see notes at bottom for requirements\nfrom __future__ import absolute_import, print_function\nimport glob\nimport os\nimport sys\nfrom sys import platform\nfrom distutils.core import setup\nfrom pkg_resources import parse_version\n# import versioneer\nimport psychopy\nversion = psychopy.__version__\n\n# regenerate __init__.py only if we're in the source repos (not in a zip file)\ntry:\n import createInitFile # won't exist in a sdist.zip\n writeNewInit=True\nexcept:\n writeNewInit=False\nif writeNewInit:\n vStr = createInitFile.createInitFile(dist='bdist')\n\n#define the extensions to compile if necess\npackageData = []\nrequires = []\n\nif platform != 'darwin':\n raise RuntimeError(\"setupApp.py is only for building Mac Standalone bundle\")\n\nimport bdist_mpkg\nimport py2app\nresources = glob.glob('psychopy/app/Resources/*')\nresources.append('/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h')\nframeworks = [\"libavbin.dylib\", \"/usr/lib/libxml2.2.dylib\", #\"libyaml.dylib\",\n \"libevent.dylib\", \"libffi.dylib\",\n \"libmp3lame.0.dylib\",\n \"/usr/local/Cellar/glfw/3.2.1/lib/libglfw.3.2.dylib\",\n ]\nopencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))\nframeworks.extend(opencvLibs)\n\nimport macholib\n#print(\"~\"*60 + \"macholib version: \"+macholib.__version__)\n\nif parse_version(macholib.__version__) <= 
parse_version('1.7'):\n print(\"Applying macholib patch...\")\n import macholib.dyld\n import macholib.MachOGraph\n dyld_find_1_7 = macholib.dyld.dyld_find\n def dyld_find(name, loader=None, **kwargs):\n #print(\"~\"*60 + \"calling alternate dyld_find\")\n if loader is not None:\n kwargs['loader_path'] = loader\n return dyld_find_1_7(name, **kwargs)\n macholib.MachOGraph.dyld_find = dyld_find\n\nincludes = ['Tkinter', 'tkFileDialog',\n 'imp', 'subprocess', 'shlex',\n 'shelve', # for scipy.io\n '_elementtree', 'pyexpat', # for openpyxl\n 'hid',\n 'pyo', 'greenlet', 'zmq', 'tornado',\n 'psutil', # for iohub\n 'tobii_research', # need tobii_research file and tobiiresearch pkg\n 'pysoundcard', 'soundfile', 'sounddevice', 'readline',\n 'hid',\n 'xlwt', # writes excel files for pandas\n 'vlc', # install with pip install python-vlc\n 'msgpack_numpy',\n 'configparser',\n ]\npackages = ['wx', 'psychopy',\n 'pyglet', 'pygame', 'pytz', 'OpenGL', 'glfw',\n 'scipy', 'matplotlib', 'lxml', 'xml', 'openpyxl',\n 'moviepy', 'imageio', 'imageio_ffmpeg',\n '_sounddevice_data', '_soundfile_data',\n 'cffi','pycparser',\n 'PIL', # 'Image',\n 'objc', 'Quartz', 'AppKit', 'QTKit', 'Cocoa',\n 'Foundation', 'CoreFoundation',\n 'pkg_resources', # needed for objc\n 'pyolib',\n 'requests', 'certifi', 'cryptography',\n 'pyosf',\n # for unit testing\n 'coverage',\n # handy external science libs\n 'serial',\n 'egi', 'pylink', 'tobiiresearch',\n 'pyxid', 'pyxid2', 'ftd2xx', # ftd2xx is used by cedrus\n 'pandas', 'tables', # 'cython',\n 'msgpack', 'yaml', 'gevent', # for ioHub\n # these aren't needed, but liked\n 'psychopy_ext', 'pyfilesec',\n 'bidi', 'arabic_reshaper', # for right-left language conversions\n # for Py3 compatibility\n 'future', 'past', 'lib2to3',\n 'json_tricks', # allows saving arrays/dates in json\n 'git', 'gitlab',\n 'astunparse', 'esprima', # for translating/adapting py/JS\n 'pylsl', 'pygaze', 'smite',\n 'cv2',\n 'badapted', 'darc_toolbox', # adaptive methods from Ben Vincent\n 'questplus',\n 'metapensiero.pj', 'dukpy', 'macropy',\n ]\n\nif sys.version_info.major >= 3:\n packages.extend(['PyQt5'])\nelse:\n # not available or not working under Python3:\n includes.extend(['UserString', 'ioLabs', 'FileDialog'])\n packages.extend(['PyQt4', 'labjack', 'rusocsci'])\n # is available but py2app can't seem to find it:\n packages.extend(['OpenGL'])\n\nsetup(\n app=['psychopy/app/psychopyApp.py'],\n options=dict(py2app=dict(\n includes=includes,\n packages=packages,\n excludes=['bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',\n 'libsz.2.dylib',\n # 'stringprep',\n 'functools32',\n ], # anything we need to forcibly exclude?\n resources=resources,\n argv_emulation=True,\n site_packages=True,\n frameworks=frameworks,\n iconfile='psychopy/app/Resources/psychopy.icns',\n plist=dict(\n CFBundleIconFile='psychopy.icns',\n CFBundleName = \"PsychoPy3\",\n CFBundleShortVersionString = version, # must be in X.X.X format\n CFBundleGetInfoString = \"PsychoPy3 \"+version,\n CFBundleExecutable = \"PsychoPy3\",\n CFBundleIdentifier = \"org.psychopy.PsychoPy3\",\n CFBundleLicense = \"GNU GPLv3+\",\n CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],\n CFBundleTypeRole='Editor')],\n LSEnvironment=dict(PATH=\"/usr/local/git/bin:/usr/local/bin:\"\n \"/usr/local:/usr/bin:/usr/sbin\"),\n ),\n )) # end of the options dict\n)\n\n\n# ugly hack for opencv2:\n# As of opencv 2.4.5 the cv2.so binary used rpath to a fixed\n# location to find libs and even more annoyingly it then appended\n# 'lib' to the rpath as well. 
These were fine for the packaged\n# framework python but the libs in an app bundle are different.\n# So, create symlinks so they appear in the same place as in framework python\nrpath = \"dist/PsychoPy3.app/Contents/Resources/\"\nfor libPath in opencvLibs:\n libname = os.path.split(libPath)[-1]\n realPath = \"../../Frameworks/\"+libname # relative path (w.r.t. the fake)\n fakePath = os.path.join(rpath, \"lib\", libname)\n os.symlink(realPath, fakePath)\n# they even did this for Python lib itself, which is in diff location\nrealPath = \"../Frameworks/Python.framework/Python\" # relative to the fake path\nfakePath = os.path.join(rpath, \"Python\")\nos.symlink(realPath, fakePath)\n\nif writeNewInit:\n # remove unwanted info about this system post-build\n createInitFile.createInitFile(dist=None)\n\n# running testApp from within the app raises wx errors\n# shutil.rmtree(\"dist/PsychoPy3.app/Contents/Resources/lib/python2.6/psychopy/tests/testTheApp\")\n", "path": "setupApp.py"}]} | 3,608 | 328 |
gh_patches_debug_4846 | rasdani/github-patches | git_diff | carpentries__amy-438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Event's start date must be earlier than its end date
We should enforce that the start date is earlier than the end date.
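A minimal sketch of one way to enforce this with Django form validation (the field names, the method used, and the error message are illustrative only, not a prescribed implementation):

```python
from django import forms

class EventDatesForm(forms.Form):
    # Stand-in for the real event form; only the two date fields matter here.
    start = forms.DateField(required=False)
    end = forms.DateField(required=False)

    def clean(self):
        cleaned_data = super().clean()
        start = cleaned_data.get('start')
        end = cleaned_data.get('end')
        # Reject any event whose end date falls before its start date.
        if start and end and end < start:
            raise forms.ValidationError('End date must not be earlier than the start date.')
        return cleaned_data
```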
</issue>
<code>
[start of workshops/forms.py]
1 from django import forms
2 from django.forms import HiddenInput, CheckboxSelectMultiple
3
4 from crispy_forms.helper import FormHelper
5 from crispy_forms.layout import Layout, Div, HTML, Submit, Field
6 from crispy_forms.bootstrap import FormActions
7 from django_countries.fields import CountryField
8 from selectable import forms as selectable
9
10 from workshops.models import (
11 Award, Event, Lesson, Person, Task, KnowledgeDomain,
12 )
13 from workshops import lookups
14
15 INSTRUCTORS_NUM = 10 # how many instrutors to return from a search by default
16
17 AUTOCOMPLETE_HELP_TEXT = (
18 "Autocomplete field; type characters to view available options, "
19 "then select desired item from list."
20 )
21
22 DATE_HELP_TEXT = "Select date using widget, or enter in YYYY-MM-DD format."
23
24
25 class BootstrapHelper(FormHelper):
26 form_class = 'form-horizontal'
27 label_class = 'col-lg-2'
28 field_class = 'col-lg-8'
29
30 def __init__(self, form=None):
31 super().__init__(form)
32
33 self.attrs['role'] = 'form'
34 self.inputs.append(Submit('submit', 'Submit'))
35
36
37 class BootstrapHelperWithAdd(BootstrapHelper):
38 def __init__(self, form=None):
39 super().__init__(form)
40
41 self.inputs[-1] = Submit('submit', 'Add')
42
43
44 class BootstrapHelperFilter(FormHelper):
45 form_method = 'get'
46
47 def __init__(self, form=None):
48 super().__init__(form)
49 self.attrs['role'] = 'form'
50 self.inputs.append(Submit('', 'Submit'))
51
52
53 bootstrap_helper = BootstrapHelper()
54 bootstrap_helper_with_add = BootstrapHelperWithAdd()
55 bootstrap_helper_filter = BootstrapHelperFilter()
56
57
58 class InstructorsForm(forms.Form):
59 '''Represent instructor matching form.'''
60
61 latitude = forms.FloatField(label='Latitude',
62 min_value=-90.0,
63 max_value=90.0,
64 required=False)
65 longitude = forms.FloatField(label='Longitude',
66 min_value=-180.0,
67 max_value=180.0,
68 required=False)
69 airport = selectable.AutoCompleteSelectField(
70 lookup_class=lookups.AirportLookup,
71 label='Airport',
72 required=False,
73 widget=selectable.AutoComboboxSelectWidget(
74 lookup_class=lookups.AirportLookup,
75 ),
76 )
77
78 country = CountryField().formfield(required=False)
79
80 lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),
81 widget=CheckboxSelectMultiple(),
82 required=False)
83
84 def __init__(self, *args, **kwargs):
85 '''Build checkboxes for qualifications dynamically.'''
86 super(InstructorsForm, self).__init__(*args, **kwargs)
87 self.helper = FormHelper(self)
88 self.helper.form_class = 'form-inline'
89 self.helper.form_method = 'get'
90 self.helper.layout = Layout(
91 Div(
92 Div(
93 'latitude',
94 'longitude',
95 css_class='panel-body'
96 ),
97 css_class='panel panel-default ',
98 ),
99 HTML('<p>OR</p>'),
100 Div(
101 Div(
102 'airport',
103 css_class='panel-body'
104 ),
105 css_class='panel panel-default ',
106 ),
107 HTML('<p>OR</p>'),
108 Div(
109 Div(
110 'country',
111 css_class='panel-body'
112 ),
113 css_class='panel panel-default ',
114 ),
115 'lessons',
116 FormActions(
117 Submit('submit', 'Submit'),
118 ),
119 )
120
121 def clean(self):
122 cleaned_data = super(InstructorsForm, self).clean()
123 airport = cleaned_data.get('airport')
124 lat = cleaned_data.get('latitude')
125 long = cleaned_data.get('longitude')
126 country = cleaned_data.get('country')
127
128 sum = bool(airport) + bool(lat and long) + bool(country)
129 # user can specify only one: either airport, or lat&long, or country
130 if sum != 1:
131 raise forms.ValidationError('Must specify an airport, or latitude'
132 ' and longitude, or a country.')
133 return cleaned_data
134
135
136 class PersonBulkAddForm(forms.Form):
137 '''Represent CSV upload form for bulk adding people.'''
138
139 file = forms.FileField()
140
141
142 class SearchForm(forms.Form):
143 '''Represent general searching form.'''
144
145 term = forms.CharField(label='term',
146 max_length=100)
147 in_sites = forms.BooleanField(label='in sites',
148 required=False,
149 initial=True)
150 in_events = forms.BooleanField(label='in events',
151 required=False,
152 initial=True)
153 in_persons = forms.BooleanField(label='in persons',
154 required=False,
155 initial=True)
156 in_airports = forms.BooleanField(label='in airports',
157 required=False,
158 initial=True)
159
160
161 class DebriefForm(forms.Form):
162 '''Represent general debrief form.'''
163 begin_date = forms.DateField(
164 label='Begin date as YYYY-MM-DD',
165 input_formats=['%Y-%m-%d', ]
166 )
167 end_date = forms.DateField(
168 label='End date as YYYY-MD-DD',
169 input_formats=['%Y-%m-%d', ]
170 )
171
172
173 class EventForm(forms.ModelForm):
174
175 site = selectable.AutoCompleteSelectField(
176 lookup_class=lookups.SiteLookup,
177 label='Site',
178 required=True,
179 help_text=AUTOCOMPLETE_HELP_TEXT,
180 widget=selectable.AutoComboboxSelectWidget,
181 )
182
183 organizer = selectable.AutoCompleteSelectField(
184 lookup_class=lookups.SiteLookup,
185 label='Organizer',
186 required=False,
187 help_text=AUTOCOMPLETE_HELP_TEXT,
188 widget=selectable.AutoComboboxSelectWidget,
189 )
190
191 def __init__(self, *args, **kwargs):
192 super().__init__(*args, **kwargs)
193 self.fields['start'].help_text = DATE_HELP_TEXT
194 self.fields['end'].help_text = DATE_HELP_TEXT
195
196 def clean_slug(self):
197 # Ensure slug is not an integer value for Event.get_by_ident
198 data = self.cleaned_data['slug']
199
200 try:
201 int(data)
202 except ValueError:
203 pass
204 else:
205 raise forms.ValidationError("Slug must not be an integer-value.")
206
207 return data
208
209 class Meta:
210 model = Event
211 # reorder fields, don't display 'deleted' field
212 fields = ('slug', 'start', 'end', 'site', 'organizer',
213 'tags', 'url', 'reg_key', 'admin_fee', 'invoiced',
214 'attendance', 'notes')
215
216 class Media:
217 # thanks to this, {{ form.media }} in the template will generate
218 # a <link href=""> (for CSS files) or <script src=""> (for JS files)
219 js = ('calendar_popup.js', )
220
221
222 class TaskForm(forms.ModelForm):
223
224 person = selectable.AutoCompleteSelectField(
225 lookup_class=lookups.PersonLookup,
226 label='Person',
227 required=True,
228 help_text=AUTOCOMPLETE_HELP_TEXT,
229 widget=selectable.AutoComboboxSelectWidget,
230 )
231
232 class Meta:
233 model = Task
234 fields = '__all__'
235 widgets = {'event': HiddenInput}
236
237
238 class TaskFullForm(TaskForm):
239
240 event = selectable.AutoCompleteSelectField(
241 lookup_class=lookups.EventLookup,
242 label='Event',
243 required=True,
244 help_text=AUTOCOMPLETE_HELP_TEXT,
245 widget=selectable.AutoComboboxSelectWidget,
246 )
247
248 class Meta:
249 model = Task
250 fields = '__all__'
251
252
253 class PersonForm(forms.ModelForm):
254
255 airport = selectable.AutoCompleteSelectField(
256 lookup_class=lookups.AirportLookup,
257 label='Airport',
258 required=False,
259 help_text=AUTOCOMPLETE_HELP_TEXT,
260 widget=selectable.AutoComboboxSelectWidget,
261 )
262
263 lessons = forms.ModelMultipleChoiceField(required=False,
264 queryset=Lesson.objects.all())
265
266 domains = forms.ModelMultipleChoiceField(
267 required=False, queryset=KnowledgeDomain.objects.all()
268 )
269
270 class Meta:
271 model = Person
272 # don't display the 'password', 'user_permissions',
273 # 'groups' or 'is_superuser' fields
274 # + reorder fields
275 fields = ['username', 'personal', 'middle', 'family', 'may_contact',
276 'email', 'gender', 'airport', 'affiliation', 'github',
277 'twitter', 'url', 'notes', 'lessons', 'domains']
278
279
280 class PersonPermissionsForm(forms.ModelForm):
281 class Meta:
282 model = Person
283 # only display 'user_permissions', 'groups' and `is_superuser` fields
284 fields = [
285 'is_superuser',
286 'user_permissions',
287 'groups',
288 ]
289
290
291 class PersonMergeForm(forms.Form):
292
293 person_from = selectable.AutoCompleteSelectField(
294 lookup_class=lookups.PersonLookup,
295 label='Person From',
296 required=True,
297 help_text=AUTOCOMPLETE_HELP_TEXT,
298 widget=selectable.AutoComboboxSelectWidget,
299 )
300
301 person_to = selectable.AutoCompleteSelectField(
302 lookup_class=lookups.PersonLookup,
303 label='Person To',
304 required=True,
305 help_text=AUTOCOMPLETE_HELP_TEXT,
306 widget=selectable.AutoComboboxSelectWidget,
307 )
308
309
310 class BadgeAwardForm(forms.ModelForm):
311
312 person = selectable.AutoCompleteSelectField(
313 lookup_class=lookups.PersonLookup,
314 label='Person',
315 required=True,
316 help_text=AUTOCOMPLETE_HELP_TEXT,
317 widget=selectable.AutoComboboxSelectWidget,
318 )
319
320 event = selectable.AutoCompleteSelectField(
321 lookup_class=lookups.EventLookup,
322 label='Event',
323 required=False,
324 help_text=AUTOCOMPLETE_HELP_TEXT,
325 widget=selectable.AutoComboboxSelectWidget,
326 )
327
328 class Meta:
329 model = Award
330 fields = '__all__'
331 widgets = {'badge': HiddenInput}
332
333
334 class PersonAwardForm(forms.ModelForm):
335
336 event = selectable.AutoCompleteSelectField(
337 lookup_class=lookups.EventLookup,
338 label='Event',
339 required=False,
340 help_text=AUTOCOMPLETE_HELP_TEXT,
341 widget=selectable.AutoComboboxSelectWidget,
342 )
343
344 class Meta:
345 model = Award
346 fields = '__all__'
347 widgets = {'person': HiddenInput}
348
349
350 class PersonTaskForm(forms.ModelForm):
351
352 event = selectable.AutoCompleteSelectField(
353 lookup_class=lookups.EventLookup,
354 label='Event',
355 required=True,
356 help_text=AUTOCOMPLETE_HELP_TEXT,
357 widget=selectable.AutoComboboxSelectWidget,
358 )
359
360 class Meta:
361 model = Task
362 fields = '__all__'
363 widgets = {'person': HiddenInput}
364
[end of workshops/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/workshops/forms.py b/workshops/forms.py
--- a/workshops/forms.py
+++ b/workshops/forms.py
@@ -206,6 +206,15 @@
return data
+ def clean_end(self):
+ """Ensure end >= start."""
+ start = self.cleaned_data['start']
+ end = self.cleaned_data['end']
+
+ if start and end and end < start:
+ raise forms.ValidationError('Must not be earlier than start date.')
+ return end
+
class Meta:
model = Event
# reorder fields, don't display 'deleted' field
| {"golden_diff": "diff --git a/workshops/forms.py b/workshops/forms.py\n--- a/workshops/forms.py\n+++ b/workshops/forms.py\n@@ -206,6 +206,15 @@\n \n return data\n \n+ def clean_end(self):\n+ \"\"\"Ensure end >= start.\"\"\"\n+ start = self.cleaned_data['start']\n+ end = self.cleaned_data['end']\n+\n+ if start and end and end < start:\n+ raise forms.ValidationError('Must not be earlier than start date.')\n+ return end\n+\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n", "issue": "Event's start date must be earlier than it's end date\nWe should enforce that the start date is early that the end date.\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import HiddenInput, CheckboxSelectMultiple\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, HTML, Submit, Field\nfrom crispy_forms.bootstrap import FormActions\nfrom django_countries.fields import CountryField\nfrom selectable import forms as selectable\n\nfrom workshops.models import (\n Award, Event, Lesson, Person, Task, KnowledgeDomain,\n)\nfrom workshops import lookups\n\nINSTRUCTORS_NUM = 10 # how many instrutors to return from a search by default\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\nDATE_HELP_TEXT = \"Select date using widget, or enter in YYYY-MM-DD format.\"\n\n\nclass BootstrapHelper(FormHelper):\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperWithAdd(BootstrapHelper):\n def __init__(self, form=None):\n super().__init__(form)\n\n self.inputs[-1] = Submit('submit', 'Add')\n\n\nclass BootstrapHelperFilter(FormHelper):\n form_method = 'get'\n\n def __init__(self, form=None):\n super().__init__(form)\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('', 'Submit'))\n\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_with_add = BootstrapHelperWithAdd()\nbootstrap_helper_filter = BootstrapHelperFilter()\n\n\nclass InstructorsForm(forms.Form):\n '''Represent instructor matching form.'''\n\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n widget=selectable.AutoComboboxSelectWidget(\n lookup_class=lookups.AirportLookup,\n ),\n )\n\n country = CountryField().formfield(required=False)\n\n lessons = forms.ModelMultipleChoiceField(queryset=Lesson.objects.all(),\n widget=CheckboxSelectMultiple(),\n required=False)\n\n def __init__(self, *args, **kwargs):\n '''Build checkboxes for qualifications dynamically.'''\n super(InstructorsForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-inline'\n self.helper.form_method = 'get'\n self.helper.layout = Layout(\n Div(\n Div(\n 'latitude',\n 'longitude',\n css_class='panel-body'\n ),\n css_class='panel panel-default ',\n ),\n HTML('<p>OR</p>'),\n Div(\n Div(\n 'airport',\n css_class='panel-body'\n ),\n css_class='panel panel-default ',\n ),\n HTML('<p>OR</p>'),\n Div(\n Div(\n 'country',\n css_class='panel-body'\n ),\n css_class='panel panel-default 
',\n ),\n 'lessons',\n FormActions(\n Submit('submit', 'Submit'),\n ),\n )\n\n def clean(self):\n cleaned_data = super(InstructorsForm, self).clean()\n airport = cleaned_data.get('airport')\n lat = cleaned_data.get('latitude')\n long = cleaned_data.get('longitude')\n country = cleaned_data.get('country')\n\n sum = bool(airport) + bool(lat and long) + bool(country)\n # user can specify only one: either airport, or lat&long, or country\n if sum != 1:\n raise forms.ValidationError('Must specify an airport, or latitude'\n ' and longitude, or a country.')\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_sites = forms.BooleanField(label='in sites',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n in_airports = forms.BooleanField(label='in airports',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n\n site = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Site',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n organizer = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Organizer',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['start'].help_text = DATE_HELP_TEXT\n self.fields['end'].help_text = DATE_HELP_TEXT\n\n def clean_slug(self):\n # Ensure slug is not an integer value for Event.get_by_ident\n data = self.cleaned_data['slug']\n\n try:\n int(data)\n except ValueError:\n pass\n else:\n raise forms.ValidationError(\"Slug must not be an integer-value.\")\n\n return data\n\n class Meta:\n model = Event\n # reorder fields, don't display 'deleted' field\n fields = ('slug', 'start', 'end', 'site', 'organizer',\n 'tags', 'url', 'reg_key', 'admin_fee', 'invoiced',\n 'attendance', 'notes')\n\n class Media:\n # thanks to this, {{ form.media }} in the template will generate\n # a <link href=\"\"> (for CSS files) or <script src=\"\"> (for JS files)\n js = ('calendar_popup.js', )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'event': HiddenInput}\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n 
required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n lessons = forms.ModelMultipleChoiceField(required=False,\n queryset=Lesson.objects.all())\n\n domains = forms.ModelMultipleChoiceField(\n required=False, queryset=KnowledgeDomain.objects.all()\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions',\n # 'groups' or 'is_superuser' fields\n # + reorder fields\n fields = ['username', 'personal', 'middle', 'family', 'may_contact',\n 'email', 'gender', 'airport', 'affiliation', 'github',\n 'twitter', 'url', 'notes', 'lessons', 'domains']\n\n\nclass PersonPermissionsForm(forms.ModelForm):\n class Meta:\n model = Person\n # only display 'user_permissions', 'groups' and `is_superuser` fields\n fields = [\n 'is_superuser',\n 'user_permissions',\n 'groups',\n ]\n\n\nclass PersonMergeForm(forms.Form):\n\n person_from = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person From',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n person_to = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person To',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n\nclass PersonAwardForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'person': HiddenInput}\n\n\nclass PersonTaskForm(forms.ModelForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n fields = '__all__'\n widgets = {'person': HiddenInput}\n", "path": "workshops/forms.py"}]} | 3,816 | 137 |
gh_patches_debug_7041 | rasdani/github-patches | git_diff | mozilla__bugbug-2806 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change needsdiagnosis model to consider moved milestone as `needsdiagnosis = True`
We have observed that the needsdiagnosis model classifies certain issues that potentially need diagnosis as `needsdiagnosis = False`. While this is expected, I think it might be getting worse, as the issues data is unbalanced and has many more data points for `needsdiagnosis = False`. We've started a discussion in https://github.com/mozilla/webcompat-team-okrs/issues/256
It's worth mentioning that in the [recent sample of 22 issues](https://docs.google.com/spreadsheets/d/1F9vcSpLQ_hNBeZinsytGXlfXpJLW6vh7C0BJYtd9hIY/edit?pli=1#gid=1640243023) most of the issues that looked like false negatives didn't end up needing diagnosis (not reproducible, out of the scope of the project, etc.), so they're true negatives (prediction was correct for 21 of them). We'll continue tracking them to get a more representative sample.
As an example, issues that looked like they needed diagnosis but, in the end, didn't (the prediction was correct):
https://github.com/webcompat/web-bugs/issues/100746
https://github.com/webcompat/web-bugs/issues/100676
https://github.com/webcompat/web-bugs/issues/100687
Issues that are false negatives (prediction was incorrect):
https://github.com/webcompat/web-bugs/issues/100495
https://github.com/webcompat/web-bugs/issues/100645
I was thinking of including an additional set of issues that will contribute to the `needsdiagnosis = True` pool. We have recently added a `moved` [milestone](https://github.com/webcompat/web-bugs/issues?q=is%3Aissue+milestone%3Amoved+is%3Aclosed). These issues often don't need diagnosis and are moved to Bugzilla or elsewhere, but their content should contribute to `needsdiagnosis = True` rather than `False`.
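A rough sketch of the kind of labelling change being proposed (names and structure are illustrative, not the actual bugbug code; the event dictionaries mirror the GitHub issue events used when building labels):

```python
# Milestone titles that should be grouped together when assigning labels.
GROUPED_MILESTONES = {"needsdiagnosis", "moved"}

def label_issue(events):
    """Return 0 if the issue was ever milestoned as needsdiagnosis or moved, else 1."""
    for event in events:
        if (
            event["event"] == "milestoned"
            and event["milestone"]["title"] in GROUPED_MILESTONES
        ):
            return 0
    return 1
```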
</issue>
<code>
[start of bugbug/models/needsdiagnosis.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7
8 import xgboost
9 from sklearn.compose import ColumnTransformer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import feature_cleanup, issue_features, utils
13 from bugbug.model import IssueModel
14
15 logger = logging.getLogger(__name__)
16
17
18 class NeedsDiagnosisModel(IssueModel):
19 def __init__(self, lemmatization=False):
20 IssueModel.__init__(
21 self, owner="webcompat", repo="web-bugs", lemmatization=lemmatization
22 )
23
24 self.calculate_importance = False
25
26 feature_extractors = []
27
28 cleanup_functions = [
29 feature_cleanup.fileref(),
30 feature_cleanup.url(),
31 feature_cleanup.synonyms(),
32 ]
33
34 self.extraction_pipeline = Pipeline(
35 [
36 (
37 "issue_extractor",
38 issue_features.IssueExtractor(
39 feature_extractors, cleanup_functions, rollback=True
40 ),
41 ),
42 (
43 "union",
44 ColumnTransformer(
45 [
46 ("title", self.text_vectorizer(min_df=0.0001), "title"),
47 (
48 "first_comment",
49 self.text_vectorizer(min_df=0.0001),
50 "first_comment",
51 ),
52 ]
53 ),
54 ),
55 ]
56 )
57
58 self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())
59 self.clf.set_params(predictor="cpu_predictor")
60
61 def get_labels(self):
62 classes = {}
63
64 for issue in self.github.get_issues():
65 # Skip issues with empty title or body
66 if issue["title"] is None or issue["body"] is None:
67 continue
68
69 # Skip issues that are not moderated yet as they don't have a meaningful title or body
70 if issue["title"] == "In the moderation queue.":
71 continue
72
73 for event in issue["events"]:
74 if (
75 event["event"] == "milestoned"
76 and event["milestone"]["title"] == "needsdiagnosis"
77 ):
78 classes[issue["number"]] = 0
79
80 if issue["number"] not in classes:
81 classes[issue["number"]] = 1
82
83 logger.info(
84 f"{sum(1 for label in classes.values() if label == 1)} issues have not been moved to needsdiagnosis"
85 )
86 logger.info(
87 f"{sum(1 for label in classes.values() if label == 0)} issues have been moved to needsdiagnosis"
88 )
89
90 return classes, [0, 1]
91
92 def get_feature_names(self):
93 return self.extraction_pipeline.named_steps["union"].get_feature_names()
94
[end of bugbug/models/needsdiagnosis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/needsdiagnosis.py b/bugbug/models/needsdiagnosis.py
--- a/bugbug/models/needsdiagnosis.py
+++ b/bugbug/models/needsdiagnosis.py
@@ -71,9 +71,9 @@
continue
for event in issue["events"]:
- if (
- event["event"] == "milestoned"
- and event["milestone"]["title"] == "needsdiagnosis"
+ if event["event"] == "milestoned" and (
+ event["milestone"]["title"] == "needsdiagnosis"
+ or event["milestone"]["title"] == "moved"
):
classes[issue["number"]] = 0
| {"golden_diff": "diff --git a/bugbug/models/needsdiagnosis.py b/bugbug/models/needsdiagnosis.py\n--- a/bugbug/models/needsdiagnosis.py\n+++ b/bugbug/models/needsdiagnosis.py\n@@ -71,9 +71,9 @@\n continue\n \n for event in issue[\"events\"]:\n- if (\n- event[\"event\"] == \"milestoned\"\n- and event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n+ if event[\"event\"] == \"milestoned\" and (\n+ event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n+ or event[\"milestone\"][\"title\"] == \"moved\"\n ):\n classes[issue[\"number\"]] = 0\n", "issue": "Change needsdiagnosis model to consider moved milestone as `needsdiagnosis = True`\nWe have observed that needsdiagnosis model classifies certain issues that potentially need diagnosis as `needsdiagnosis = False`. While this is expected, I think it might be getting worse, as the issues data is unbalanced and has much more data points for `needsdiagnosis = False`. We've started a discussion in https://github.com/mozilla/webcompat-team-okrs/issues/256 \r\n\r\nIt's worth mentioning that in the [recent sample of 22 issues](https://docs.google.com/spreadsheets/d/1F9vcSpLQ_hNBeZinsytGXlfXpJLW6vh7C0BJYtd9hIY/edit?pli=1#gid=1640243023) most of the issues that looked like false negatives didn't end up needing diagnosis (not reproducible, out of the scope of the project, etc.), so they're true negatives (prediction was correct for 21 of them). We'll continue tracking them to get a more representative sample.\r\n\r\nAs an example, issues that looked like they need diagnosis, but in the end, they didn't (prediction was correct):\r\nhttps://github.com/webcompat/web-bugs/issues/100746\r\nhttps://github.com/webcompat/web-bugs/issues/100676\r\nhttps://github.com/webcompat/web-bugs/issues/100687\r\n\r\nIssues that are false negatives (prediction was incorrect): \r\nhttps://github.com/webcompat/web-bugs/issues/100495\r\nhttps://github.com/webcompat/web-bugs/issues/100645\r\n\r\nI was thinking of including an additional set of issues that will contribute to `needsdiagnosis = True` pool. We have recently added a `moved` [milestone](https://github.com/webcompat/web-bugs/issues?q=is%3Aissue+milestone%3Amoved+is%3Aclosed). These issues often don't need diagnosis and are moved to bugzilla or elsewhere, but their content should be contributed to `needsdiagnosis = True` rather than false. \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\n\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import feature_cleanup, issue_features, utils\nfrom bugbug.model import IssueModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass NeedsDiagnosisModel(IssueModel):\n def __init__(self, lemmatization=False):\n IssueModel.__init__(\n self, owner=\"webcompat\", repo=\"web-bugs\", lemmatization=lemmatization\n )\n\n self.calculate_importance = False\n\n feature_extractors = []\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"issue_extractor\",\n issue_features.IssueExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.0001),\n \"first_comment\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for issue in self.github.get_issues():\n # Skip issues with empty title or body\n if issue[\"title\"] is None or issue[\"body\"] is None:\n continue\n\n # Skip issues that are not moderated yet as they don't have a meaningful title or body\n if issue[\"title\"] == \"In the moderation queue.\":\n continue\n\n for event in issue[\"events\"]:\n if (\n event[\"event\"] == \"milestoned\"\n and event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n ):\n classes[issue[\"number\"]] = 0\n\n if issue[\"number\"] not in classes:\n classes[issue[\"number\"]] = 1\n\n logger.info(\n f\"{sum(1 for label in classes.values() if label == 1)} issues have not been moved to needsdiagnosis\"\n )\n logger.info(\n f\"{sum(1 for label in classes.values() if label == 0)} issues have been moved to needsdiagnosis\"\n )\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/needsdiagnosis.py"}]} | 1,800 | 160 |
gh_patches_debug_6599 | rasdani/github-patches | git_diff | svthalia__concrexit-2585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'Event' object has no attribute 'number_regs'
Sentry Issue: [CONCREXIT-HC](https://sentry.io/organizations/thalia/issues/3639420824/?referrer=github_integration)
```
AttributeError: 'Event' object has no attribute 'number_regs'
(11 additional frame(s) were not displayed)
...
File "rest_framework/serializers.py", line 253, in data
self._data = self.to_representation(self.instance)
File "rest_framework/serializers.py", line 522, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/serializers.py", line 522, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/fields.py", line 1838, in to_representation
return method(value)
File "events/api/v2/serializers/event.py", line 86, in _num_participants
participant_count = instance.number_regs
```
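For illustration only, a defensive variant of the failing serializer method (not necessarily the fix the project chose; it falls back to the `eventregistration_set` manager that the serializer below already uses):

```python
def _num_participants(self, instance):
    # Prefer a precomputed count if the queryset annotated one, otherwise count registrations.
    count = getattr(instance, "number_regs", None)
    if count is None:
        count = instance.eventregistration_set.count()
    if instance.max_participants and count > instance.max_participants:
        return instance.max_participants
    return count
```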
</issue>
<code>
[start of website/events/api/v2/serializers/event.py]
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
4 from announcements.api.v2.serializers import SlideSerializer
5 from documents.api.v2.serializers.document import DocumentSerializer
6 from events import services
7 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
8 from events.models import Event, EventRegistration
9 from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
10 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
11 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
12 CleanedModelSerializer,
13 )
14 from utils.snippets import create_google_maps_url
15
16
17 class EventSerializer(CleanedModelSerializer):
18 """Serializer for events."""
19
20 class Meta:
21 model = Event
22 fields = (
23 "pk",
24 "title",
25 "description",
26 "caption",
27 "start",
28 "end",
29 "category",
30 "registration_start",
31 "registration_end",
32 "cancel_deadline",
33 "optional_registrations",
34 "location",
35 "price",
36 "fine",
37 "num_participants",
38 "max_participants",
39 "no_registration_message",
40 "cancel_too_late_message",
41 "has_fields",
42 "food_event",
43 "maps_url",
44 "user_permissions",
45 "user_registration",
46 "organisers",
47 "slide",
48 "documents",
49 )
50
51 description = CleanedHTMLSerializer()
52 organisers = MemberGroupSerializer(many=True)
53 user_registration = serializers.SerializerMethodField("_user_registration")
54 num_participants = serializers.SerializerMethodField("_num_participants")
55 maps_url = serializers.SerializerMethodField("_maps_url")
56 price = PaymentAmountSerializer()
57 fine = PaymentAmountSerializer()
58 slide = SlideSerializer()
59 documents = DocumentSerializer(many=True)
60 user_permissions = serializers.SerializerMethodField("_user_permissions")
61
62 def _user_registration(self, instance):
63 try:
64 if self.context["request"].member:
65 reg = instance.eventregistration_set.get(
66 member=self.context["request"].member
67 )
68 return EventRegistrationSerializer(
69 reg,
70 context=self.context,
71 fields=(
72 "pk",
73 "present",
74 "queue_position",
75 "is_cancelled",
76 "is_late_cancellation",
77 "date",
78 "payment",
79 ),
80 ).data
81 except EventRegistration.DoesNotExist:
82 pass
83 return None
84
85 def _num_participants(self, instance):
86 participant_count = instance.number_regs
87 if instance.max_participants and participant_count > instance.max_participants:
88 return instance.max_participants
89 return participant_count
90
91 def _user_permissions(self, instance):
92 member = self.context["request"].member
93 return services.event_permissions(member, instance)
94
95 def _maps_url(self, instance):
96 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
97
[end of website/events/api/v2/serializers/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -83,10 +83,7 @@
return None
def _num_participants(self, instance):
- participant_count = instance.number_regs
- if instance.max_participants and participant_count > instance.max_participants:
- return instance.max_participants
- return participant_count
+ return instance.participants.count()
def _user_permissions(self, instance):
member = self.context["request"].member
| {"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -83,10 +83,7 @@\n return None\n \n def _num_participants(self, instance):\n- participant_count = instance.number_regs\n- if instance.max_participants and participant_count > instance.max_participants:\n- return instance.max_participants\n- return participant_count\n+ return instance.participants.count()\n \n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n", "issue": "AttributeError: 'Event' object has no attribute 'number_regs'\nSentry Issue: [CONCREXIT-HC](https://sentry.io/organizations/thalia/issues/3639420824/?referrer=github_integration)\n\n```\nAttributeError: 'Event' object has no attribute 'number_regs'\n(11 additional frame(s) were not displayed)\n...\n File \"rest_framework/serializers.py\", line 253, in data\n self._data = self.to_representation(self.instance)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/fields.py\", line 1838, in to_representation\n return method(value)\n File \"events/api/v2/serializers/event.py\", line 86, in _num_participants\n participant_count = instance.number_regs\n```\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = 
instance.eventregistration_set.get(\n member=self.context[\"request\"].member\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n participant_count = instance.number_regs\n if instance.max_participants and participant_count > instance.max_participants:\n return instance.max_participants\n return participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]} | 1,582 | 150 |
gh_patches_debug_2279 | rasdani/github-patches | git_diff | geopandas__geopandas-648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Descartes dependency
In the docs, geopandas lists descartes and matplotlib as optional dependencies. However, descartes is listed under install_requires in setup.py.
One of the two should be updated. I'd prefer to be able to pip install geopandas without installing matplotlib.
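One packaging sketch that would keep plotting out of the hard requirements, using setuptools extras (illustrative only; not necessarily the layout geopandas adopted):

```python
from setuptools import setup

setup(
    name='geopandas',
    install_requires=['pandas', 'shapely', 'fiona', 'pyproj'],
    # Plotting stays opt-in: `pip install geopandas[plotting]` pulls these in.
    extras_require={'plotting': ['matplotlib', 'descartes']},
)
```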
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env/python
2 """Installation script
3
4 """
5
6 import os
7
8 try:
9 from setuptools import setup
10 except ImportError:
11 from distutils.core import setup
12
13 import versioneer
14
15 LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
16 `pandas`_ objects.
17
18 The goal of GeoPandas is to make working with geospatial data in
19 python easier. It combines the capabilities of `pandas`_ and `shapely`_,
20 providing geospatial operations in pandas and a high-level interface
21 to multiple geometries to shapely. GeoPandas enables you to easily do
22 operations in python that would otherwise require a spatial database
23 such as PostGIS.
24
25 .. _pandas: http://pandas.pydata.org
26 .. _shapely: http://toblerity.github.io/shapely
27 """
28
29 if os.environ.get('READTHEDOCS', False) == 'True':
30 INSTALL_REQUIRES = []
31 else:
32 INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']
33
34 # get all data dirs in the datasets module
35 data_files = []
36
37 for item in os.listdir("geopandas/datasets"):
38 if not item.startswith('__'):
39 if os.path.isdir(os.path.join("geopandas/datasets/", item)):
40 data_files.append(os.path.join("datasets", item, '*'))
41 elif item.endswith('.zip'):
42 data_files.append(os.path.join("datasets", item))
43
44
45 setup(name='geopandas',
46 version=versioneer.get_version(),
47 description='Geographic pandas extensions',
48 license='BSD',
49 author='GeoPandas contributors',
50 author_email='[email protected]',
51 url='http://geopandas.org',
52 long_description=LONG_DESCRIPTION,
53 packages=['geopandas', 'geopandas.io', 'geopandas.tools',
54 'geopandas.datasets',
55 'geopandas.tests', 'geopandas.tools.tests'],
56 package_data={'geopandas': data_files},
57 install_requires=INSTALL_REQUIRES,
58 cmdclass=versioneer.get_cmdclass())
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
if os.environ.get('READTHEDOCS', False) == 'True':
INSTALL_REQUIRES = []
else:
- INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']
+ INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'pyproj']
# get all data dirs in the datasets module
data_files = []
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,7 @@\n if os.environ.get('READTHEDOCS', False) == 'True':\n INSTALL_REQUIRES = []\n else:\n- INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']\n+ INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'pyproj']\n \n # get all data dirs in the datasets module\n data_files = []\n", "issue": "Descartes dependency\nIn the docs, geopandas lists descartes and matplotlib as optional dependencies. However, descartes is listed as an install_requires in the setup.py.\r\n\r\nOne of the two should be updated. I'd prefer to be able to pip install geopandas without installing matplotlib.\n", "before_files": [{"content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://toblerity.github.io/shapely\n\"\"\"\n\nif os.environ.get('READTHEDOCS', False) == 'True':\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith('__'):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, '*'))\n elif item.endswith('.zip'):\n data_files.append(os.path.join(\"datasets\", item))\n\n\nsetup(name='geopandas',\n version=versioneer.get_version(),\n description='Geographic pandas extensions',\n license='BSD',\n author='GeoPandas contributors',\n author_email='[email protected]',\n url='http://geopandas.org',\n long_description=LONG_DESCRIPTION,\n packages=['geopandas', 'geopandas.io', 'geopandas.tools',\n 'geopandas.datasets',\n 'geopandas.tests', 'geopandas.tools.tests'],\n package_data={'geopandas': data_files},\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass())\n", "path": "setup.py"}]} | 1,161 | 123 |
gh_patches_debug_35062 | rasdani/github-patches | git_diff | TabbycatDebate__tabbycat-1406 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reporting of total speaks in emails uses integers rather than floats
This was a bug report to the FB page; just reposting here for better tracking and in case anyone else wants to look at it. I haven't tried to reproduce it or look at the code, but the report seems clear.
In the (default?) email template that is sent to adjudicators, the line *Proposition: Team A (Loss - X total speaks)* reports *X total speaks* as an integer, which can lead to incorrect totals in formats with half-points.
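A small illustration of the formatting difference (the ballot email builder below formats the total with `%(speaks)d`, so fractional speaks are truncated):

```python
speaks = 75.5
print("Proposition: Team A (Loss - %d total speaks)" % speaks)  # -> 75 total speaks
print("Proposition: Team A (Loss - %s total speaks)" % speaks)  # -> 75.5 total speaks
```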
</issue>
<code>
[start of tabbycat/notifications/utils.py]
1 """Email generator functions
2
3 These functions assemble the necessary arguments to be parsed in email templates
4 to be sent to relevant parties. All these functions return a tuple with the first
5 element being a context dictionary with the available variables to be parsed in
6 the message. The second element is the Person object. All these functions are
7 called by NotificationQueueConsumer, which inserts the variables into a message,
8 using the participant object to fetch their email address and to record.
9
10 Objects should be fetched from the database here as it is an asyncronous process,
11 thus the object itself cannot be passed.
12 """
13
14 from django.utils.safestring import mark_safe
15 from django.utils.translation import gettext as _
16
17 from adjallocation.allocation import AdjudicatorAllocation
18 from draw.models import Debate
19 from results.result import BaseConsensusDebateResultWithSpeakers, DebateResult, VotingDebateResult
20 from results.utils import side_and_position_names
21 from options.utils import use_team_code_names
22 from participants.models import Person
23 from participants.prefetch import populate_win_counts
24 from tournaments.models import Round, Tournament
25
26
27 adj_position_names = {
28 AdjudicatorAllocation.POSITION_CHAIR: _("the chair"),
29 AdjudicatorAllocation.POSITION_ONLY: _("the only"),
30 AdjudicatorAllocation.POSITION_PANELLIST: _("a panellist"),
31 AdjudicatorAllocation.POSITION_TRAINEE: _("a trainee"),
32 }
33
34
35 def _assemble_panel(adjs):
36 adj_string = []
37 for adj, pos in adjs:
38 adj_string.append("%s (%s)" % (adj.name, adj_position_names[pos]))
39
40 return ", ".join(adj_string)
41
42
43 def adjudicator_assignment_email_generator(to, url, round_id):
44 emails = []
45 round = Round.objects.get(id=round_id)
46 tournament = round.tournament
47 draw = round.debate_set_with_prefetches(speakers=False).all()
48 use_codes = use_team_code_names(tournament, False)
49
50 for debate in draw:
51 matchup = debate.matchup_codes if use_codes else debate.matchup
52 context = {
53 'ROUND': round.name,
54 'VENUE': debate.venue.display_name if debate.venue is not None else _("TBA"),
55 'PANEL': _assemble_panel(debate.adjudicators.with_positions()),
56 'DRAW': matchup
57 }
58
59 for adj, pos in debate.adjudicators.with_positions():
60 try:
61 to.remove(adj.id)
62 except ValueError:
63 continue
64
65 context_user = context.copy()
66 context_user['USER'] = adj.name
67 context_user['POSITION'] = adj_position_names[pos]
68
69 if adj.url_key:
70 context_user['URL'] = url + adj.url_key + '/'
71
72 emails.append((context_user, adj))
73
74 return emails
75
76
77 def randomized_url_email_generator(to, url, tournament_id):
78 emails = []
79 tournament = Tournament.objects.get(id=tournament_id)
80
81 for instance in tournament.participants:
82 try:
83 to.remove(instance.id)
84 except ValueError:
85 continue
86 url_ind = url + instance.url_key + '/'
87
88 variables = {'USER': instance.name, 'URL': url_ind, 'KEY': instance.url_key, 'TOURN': str(tournament)}
89
90 emails.append((variables, instance))
91
92 return emails
93
94
95 def ballots_email_generator(to, debate_id):
96 emails = []
97 debate = Debate.objects.get(id=debate_id)
98 tournament = debate.round.tournament
99 results = DebateResult(debate.confirmed_ballot)
100 round_name = _("%(tournament)s %(round)s @ %(room)s") % {'tournament': str(tournament),
101 'round': debate.round.name, 'room': debate.venue.name}
102
103 use_codes = use_team_code_names(debate.round.tournament, False)
104
105 def _create_ballot(result, scoresheet):
106 ballot = "<ul>"
107
108 for side, (side_name, pos_names) in zip(tournament.sides, side_and_position_names(tournament)):
109 side_string = ""
110 if tournament.pref('teams_in_debate') == 'bp':
111 side_string += _("<li>%(side)s: %(team)s (%(points)d points with %(speaks)d total speaks)")
112 points = 4 - scoresheet.rank(side)
113 else:
114 side_string += _("<li>%(side)s: %(team)s (%(points)s - %(speaks)d total speaks)")
115 points = _("Win") if side == scoresheet.winner() else _("Loss")
116
117 ballot += side_string % {
118 'side': side_name,
119 'team': result.debateteams[side].team.code_name if use_codes else result.debateteams[side].team.short_name,
120 'speaks': scoresheet.get_total(side),
121 'points': points
122 }
123
124 ballot += "<ul>"
125
126 for pos, pos_name in zip(tournament.positions, pos_names):
127 ballot += _("<li>%(pos)s: %(speaker)s (%(score)s)</li>") % {
128 'pos': pos_name,
129 'speaker': result.get_speaker(side, pos).name,
130 'score': scoresheet.get_score(side, pos)
131 }
132
133 ballot += "</ul></li>"
134
135 ballot += "</ul>"
136
137 return mark_safe(ballot)
138
139 if isinstance(results, VotingDebateResult):
140 for (adj, ballot) in results.scoresheets.items():
141 if adj.email is None:
142 continue
143
144 context = {'DEBATE': round_name, 'USER': adj.name, 'SCORES': _create_ballot(results, ballot)}
145 emails.append((context, adj))
146 elif isinstance(results, BaseConsensusDebateResultWithSpeakers):
147 context = {'DEBATE': round_name, 'SCORES': _create_ballot(results, results.scoresheet)}
148
149 for adj in debate.debateadjudicator_set.all():
150 if adj.adjudicator.email is None:
151 continue
152
153 context_user = context.copy()
154 context_user['USER'] = adj.adjudicator.name
155
156 emails.append((context_user, adj.adjudicator))
157
158 return emails
159
160
161 def standings_email_generator(to, url, round_id):
162 emails = []
163 round = Round.objects.get(id=round_id)
164 tournament = round.tournament
165
166 teams = round.active_teams.prefetch_related('speaker_set')
167 populate_win_counts(teams)
168
169 context = {
170 'TOURN': str(tournament),
171 'ROUND': round.name,
172 'URL': url if tournament.pref('public_team_standings') else ""
173 }
174
175 for team in teams:
176 context_team = context.copy()
177 context_team['POINTS'] = str(team.points_count)
178 context_team['TEAM'] = team.short_name
179
180 for speaker in team.speaker_set.all():
181 try:
182 to.remove(speaker.id)
183 except ValueError:
184 continue
185
186 context_user = context_team.copy()
187 context_user['USER'] = speaker.name
188
189 emails.append((context_user, speaker))
190
191 return emails
192
193
194 def motion_release_email_generator(to, round_id):
195 emails = []
196 round = Round.objects.get(id=round_id)
197
198 def _create_motion_list():
199 motion_list = "<ul>"
200 for motion in round.motion_set.all():
201 motion_list += _("<li>%(text)s (%(ref)s)</li>") % {'text': motion.text, 'ref': motion.reference}
202
203 if motion.info_slide:
204 motion_list += " %s\n" % (motion.info_slide)
205
206 motion_list += "</ul>"
207
208 return mark_safe(motion_list)
209
210 context = {
211 'TOURN': str(round.tournament),
212 'ROUND': round.name,
213 'MOTIONS': _create_motion_list()
214 }
215
216 people = Person.objects.filter(id__in=to)
217 for person in people:
218 context_user = context.copy()
219 context_user['USER'] = person.name
220
221 emails.append((context_user, person))
222
223 return emails
224
225
226 def team_speaker_email_generator(to, tournament_id):
227 emails = []
228 tournament = Tournament.objects.get(id=tournament_id)
229
230 for team in tournament.team_set.all().prefetch_related('speaker_set', 'break_categories').select_related('institution'):
231 context = {
232 'TOURN': str(tournament),
233 'SHORT': team.short_name,
234 'LONG': team.long_name,
235 'CODE': team.code_name,
236 'BREAK': _(", ").join([breakq.name for breakq in team.break_categories.all()]),
237 'SPEAKERS': _(", ").join([p.name for p in team.speaker_set.all()]),
238 'INSTITUTION': str(team.institution),
239 'EMOJI': team.emoji
240 }
241
242 for speaker in team.speakers:
243 try:
244 to.remove(speaker.id)
245 except ValueError:
246 continue
247
248 context_user = context.copy()
249 context_user['USER'] = speaker.name
250
251 emails.append((context_user, speaker))
252
253 return emails
254
255
256 def team_draw_email_generator(to, url, round_id):
257 emails = []
258 round = Round.objects.get(id=round_id)
259 tournament = round.tournament
260 draw = round.debate_set_with_prefetches(speakers=True).all()
261 use_codes = use_team_code_names(tournament, False)
262
263 for debate in draw:
264 matchup = debate.matchup_codes if use_codes else debate.matchup
265 context = {
266 'ROUND': round.name,
267 'VENUE': debate.venue.name,
268 'PANEL': _assemble_panel(debate.adjudicators.with_positions()),
269 'DRAW': matchup
270 }
271
272 for dt in debate.debateteam_set.all():
273 context_team = context.copy()
274 context_team['TEAM'] = dt.team.code_name if use_codes else dt.team.short_name
275 context_team['SIDE'] = dt.get_side_name(tournament=tournament)
276
277 for speaker in dt.team.speakers:
278 try:
279 to.remove(speaker.id)
280 except ValueError:
281 continue
282
283 context_user = context_team.copy()
284 context_user['USER'] = speaker.name
285
286 emails.append((context_user, speaker))
287
288 return emails
289
[end of tabbycat/notifications/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tabbycat/notifications/utils.py b/tabbycat/notifications/utils.py
--- a/tabbycat/notifications/utils.py
+++ b/tabbycat/notifications/utils.py
@@ -11,6 +11,7 @@
thus the object itself cannot be passed.
"""
+from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
@@ -108,16 +109,16 @@
for side, (side_name, pos_names) in zip(tournament.sides, side_and_position_names(tournament)):
side_string = ""
if tournament.pref('teams_in_debate') == 'bp':
- side_string += _("<li>%(side)s: %(team)s (%(points)d points with %(speaks)d total speaks)")
+ side_string += _("<li>%(side)s: %(team)s (%(points)d points with %(speaks)s total speaks)")
points = 4 - scoresheet.rank(side)
else:
- side_string += _("<li>%(side)s: %(team)s (%(points)s - %(speaks)d total speaks)")
+ side_string += _("<li>%(side)s: %(team)s (%(points)s - %(speaks)s total speaks)")
points = _("Win") if side == scoresheet.winner() else _("Loss")
ballot += side_string % {
'side': side_name,
'team': result.debateteams[side].team.code_name if use_codes else result.debateteams[side].team.short_name,
- 'speaks': scoresheet.get_total(side),
+ 'speaks': formats.localize(scoresheet.get_total(side)),
'points': points
}
@@ -127,7 +128,7 @@
ballot += _("<li>%(pos)s: %(speaker)s (%(score)s)</li>") % {
'pos': pos_name,
'speaker': result.get_speaker(side, pos).name,
- 'score': scoresheet.get_score(side, pos)
+ 'score': formats.localize(scoresheet.get_score(side, pos))
}
ballot += "</ul></li>"
| {"golden_diff": "diff --git a/tabbycat/notifications/utils.py b/tabbycat/notifications/utils.py\n--- a/tabbycat/notifications/utils.py\n+++ b/tabbycat/notifications/utils.py\n@@ -11,6 +11,7 @@\n thus the object itself cannot be passed.\n \"\"\"\n \n+from django.utils import formats\n from django.utils.safestring import mark_safe\n from django.utils.translation import gettext as _\n \n@@ -108,16 +109,16 @@\n for side, (side_name, pos_names) in zip(tournament.sides, side_and_position_names(tournament)):\n side_string = \"\"\n if tournament.pref('teams_in_debate') == 'bp':\n- side_string += _(\"<li>%(side)s: %(team)s (%(points)d points with %(speaks)d total speaks)\")\n+ side_string += _(\"<li>%(side)s: %(team)s (%(points)d points with %(speaks)s total speaks)\")\n points = 4 - scoresheet.rank(side)\n else:\n- side_string += _(\"<li>%(side)s: %(team)s (%(points)s - %(speaks)d total speaks)\")\n+ side_string += _(\"<li>%(side)s: %(team)s (%(points)s - %(speaks)s total speaks)\")\n points = _(\"Win\") if side == scoresheet.winner() else _(\"Loss\")\n \n ballot += side_string % {\n 'side': side_name,\n 'team': result.debateteams[side].team.code_name if use_codes else result.debateteams[side].team.short_name,\n- 'speaks': scoresheet.get_total(side),\n+ 'speaks': formats.localize(scoresheet.get_total(side)),\n 'points': points\n }\n \n@@ -127,7 +128,7 @@\n ballot += _(\"<li>%(pos)s: %(speaker)s (%(score)s)</li>\") % {\n 'pos': pos_name,\n 'speaker': result.get_speaker(side, pos).name,\n- 'score': scoresheet.get_score(side, pos)\n+ 'score': formats.localize(scoresheet.get_score(side, pos))\n }\n \n ballot += \"</ul></li>\"\n", "issue": "Reporting of total speaks in emails uses integers rather than floats\nThis was a bug report to the FB page, just reposting here for better tracking and if anyone else wants to look at it. Haven't tried to reproduce/look at the code but the report seems clear\r\n\r\nIn the (default?) email template that is sent to adjs the line *Proposition: Team A (Loss - X total speaks)* reports *X total speaks* as an integer which can lead to incorrect totals in formats with half points. \n", "before_files": [{"content": "\"\"\"Email generator functions\n\nThese functions assemble the necessary arguments to be parsed in email templates\nto be sent to relevant parties. All these functions return a tuple with the first\nelement being a context dictionary with the available variables to be parsed in\nthe message. The second element is the Person object. 
All these functions are\ncalled by NotificationQueueConsumer, which inserts the variables into a message,\nusing the participant object to fetch their email address and to record.\n\nObjects should be fetched from the database here as it is an asyncronous process,\nthus the object itself cannot be passed.\n\"\"\"\n\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\n\nfrom adjallocation.allocation import AdjudicatorAllocation\nfrom draw.models import Debate\nfrom results.result import BaseConsensusDebateResultWithSpeakers, DebateResult, VotingDebateResult\nfrom results.utils import side_and_position_names\nfrom options.utils import use_team_code_names\nfrom participants.models import Person\nfrom participants.prefetch import populate_win_counts\nfrom tournaments.models import Round, Tournament\n\n\nadj_position_names = {\n AdjudicatorAllocation.POSITION_CHAIR: _(\"the chair\"),\n AdjudicatorAllocation.POSITION_ONLY: _(\"the only\"),\n AdjudicatorAllocation.POSITION_PANELLIST: _(\"a panellist\"),\n AdjudicatorAllocation.POSITION_TRAINEE: _(\"a trainee\"),\n}\n\n\ndef _assemble_panel(adjs):\n adj_string = []\n for adj, pos in adjs:\n adj_string.append(\"%s (%s)\" % (adj.name, adj_position_names[pos]))\n\n return \", \".join(adj_string)\n\n\ndef adjudicator_assignment_email_generator(to, url, round_id):\n emails = []\n round = Round.objects.get(id=round_id)\n tournament = round.tournament\n draw = round.debate_set_with_prefetches(speakers=False).all()\n use_codes = use_team_code_names(tournament, False)\n\n for debate in draw:\n matchup = debate.matchup_codes if use_codes else debate.matchup\n context = {\n 'ROUND': round.name,\n 'VENUE': debate.venue.display_name if debate.venue is not None else _(\"TBA\"),\n 'PANEL': _assemble_panel(debate.adjudicators.with_positions()),\n 'DRAW': matchup\n }\n\n for adj, pos in debate.adjudicators.with_positions():\n try:\n to.remove(adj.id)\n except ValueError:\n continue\n\n context_user = context.copy()\n context_user['USER'] = adj.name\n context_user['POSITION'] = adj_position_names[pos]\n\n if adj.url_key:\n context_user['URL'] = url + adj.url_key + '/'\n\n emails.append((context_user, adj))\n\n return emails\n\n\ndef randomized_url_email_generator(to, url, tournament_id):\n emails = []\n tournament = Tournament.objects.get(id=tournament_id)\n\n for instance in tournament.participants:\n try:\n to.remove(instance.id)\n except ValueError:\n continue\n url_ind = url + instance.url_key + '/'\n\n variables = {'USER': instance.name, 'URL': url_ind, 'KEY': instance.url_key, 'TOURN': str(tournament)}\n\n emails.append((variables, instance))\n\n return emails\n\n\ndef ballots_email_generator(to, debate_id):\n emails = []\n debate = Debate.objects.get(id=debate_id)\n tournament = debate.round.tournament\n results = DebateResult(debate.confirmed_ballot)\n round_name = _(\"%(tournament)s %(round)s @ %(room)s\") % {'tournament': str(tournament),\n 'round': debate.round.name, 'room': debate.venue.name}\n\n use_codes = use_team_code_names(debate.round.tournament, False)\n\n def _create_ballot(result, scoresheet):\n ballot = \"<ul>\"\n\n for side, (side_name, pos_names) in zip(tournament.sides, side_and_position_names(tournament)):\n side_string = \"\"\n if tournament.pref('teams_in_debate') == 'bp':\n side_string += _(\"<li>%(side)s: %(team)s (%(points)d points with %(speaks)d total speaks)\")\n points = 4 - scoresheet.rank(side)\n else:\n side_string += _(\"<li>%(side)s: %(team)s (%(points)s - %(speaks)d total 
speaks)\")\n points = _(\"Win\") if side == scoresheet.winner() else _(\"Loss\")\n\n ballot += side_string % {\n 'side': side_name,\n 'team': result.debateteams[side].team.code_name if use_codes else result.debateteams[side].team.short_name,\n 'speaks': scoresheet.get_total(side),\n 'points': points\n }\n\n ballot += \"<ul>\"\n\n for pos, pos_name in zip(tournament.positions, pos_names):\n ballot += _(\"<li>%(pos)s: %(speaker)s (%(score)s)</li>\") % {\n 'pos': pos_name,\n 'speaker': result.get_speaker(side, pos).name,\n 'score': scoresheet.get_score(side, pos)\n }\n\n ballot += \"</ul></li>\"\n\n ballot += \"</ul>\"\n\n return mark_safe(ballot)\n\n if isinstance(results, VotingDebateResult):\n for (adj, ballot) in results.scoresheets.items():\n if adj.email is None:\n continue\n\n context = {'DEBATE': round_name, 'USER': adj.name, 'SCORES': _create_ballot(results, ballot)}\n emails.append((context, adj))\n elif isinstance(results, BaseConsensusDebateResultWithSpeakers):\n context = {'DEBATE': round_name, 'SCORES': _create_ballot(results, results.scoresheet)}\n\n for adj in debate.debateadjudicator_set.all():\n if adj.adjudicator.email is None:\n continue\n\n context_user = context.copy()\n context_user['USER'] = adj.adjudicator.name\n\n emails.append((context_user, adj.adjudicator))\n\n return emails\n\n\ndef standings_email_generator(to, url, round_id):\n emails = []\n round = Round.objects.get(id=round_id)\n tournament = round.tournament\n\n teams = round.active_teams.prefetch_related('speaker_set')\n populate_win_counts(teams)\n\n context = {\n 'TOURN': str(tournament),\n 'ROUND': round.name,\n 'URL': url if tournament.pref('public_team_standings') else \"\"\n }\n\n for team in teams:\n context_team = context.copy()\n context_team['POINTS'] = str(team.points_count)\n context_team['TEAM'] = team.short_name\n\n for speaker in team.speaker_set.all():\n try:\n to.remove(speaker.id)\n except ValueError:\n continue\n\n context_user = context_team.copy()\n context_user['USER'] = speaker.name\n\n emails.append((context_user, speaker))\n\n return emails\n\n\ndef motion_release_email_generator(to, round_id):\n emails = []\n round = Round.objects.get(id=round_id)\n\n def _create_motion_list():\n motion_list = \"<ul>\"\n for motion in round.motion_set.all():\n motion_list += _(\"<li>%(text)s (%(ref)s)</li>\") % {'text': motion.text, 'ref': motion.reference}\n\n if motion.info_slide:\n motion_list += \" %s\\n\" % (motion.info_slide)\n\n motion_list += \"</ul>\"\n\n return mark_safe(motion_list)\n\n context = {\n 'TOURN': str(round.tournament),\n 'ROUND': round.name,\n 'MOTIONS': _create_motion_list()\n }\n\n people = Person.objects.filter(id__in=to)\n for person in people:\n context_user = context.copy()\n context_user['USER'] = person.name\n\n emails.append((context_user, person))\n\n return emails\n\n\ndef team_speaker_email_generator(to, tournament_id):\n emails = []\n tournament = Tournament.objects.get(id=tournament_id)\n\n for team in tournament.team_set.all().prefetch_related('speaker_set', 'break_categories').select_related('institution'):\n context = {\n 'TOURN': str(tournament),\n 'SHORT': team.short_name,\n 'LONG': team.long_name,\n 'CODE': team.code_name,\n 'BREAK': _(\", \").join([breakq.name for breakq in team.break_categories.all()]),\n 'SPEAKERS': _(\", \").join([p.name for p in team.speaker_set.all()]),\n 'INSTITUTION': str(team.institution),\n 'EMOJI': team.emoji\n }\n\n for speaker in team.speakers:\n try:\n to.remove(speaker.id)\n except ValueError:\n continue\n\n context_user = 
context.copy()\n context_user['USER'] = speaker.name\n\n emails.append((context_user, speaker))\n\n return emails\n\n\ndef team_draw_email_generator(to, url, round_id):\n emails = []\n round = Round.objects.get(id=round_id)\n tournament = round.tournament\n draw = round.debate_set_with_prefetches(speakers=True).all()\n use_codes = use_team_code_names(tournament, False)\n\n for debate in draw:\n matchup = debate.matchup_codes if use_codes else debate.matchup\n context = {\n 'ROUND': round.name,\n 'VENUE': debate.venue.name,\n 'PANEL': _assemble_panel(debate.adjudicators.with_positions()),\n 'DRAW': matchup\n }\n\n for dt in debate.debateteam_set.all():\n context_team = context.copy()\n context_team['TEAM'] = dt.team.code_name if use_codes else dt.team.short_name\n context_team['SIDE'] = dt.get_side_name(tournament=tournament)\n\n for speaker in dt.team.speakers:\n try:\n to.remove(speaker.id)\n except ValueError:\n continue\n\n context_user = context_team.copy()\n context_user['USER'] = speaker.name\n\n emails.append((context_user, speaker))\n\n return emails\n", "path": "tabbycat/notifications/utils.py"}]} | 3,575 | 474 |
gh_patches_debug_38231 | rasdani/github-patches | git_diff | pyro-ppl__pyro-365 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect result from Delta's batch_log_pdf
It looks like there may be a bug in Delta's `batch_log_pdf` method. When the value we're computing the log prob of doesn't match the parameter I expect `batch_log_pdf` to return `-inf` but it doesn't. For example:
```
x = Variable(torch.Tensor([[1.0]]))
y = Variable(torch.Tensor([[2.0]]))
# This is OK, it returns zero as expected:
print(Delta(x).batch_log_pdf(x))
# Here I expect -inf, but get 2.5500e+08
print(Delta(x).batch_log_pdf(y))
```
`log_pdf` works as expected.
(This isn't high priority for me.)
</issue>
<code>
[start of pyro/distributions/__init__.py]
1 # abstract base class
2 from pyro.distributions.bernoulli import Bernoulli
3 from pyro.distributions.beta import Beta
4 from pyro.distributions.categorical import Categorical
5 from pyro.distributions.cauchy import Cauchy
6 from pyro.distributions.half_cauchy import HalfCauchy
7 from pyro.distributions.delta import Delta
8 from pyro.distributions.distribution import Distribution # noqa: F401
9 # specific distributions
10 from pyro.distributions.diag_normal import DiagNormal
11 from pyro.distributions.dirichlet import Dirichlet
12 from pyro.distributions.exponential import Exponential
13 from pyro.distributions.gamma import Gamma
14 from pyro.distributions.log_normal import LogNormal
15 from pyro.distributions.multinomial import Multinomial
16 from pyro.distributions.poisson import Poisson
17 from pyro.distributions.random_primitive import RandomPrimitive
18 from pyro.distributions.uniform import Uniform
19
20 # function aliases
21 diagnormal = DiagNormal()
22 lognormal = RandomPrimitive(LogNormal)
23 categorical = Categorical()
24 bernoulli = RandomPrimitive(Bernoulli)
25 beta = RandomPrimitive(Beta)
26 delta = Delta()
27 exponential = RandomPrimitive(Exponential)
28 gamma = RandomPrimitive(Gamma)
29 multinomial = RandomPrimitive(Multinomial)
30 poisson = RandomPrimitive(Poisson)
31 uniform = RandomPrimitive(Uniform)
32 dirichlet = RandomPrimitive(Dirichlet)
33 cauchy = RandomPrimitive(Cauchy)
34 halfcauchy = RandomPrimitive(HalfCauchy)
35
[end of pyro/distributions/__init__.py]
[start of pyro/distributions/delta.py]
1 import torch
2 from torch.autograd import Variable
3
4 from pyro.distributions.distribution import Distribution
5
6
7 class Delta(Distribution):
8 """
9 :param v: support element *(any)*
10
11 Discrete distribution that assigns probability one to the single element in
12 its support. Delta distribution parameterized by a random choice should not
13 be used with MCMC based inference, as doing so produces incorrect results.
14 """
15 enumerable = True
16
17 def _sanitize_input(self, v):
18 if v is not None:
19 # stateless distribution
20 return v
21 elif self.v is not None:
22 # stateful distribution
23 return self.v
24 else:
25 raise ValueError("Parameter(s) were None")
26
27 def __init__(self, v=None, batch_size=1, *args, **kwargs):
28 """
29 Params:
30 `v` - value
31 """
32 self.v = v
33 if v is not None:
34 if v.dim() == 1 and batch_size > 1:
35 self.v = v.expand(v, v.size(0))
36 super(Delta, self).__init__(*args, **kwargs)
37
38 def sample(self, v=None):
39 v = self._sanitize_input(v)
40 if isinstance(v, Variable):
41 return v
42 return Variable(v)
43
44 def batch_log_pdf(self, x, v=None, batch_size=1):
45 v = self._sanitize_input(v)
46 if x.dim == 1:
47 x = x.expand(batch_size, x.size(0))
48 return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999
49
50 def log_pdf(self, x, v=None, *args, **kwargs):
51 v = self._sanitize_input(v)
52 if torch.equal(x.data, v.data.expand_as(x.data)):
53 return Variable(torch.zeros(1).type_as(v.data))
54 return Variable(torch.Tensor([-float("inf")]).type_as(v.data))
55
56 def support(self, v=None):
57 """
58 Returns the delta distribution's support, as a tensor along the first dimension.
59
60 :param v: torch variable where each element of the tensor represents the point at
61 which the delta distribution is concentrated.
62 :return: torch variable enumerating the support of the delta distribution.
63 :rtype: torch.autograd.Variable.
64 """
65 v = self._sanitize_input(v)
66 # univariate case
67 return Variable(v.data)
68
[end of pyro/distributions/delta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/__init__.py b/pyro/distributions/__init__.py
--- a/pyro/distributions/__init__.py
+++ b/pyro/distributions/__init__.py
@@ -23,7 +23,7 @@
categorical = Categorical()
bernoulli = RandomPrimitive(Bernoulli)
beta = RandomPrimitive(Beta)
-delta = Delta()
+delta = RandomPrimitive(Delta)
exponential = RandomPrimitive(Exponential)
gamma = RandomPrimitive(Gamma)
multinomial = RandomPrimitive(Multinomial)
diff --git a/pyro/distributions/delta.py b/pyro/distributions/delta.py
--- a/pyro/distributions/delta.py
+++ b/pyro/distributions/delta.py
@@ -14,44 +14,40 @@
"""
enumerable = True
- def _sanitize_input(self, v):
- if v is not None:
- # stateless distribution
- return v
- elif self.v is not None:
- # stateful distribution
- return self.v
- else:
- raise ValueError("Parameter(s) were None")
-
- def __init__(self, v=None, batch_size=1, *args, **kwargs):
+ def __init__(self, v, batch_size=None, *args, **kwargs):
"""
Params:
`v` - value
"""
self.v = v
- if v is not None:
- if v.dim() == 1 and batch_size > 1:
- self.v = v.expand(v, v.size(0))
+ if not isinstance(self.v, Variable):
+ self.v = Variable(self.v)
+ if v.dim() == 1 and batch_size is not None:
+ self.v = v.expand(v, v.size(0))
super(Delta, self).__init__(*args, **kwargs)
- def sample(self, v=None):
- v = self._sanitize_input(v)
- if isinstance(v, Variable):
- return v
- return Variable(v)
+ def batch_shape(self, x=None):
+ event_dim = 1
+ v = self.v
+ if x is not None and x.size() != v.size():
+ v = self.v.expand_as(x)
+ return v.size()[:-event_dim]
+
+ def event_shape(self):
+ event_dim = 1
+ return self.v.size()[-event_dim:]
+
+ def shape(self, x=None):
+ return self.batch_shape(x) + self.event_shape()
- def batch_log_pdf(self, x, v=None, batch_size=1):
- v = self._sanitize_input(v)
- if x.dim == 1:
- x = x.expand(batch_size, x.size(0))
- return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999
+ def sample(self):
+ return self.v
- def log_pdf(self, x, v=None, *args, **kwargs):
- v = self._sanitize_input(v)
- if torch.equal(x.data, v.data.expand_as(x.data)):
- return Variable(torch.zeros(1).type_as(v.data))
- return Variable(torch.Tensor([-float("inf")]).type_as(v.data))
+ def batch_log_pdf(self, x):
+ v = self.v
+ if x.size() != v.size():
+ v = v.expand_as(x)
+ return torch.sum(torch.eq(x, v).float().log(), -1)
def support(self, v=None):
"""
@@ -62,6 +58,4 @@
:return: torch variable enumerating the support of the delta distribution.
:rtype: torch.autograd.Variable.
"""
- v = self._sanitize_input(v)
- # univariate case
- return Variable(v.data)
+ return Variable(self.v.data)
| {"golden_diff": "diff --git a/pyro/distributions/__init__.py b/pyro/distributions/__init__.py\n--- a/pyro/distributions/__init__.py\n+++ b/pyro/distributions/__init__.py\n@@ -23,7 +23,7 @@\n categorical = Categorical()\n bernoulli = RandomPrimitive(Bernoulli)\n beta = RandomPrimitive(Beta)\n-delta = Delta()\n+delta = RandomPrimitive(Delta)\n exponential = RandomPrimitive(Exponential)\n gamma = RandomPrimitive(Gamma)\n multinomial = RandomPrimitive(Multinomial)\ndiff --git a/pyro/distributions/delta.py b/pyro/distributions/delta.py\n--- a/pyro/distributions/delta.py\n+++ b/pyro/distributions/delta.py\n@@ -14,44 +14,40 @@\n \"\"\"\n enumerable = True\n \n- def _sanitize_input(self, v):\n- if v is not None:\n- # stateless distribution\n- return v\n- elif self.v is not None:\n- # stateful distribution\n- return self.v\n- else:\n- raise ValueError(\"Parameter(s) were None\")\n-\n- def __init__(self, v=None, batch_size=1, *args, **kwargs):\n+ def __init__(self, v, batch_size=None, *args, **kwargs):\n \"\"\"\n Params:\n `v` - value\n \"\"\"\n self.v = v\n- if v is not None:\n- if v.dim() == 1 and batch_size > 1:\n- self.v = v.expand(v, v.size(0))\n+ if not isinstance(self.v, Variable):\n+ self.v = Variable(self.v)\n+ if v.dim() == 1 and batch_size is not None:\n+ self.v = v.expand(v, v.size(0))\n super(Delta, self).__init__(*args, **kwargs)\n \n- def sample(self, v=None):\n- v = self._sanitize_input(v)\n- if isinstance(v, Variable):\n- return v\n- return Variable(v)\n+ def batch_shape(self, x=None):\n+ event_dim = 1\n+ v = self.v\n+ if x is not None and x.size() != v.size():\n+ v = self.v.expand_as(x)\n+ return v.size()[:-event_dim]\n+\n+ def event_shape(self):\n+ event_dim = 1\n+ return self.v.size()[-event_dim:]\n+\n+ def shape(self, x=None):\n+ return self.batch_shape(x) + self.event_shape()\n \n- def batch_log_pdf(self, x, v=None, batch_size=1):\n- v = self._sanitize_input(v)\n- if x.dim == 1:\n- x = x.expand(batch_size, x.size(0))\n- return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999\n+ def sample(self):\n+ return self.v\n \n- def log_pdf(self, x, v=None, *args, **kwargs):\n- v = self._sanitize_input(v)\n- if torch.equal(x.data, v.data.expand_as(x.data)):\n- return Variable(torch.zeros(1).type_as(v.data))\n- return Variable(torch.Tensor([-float(\"inf\")]).type_as(v.data))\n+ def batch_log_pdf(self, x):\n+ v = self.v\n+ if x.size() != v.size():\n+ v = v.expand_as(x)\n+ return torch.sum(torch.eq(x, v).float().log(), -1)\n \n def support(self, v=None):\n \"\"\"\n@@ -62,6 +58,4 @@\n :return: torch variable enumerating the support of the delta distribution.\n :rtype: torch.autograd.Variable.\n \"\"\"\n- v = self._sanitize_input(v)\n- # univariate case\n- return Variable(v.data)\n+ return Variable(self.v.data)\n", "issue": "Incorrect result from Delta's batch_log_pdf\nIt looks like there may be a bug in Delta's `batch_log_pdf` method. When the value we're computing the log prob of doesn't match the parameter I expect `batch_log_pdf` to return `-inf` but it doesn't. 
For example:\r\n\r\n```\r\nx = Variable(torch.Tensor([[1.0]]))\r\ny = Variable(torch.Tensor([[2.0]]))\r\n\r\n# This is OK, it returns zero as expected:\r\nprint(Delta(x).batch_log_pdf(x))\r\n\r\n# Here I expect -inf, but get 2.5500e+08\r\nprint(Delta(x).batch_log_pdf(y))\r\n```\r\n\r\n`log_pdf` works as expected.\r\n\r\n(This isn't high priority for me.)\n", "before_files": [{"content": "# abstract base class\nfrom pyro.distributions.bernoulli import Bernoulli\nfrom pyro.distributions.beta import Beta\nfrom pyro.distributions.categorical import Categorical\nfrom pyro.distributions.cauchy import Cauchy\nfrom pyro.distributions.half_cauchy import HalfCauchy\nfrom pyro.distributions.delta import Delta\nfrom pyro.distributions.distribution import Distribution # noqa: F401\n# specific distributions\nfrom pyro.distributions.diag_normal import DiagNormal\nfrom pyro.distributions.dirichlet import Dirichlet\nfrom pyro.distributions.exponential import Exponential\nfrom pyro.distributions.gamma import Gamma\nfrom pyro.distributions.log_normal import LogNormal\nfrom pyro.distributions.multinomial import Multinomial\nfrom pyro.distributions.poisson import Poisson\nfrom pyro.distributions.random_primitive import RandomPrimitive\nfrom pyro.distributions.uniform import Uniform\n\n# function aliases\ndiagnormal = DiagNormal()\nlognormal = RandomPrimitive(LogNormal)\ncategorical = Categorical()\nbernoulli = RandomPrimitive(Bernoulli)\nbeta = RandomPrimitive(Beta)\ndelta = Delta()\nexponential = RandomPrimitive(Exponential)\ngamma = RandomPrimitive(Gamma)\nmultinomial = RandomPrimitive(Multinomial)\npoisson = RandomPrimitive(Poisson)\nuniform = RandomPrimitive(Uniform)\ndirichlet = RandomPrimitive(Dirichlet)\ncauchy = RandomPrimitive(Cauchy)\nhalfcauchy = RandomPrimitive(HalfCauchy)\n", "path": "pyro/distributions/__init__.py"}, {"content": "import torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\n\n\nclass Delta(Distribution):\n \"\"\"\n :param v: support element *(any)*\n\n Discrete distribution that assigns probability one to the single element in\n its support. 
Delta distribution parameterized by a random choice should not\n be used with MCMC based inference, as doing so produces incorrect results.\n \"\"\"\n enumerable = True\n\n def _sanitize_input(self, v):\n if v is not None:\n # stateless distribution\n return v\n elif self.v is not None:\n # stateful distribution\n return self.v\n else:\n raise ValueError(\"Parameter(s) were None\")\n\n def __init__(self, v=None, batch_size=1, *args, **kwargs):\n \"\"\"\n Params:\n `v` - value\n \"\"\"\n self.v = v\n if v is not None:\n if v.dim() == 1 and batch_size > 1:\n self.v = v.expand(v, v.size(0))\n super(Delta, self).__init__(*args, **kwargs)\n\n def sample(self, v=None):\n v = self._sanitize_input(v)\n if isinstance(v, Variable):\n return v\n return Variable(v)\n\n def batch_log_pdf(self, x, v=None, batch_size=1):\n v = self._sanitize_input(v)\n if x.dim == 1:\n x = x.expand(batch_size, x.size(0))\n return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999\n\n def log_pdf(self, x, v=None, *args, **kwargs):\n v = self._sanitize_input(v)\n if torch.equal(x.data, v.data.expand_as(x.data)):\n return Variable(torch.zeros(1).type_as(v.data))\n return Variable(torch.Tensor([-float(\"inf\")]).type_as(v.data))\n\n def support(self, v=None):\n \"\"\"\n Returns the delta distribution's support, as a tensor along the first dimension.\n\n :param v: torch variable where each element of the tensor represents the point at\n which the delta distribution is concentrated.\n :return: torch variable enumerating the support of the delta distribution.\n :rtype: torch.autograd.Variable.\n \"\"\"\n v = self._sanitize_input(v)\n # univariate case\n return Variable(v.data)\n", "path": "pyro/distributions/delta.py"}]} | 1,754 | 859 |
gh_patches_debug_7945 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4253 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/booster/mixed_precision/mixed_precision_base.py]
1 from abc import ABC, abstractmethod
2 from typing import Callable, Optional, Tuple
3
4 import torch.nn as nn
5 from torch.optim import Optimizer
6
7 from colossalai.interface import OptimizerWrapper
8
9
10 class MixedPrecision(ABC):
11 """
12 An abstract class for mixed precision training.
13 """
14
15 @abstractmethod
16 def configure(self,
17 model: nn.Module,
18 optimizer: Optional[Optimizer] = None,
19 criterion: Optional[Callable] = None,
20 ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
21 # TODO: implement this method
22 pass
23
[end of colossalai/booster/mixed_precision/mixed_precision_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/booster/mixed_precision/mixed_precision_base.py b/colossalai/booster/mixed_precision/mixed_precision_base.py
--- a/colossalai/booster/mixed_precision/mixed_precision_base.py
+++ b/colossalai/booster/mixed_precision/mixed_precision_base.py
@@ -13,10 +13,11 @@
"""
@abstractmethod
- def configure(self,
- model: nn.Module,
- optimizer: Optional[Optimizer] = None,
- criterion: Optional[Callable] = None,
- ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
+ def configure(
+ self,
+ model: nn.Module,
+ optimizer: Optional[Optimizer] = None,
+ criterion: Optional[Callable] = None,
+ ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
# TODO: implement this method
pass
| {"golden_diff": "diff --git a/colossalai/booster/mixed_precision/mixed_precision_base.py b/colossalai/booster/mixed_precision/mixed_precision_base.py\n--- a/colossalai/booster/mixed_precision/mixed_precision_base.py\n+++ b/colossalai/booster/mixed_precision/mixed_precision_base.py\n@@ -13,10 +13,11 @@\n \"\"\"\n \n @abstractmethod\n- def configure(self,\n- model: nn.Module,\n- optimizer: Optional[Optimizer] = None,\n- criterion: Optional[Callable] = None,\n- ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n+ def configure(\n+ self,\n+ model: nn.Module,\n+ optimizer: Optional[Optimizer] = None,\n+ criterion: Optional[Callable] = None,\n+ ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n # TODO: implement this method\n pass\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Callable, Optional, Tuple\n\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nfrom colossalai.interface import OptimizerWrapper\n\n\nclass MixedPrecision(ABC):\n \"\"\"\n An abstract class for mixed precision training.\n \"\"\"\n\n @abstractmethod\n def configure(self,\n model: nn.Module,\n optimizer: Optional[Optimizer] = None,\n criterion: Optional[Callable] = None,\n ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n # TODO: implement this method\n pass\n", "path": "colossalai/booster/mixed_precision/mixed_precision_base.py"}]} | 735 | 207 |